text stringlengths 8 6.05M |
|---|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# F-measure scores for every (n_min, n_max) combination with n_min <= n_max.
scores = pd.DataFrame({
    'n_min': [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5],
    'n_max': [1, 2, 3, 4, 5, 2, 3, 4, 5, 3, 4, 5, 4, 5, 5],
    'F-m': [0.8373, 0.5696, 0.45, 0.413, 0.3945, 0.6359, 0.5978, 0.578,
            0.5715, 0.6849, 0.6922, 0.6929, 0.6958, 0.6917, 0.5631],
})

# Pivot into a grid (rows: n_min, columns: n_max) and render as a heatmap.
grid = scores.pivot(index='n_min', columns='n_max', values='F-m')
axes = sns.heatmap(grid, annot=True, cmap='gray_r', vmin=0.5, vmax=0.9)

# Move the n_max axis (ticks and label) to the top of the figure.
axes.xaxis.tick_top()
axes.set_xlabel('n_max')
axes.xaxis.set_label_position('top')
plt.show()
|
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# migrating to `from setuptools import setup` should be considered — confirm
# the targeted Python versions first.
from distutils.core import setup

# Package metadata for the `regraph` command-line tool.
setup(
    name='regraph',
    version='1.0',
    description='Make graphs from standard input based on regular expression.',
    author='Guilherme Starvaggi Franca',
    author_email='guifranca@gmail.com',
    scripts=['regraph'],  # installs the `regraph` script onto the user's PATH
)
|
import socket
import json
import pickle
from io import BytesIO
class PSQClient:
    """Client for a local PSQ server speaking a length-prefixed protocol.

    Requests are JSON; responses are pickled Python objects framed as:
    one byte giving the length of the ASCII decimal size field, then the
    size field itself, then the payload bytes.
    """

    def __init__(self, port):
        # TCP port of the PSQ server on 127.0.0.1.
        self.port = port

    def _send(self, connection, some_bytes):
        """Send `some_bytes` on `connection` with the framing described above."""
        message_size = str(len(some_bytes)).encode()
        size_length = len(message_size)
        connection.sendall(bytes([size_length]) + message_size)
        connection.sendall(some_bytes)

    def _receive(self, connection):
        """Receive one framed message and return its payload bytes.

        Raises ConnectionError if the peer closes the connection before the
        whole message has arrived (the previous implementation looped
        forever in that case, since recv() keeps returning b'').
        """
        header = connection.recv(1)
        if not header:
            raise ConnectionError('connection closed before size header')
        sz_length = int.from_bytes(header, byteorder='little')
        message_size = int(connection.recv(sz_length).decode())
        # A plain growable buffer; no need to pre-fill message_size bytes.
        input_buffer = BytesIO()
        read = 0
        while read < message_size:
            some_bytes = connection.recv(16384)
            if not some_bytes:
                # Peer closed mid-message: fail instead of spinning forever.
                raise ConnectionError('connection closed mid-message')
            input_buffer.write(some_bytes)
            read += len(some_bytes)
        return input_buffer.getvalue()

    def get_psq(self, query_id):
        """Fetch the PSQ stored under `query_id`.

        Re-raises a RuntimeError shipped back by the server.
        """
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            connection.connect(("127.0.0.1", self.port))
            request = {'type': 'get_psq', 'query_id': query_id}
            connection.sendall(json.dumps(request).encode())
            # SECURITY: pickle.loads on network data can execute arbitrary
            # code; acceptable only because the server is trusted and local.
            result = pickle.loads(self._receive(connection))
        finally:
            connection.close()
        if isinstance(result, RuntimeError):
            raise result
        return result

    def add_psq(self, query_id, psq_dict, idf_dict):
        """Register a PSQ and its IDF table under `query_id` on the server."""
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            connection.connect(("127.0.0.1", self.port))
            request = {'type': 'add_psq', 'query_id': query_id,
                       'psq_dict': psq_dict, 'idf_dict': idf_dict}
            connection.sendall(json.dumps(request).encode())
            result = pickle.loads(self._receive(connection))
        finally:
            # FIX: the socket was previously never closed (leak).
            connection.close()
        assert result == 'SUCCESS'
def get_psq():
    """Command-line entry point: query a local PSQ server and print the PSQ."""
    import argparse
    cli = argparse.ArgumentParser("Query psq server.")
    cli.add_argument("port", type=int)
    cli.add_argument("query_id", type=str)
    cli.add_argument("--pretty", action="store_true")
    options = cli.parse_args()
    psq = PSQClient(options.port).get_psq(options.query_id)
    if not options.pretty:
        print(json.dumps(psq))
    else:
        from pprint import pprint
        pprint(psq)
|
from django import forms
from django.forms import ModelForm
from classes.models import Restaurant, Food
# from crispy_forms.helper import FormHelper
# from django.validator import validate_slug
class AddRestaurantForm(ModelForm):
    """Form for creating a Restaurant."""
    class Meta:
        model = Restaurant
        fields = ['restaurant_name', 'picture_restaurant', 'working_hours']
        # BUG FIX: the ModelForm Meta option is `labels` (plural); the old
        # `label` attribute was silently ignored by Django, so these Thai
        # labels never appeared.
        labels = {
            'restaurant_name': 'ชื่อร้านอาหาร',
            'working_hours': 'เวลาเปิด/ปิดร้านอาหาร',
            'picture_restaurant': 'รูปภาพร้านอาหาร'
        }
class EditRestaurantForm(ModelForm):
    """Form for editing an existing Restaurant."""
    class Meta:
        model = Restaurant
        fields = ['restaurant_name', 'picture_restaurant', 'working_hours']
        # BUG FIX: Django expects `labels` (plural); `label` was ignored.
        labels = {
            'restaurant_name': 'ชื่อร้านอาหาร',
            'working_hours': 'เวลาเปิด/ปิดร้านอาหาร',
            'picture_restaurant': 'รูปภาพร้านอาหาร'
        }
class AddFoodForm(ModelForm):
    """Form for creating a Food item."""
    class Meta:
        model = Food
        fields = ['food_name', 'picture', 'price']
        # BUG FIX: Django expects `labels` (plural); `label` was ignored.
        labels = {
            'food_name': 'ชื่ออาหาร',
            'price': 'ราคา',
            'picture': 'รูปร้านอาหาร'
        }
|
from flask import Flask, redirect, url_for
app = Flask(__name__)


# Static route: fixed greeting.
@app.route("/welcome")
def hellow():
    return "Hello world!"


# Dynamic route: greet whatever name appears in the URL.
@app.route("/<name>")
def printname(name):
    return f"hello {name}!"


# Pages not meant for a plain user redirect to the dynamic greeting.
@app.route("/admin")
def admin():
    return redirect(url_for("printname", name="Admin!"))


if __name__ == '__main__':
    app.run(debug=True, host="localhost", port=3000)
|
#!/usr/bin/env python
'''
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b
are an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110;
therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
'''
amicableNumList = []


def sum_of_divisors(n):
    """Return the sum of the proper divisors of n (divisors strictly < n).

    Runs in O(sqrt(n)) by pairing each divisor i <= sqrt(n) with n // i
    instead of trial-dividing every number below n.
    """
    if n < 2:
        return 0
    summ = 1  # 1 divides every n >= 2 and is always proper
    i = 2
    while i * i <= n:
        if n % i == 0:
            summ += i
            partner = n // i
            if partner != i:  # do not count a perfect-square root twice
                summ += partner
        i += 1
    return summ


# BUG FIX + PY3 PORT: the original Python 2 loop compared d(n) with d(d(n))
# and d(d(d(n))), which also admits numbers that merely map onto an amicable
# pair.  Use the direct definition instead: a and b are amicable iff
# d(a) == b, d(b) == a, and a != b.
for num in range(2, 10000):
    partner = sum_of_divisors(num)
    if partner != num and sum_of_divisors(partner) == num:
        amicableNumList.append(num)

print('The sum of all the amicable numbers under 10000 is %d.'
      % sum(set(amicableNumList)))  # 31626
|
# Generated by Django 4.0.5 on 2022-08-20 11:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-game and per-period totals columns for both teams."""

    dependencies = [
        ('accounts', '0018_time'),
    ]

    # All sixteen new columns are identical nullable integers, so build the
    # operation list programmatically (same fields, same order as before).
    operations = [
        migrations.AddField(
            model_name='livedata',
            name=column,
            field=models.IntegerField(null=True),
        )
        for column in (
            'toGameT1', 'toGameT1L1', 'toGameT1L2', 'toGameT1L3',
            'toGameT2', 'toGameT2L1', 'toGameT2L2', 'toGameT2L3',
            'toPeriodT1', 'toPeriodT1L1', 'toPeriodT1L2', 'toPeriodT1L3',
            'toPeriodT2', 'toPeriodT2L1', 'toPeriodT2L2', 'toPeriodT2L3',
        )
    ]
|
"""
Save auto repair notes.
Created on 25.05.2017
@author: Ruslan Dolovanyuk
"""
import sqlite3
from drawer import Drawer
def setup(conn, cursor):
"""Create table in database."""
script = '''CREATE TABLE window (
id INTEGER PRIMARY KEY NOT NULL,
px INTEGER NOT NULL,
py INTEGER NOT NULL,
sx INTEGER NOT NULL,
sy INTEGER NOT NULL) WITHOUT ROWID
'''
cursor.execute(script)
script = '''INSERT INTO window (id, px, py, sx, sy)
VALUES (1, 0, 0, 800, 600)'''
cursor.execute(script)
script = '''CREATE TABLE category (
id INTEGER PRIMARY KEY NOT NULL,
name TEXT NOT NULL) WITHOUT ROWID
'''
cursor.execute(script)
script = '''CREATE TABLE main (
id INTEGER PRIMARY KEY NOT NULL,
title TEXT NOT NULL,
date TEXT NOT NULL,
company TEXT,
model TEXT,
serial TEXT,
data TEXT,
category INTEGER NOT NULL) WITHOUT ROWID
'''
cursor.execute(script)
conn.commit()
if __name__ == '__main__':
    conn = sqlite3.connect('autonotes.db')
    cursor = conn.cursor()
    # BUG FIX: the query used double quotes ("main"), which SQLite only
    # accepts as a string through a compatibility misfeature; use a bound
    # parameter instead.
    cursor.execute('SELECT * FROM sqlite_master WHERE name = ?', ('main',))
    if not cursor.fetchone():
        # First run: the schema does not exist yet.
        setup(conn, cursor)
    drawer = Drawer(conn, cursor)
    drawer.mainloop()
    cursor.close()
    conn.close()
|
from django.conf.urls import url
from django.urls import path
from . import views
# Namespace used by reverse()/{% url "login_app:..." %} lookups.
app_name = "login_app"

# Routes for authentication and profile management; all views live in this
# app's views module.
urlpatterns = [
    path('', views.home, name="home"),
    path('signup/', views.signUp, name="signup"),
    path('signin/', views.signIn, name="signin"),
    path('signout/', views.signOut, name='signout'),
    path('profile/', views.user_profile, name="profile"),
    path('change_info/', views.user_info_change, name="change_info"),
    path("password/", views.pass_change, name="pass_change"),
    path("add_pic/", views.add_pro_pic, name="add_pic"),
    path("change_pic/", views.change_pro_pic, name="change_pic"),
]
|
#-*- coding=utf-8 -*-
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_pagedown import PageDown
#from celery import Celery, platforms
import logging
import datetime
from datetime import timedelta
import os
basedir = os.path.abspath('.')
log_ = os.path.join(basedir, 'logs/l4j.us.log')

# File logging setup.  Ensure the logs directory exists first:
# logging.FileHandler raises FileNotFoundError when the parent is missing.
os.makedirs(os.path.dirname(log_), exist_ok=True)
logger = logging.getLogger("video4sex")
logger.setLevel(logging.DEBUG)
ch = logging.FileHandler(log_)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

app = Flask(__name__)
app.config.from_object('config')

login_manager = LoginManager(app)
# BUG FIX: the attribute is `session_protection`; the misspelled
# `session_protect` just created an unused attribute, so the 'strong'
# setting was never applied.
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'

bootstrap = Bootstrap(app)
mail = Mail(app)
pagedown = PageDown(app)
# NOTE(review): `use_native_unicode` is ignored/removed in modern
# SQLAlchemy; kept for compatibility with the pinned version — confirm.
db = SQLAlchemy(app, use_native_unicode='utf8')

# Blueprints must be imported/registered after `app` exists.
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
from app import views
from .main import process_sql
# This function gets all of the payment records from the database
# and is used to handle searching and sorting so that only the records the user wants to see are fetched.
def get_payments(type_search, datetime_sorts, search_field, search_term):
    """Fetch payment records, optionally filtered and sorted.

    type_search: 'Inbound', 'Outbound', or anything else for both kinds.
    datetime_sorts: dict with 'Date' and 'Time' keys giving sort directions.
    search_field: column name to filter on.
    search_term: prefix to match; '*' or '' disables filtering.

    SECURITY NOTE(review): `search_field` and the `datetime_sorts` values
    are interpolated straight into the SQL text; they must come from a
    fixed, application-controlled list, never from raw user input — confirm
    against the callers.
    """
    parameters = ()
    search = ''
    # if there is a search term, add the search to the query
    if search_term not in ('*', ''):
        search = f'AND {search_field} LIKE ? COLLATE NOCASE'
        if type_search not in ('Inbound', 'Outbound'):
            # Both halves of the UNION carry the placeholder, so the value
            # must be supplied twice.
            parameters = (f"{search_term}%", f"{search_term}%")
        else:
            parameters = (f"{search_term}%",)
    # Inbound payments join through the flat link table to tenants.
    sql_inbounds = f"""SELECT PaymentsTbl.PaymentID, PaymentType,
    (TenantTbl.TenantForename || ' ' || TenantTbl.TenantSurname) AS Payee,
    PaymentMethod, CAST(TotalPaid AS REAL), PaymentDate, PaymentTime
    FROM ((PaymentsTbl INNER JOIN FlatPaymentsTbl
    ON PaymentsTbl.PaymentID = FlatPaymentsTbl.PaymentID)
    INNER JOIN TenantTbl
    ON FlatPaymentsTbl.TenantID = TenantTbl.TenantID)
    WHERE PaymentsTbl.PaymentType = 'Inbound' {search}"""
    # Outbound payments join through the employee link table instead.
    sql_outbounds = f"""SELECT PaymentsTbl.PaymentID, PaymentType,
    (EmployeeTbl.EmployeeForename || ' ' || EmployeeTbl.EmployeeSurname) AS Payee,
    PaymentMethod, CAST(TotalPaid AS REAL), PaymentDate, PaymentTime
    FROM ((PaymentsTbl INNER JOIN EmployeePaymentsTbl
    ON PaymentsTbl.PaymentID = EmployeePaymentsTbl.PaymentID)
    INNER JOIN EmployeeTbl
    ON EmployeePaymentsTbl.EmployeeID = EmployeeTbl.EmployeeID)
    WHERE PaymentsTbl.PaymentType = 'Outbound' {search}"""
    if type_search == 'Inbound':
        sql = sql_inbounds
    elif type_search == 'Outbound':
        sql = sql_outbounds
    else:
        # Any other value means "show both directions".
        sql = f"""{sql_inbounds}
        UNION ALL
        {sql_outbounds}"""
    sql += f"""\nORDER BY PaymentDate {datetime_sorts['Date']},
    PaymentTime {datetime_sorts['Time']}"""
    return process_sql(sql, parameters=parameters)
def get_payment(payment_id, payment_type):
    """Fetch one payment record joined with the payee's id and full name."""
    # Both queries have the same shape; only the person/link tables differ.
    if payment_type == "Inbound":
        person, link, link_key = "Tenant", "FlatPaymentsTbl", "TenantID"
    else:
        person, link, link_key = "Employee", "EmployeePaymentsTbl", "EmployeeID"
    sql = f"""SELECT ({person}Tbl.{person}ID || ' ' ||
    {person}Tbl.{person}Forename || ' ' || {person}Tbl.{person}Surname) AS Payee,
    PaymentMethod, TotalPaid, PaymentDate, PaymentTime, PaymentDescription
    FROM ((PaymentsTbl INNER JOIN {link}
    ON PaymentsTbl.PaymentID = {link}.PaymentID)
    INNER JOIN {person}Tbl
    ON {link}.{link_key} = {person}Tbl.{person}ID)
    WHERE PaymentsTbl.PaymentID = ?"""
    return process_sql(sql, parameters=(payment_id,))
def get_payees(payment_type):
    """List possible payees: tenants for inbound payments, employees otherwise."""
    prefix = "Tenant" if payment_type == 'Inbound' else "Employee"
    sql = f"SELECT {prefix}ID, {prefix}Forename, {prefix}Surname FROM {prefix}Tbl"
    return process_sql(sql)
def add_payment(record, payee_id):
    """Insert a payment plus the link-table row tying it to its payee."""
    process_sql(
        """INSERT INTO PaymentsTbl(PaymentType, PaymentMethod, TotalPaid,
        PaymentDate, PaymentTime, PaymentDescription)
        VALUES (?, ?, ?, ?, ?, ?)""",
        parameters=record,
    )
    # record[0] is the payment type; it decides which link table gets a row.
    if record[0] == "Outbound":
        link_sql = """INSERT INTO EmployeePaymentsTbl(PaymentID, EmployeeID)
        VALUES (last_insert_rowid(), ?)"""
    else:
        link_sql = """INSERT INTO FlatPaymentsTbl(PaymentID, TenantID)
        VALUES (last_insert_rowid(), ?)"""
    process_sql(link_sql, parameters=(payee_id,))
def edit_payment(payment_id, payment_type, record, payee_id):
    """Update a payment's fields and repoint its link row at `payee_id`."""
    process_sql(
        """UPDATE PaymentsTbl SET
        PaymentMethod = ?,
        TotalPaid = ?,
        PaymentDate = ?,
        PaymentTime = ?,
        PaymentDescription = ?
        WHERE PaymentID = ?""",
        parameters=(*record, payment_id),
    )
    # Inbound payments link to tenants; everything else links to employees.
    table, column = (("FlatPaymentsTbl", "TenantID")
                     if payment_type == "Inbound"
                     else ("EmployeePaymentsTbl", "EmployeeID"))
    process_sql(
        f"""UPDATE {table} SET
        {column} = ?
        WHERE PaymentID = ?""",
        parameters=(payee_id, payment_id,),
    )
# This function deletes the payments from both the link tables and the payment table
def delete_payment(payment_id, payment_type):
    """Remove a payment record and its payee link-table row."""
    link_table = ("FlatPaymentsTbl" if payment_type == "Inbound"
                  else "EmployeePaymentsTbl")
    # Delete from the main table first, then from the matching link table.
    for table in ("PaymentsTbl", link_table):
        process_sql(f"DELETE FROM {table} WHERE PaymentID = ?",
                    parameters=(payment_id,))
|
import requests
import json
import file_result.result
def getFileScanId(url, apikey, a, b):
    """Upload file `b` (displayed as name `a`) to the /file/scan endpoint.

    Returns the scan_id assigned by VirusTotal as a string.  The API limits
    uploads to 32 MB.
    """
    params = {'apikey': apikey}
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even if the request raises.
    with open(b, 'rb') as handle:
        files = {'file': (a, handle)}
        response = requests.post(url, files=files, params=params)
    return str(response.json()['scan_id'])
def getFieReportResult(url, apikey, my_scan_id):
    """Fetch a scan report from the /file/report endpoint.

    `resource` may be the file's MD5/SHA-1/SHA-256 or a scan_id returned by
    /file/scan; allinfo='1' asks for extended data beyond the plain
    antivirus verdicts.  The raw report is also dumped to jsonResult.json.
    """
    get_params = {'apikey': apikey, 'resource': my_scan_id, 'allinfo': '1'}
    response2 = requests.get(url, params=get_params)
    # FIX: let requests decode the body directly instead of the redundant
    # json.loads(response2.text) round-trip.
    jsondata = response2.json()
    with open("jsonResult.json", "w") as f:
        json.dump(jsondata, f, indent=4)
    return jsondata
def getResult(json):
    """Print each engine's detection flag and persist the map to result.txt.

    NOTE: the parameter name shadows the stdlib `json` module; kept as-is
    for backward compatibility with existing callers.
    """
    result = {engine: verdict['detected']
              for engine, verdict in json["scans"].items()}
    print(result)
    print("一共有{0}条杀毒数据".format(len(result)))
    with open("result.txt", "w") as g:
        g.write(str(result))
def uploadFile(file_name, file_src):
    """Return a list of {name, version, detected} dicts, one per AV engine.

    The live VirusTotal upload/report calls are currently disabled; the
    report is read from a previously saved result via
    file_result.result.pick_file instead, so `file_src` is unused for now.

    SECURITY NOTE(review): a real-looking API key was hard-coded here for
    the disabled upload path; it has been removed — store it in
    configuration/environment and revoke the leaked key.
    """
    report = file_result.result.pick_file(file_name)
    data = report["scans"]
    # Build a fresh dict per engine; the old reuse-one-dict-then-.copy()
    # dance is unnecessary with a comprehension.
    return [
        {
            'name': key,
            'version': data[key]["version"],
            'detected': data[key]["detected"],
        }
        for key in data
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 13:55:11 2019
@author: Joule
"""
from Aksjon import Aksjon
class EnkeltSpill:
    """A single game (round) between two players.

    (Docstrings translated from Norwegian; identifiers kept unchanged.)
    """
    def __init__(self, spiller1, spiller2):
        self.spiller1 = spiller1  # player 1
        self.spiller2 = spiller2  # player 2
        self.vinner = None        # winner's name, or None on a draw
        self.action1 = None       # Aksjon chosen by player 1
        self.action2 = None       # Aksjon chosen by player 2
        self.points = None        # score from player 1's perspective
    def gjennomfoer_spill(self):
        """Play the game and report the result back to both players.

        Scoring from player 1's point of view: draw = 0.5, win = 1,
        loss = 0.  Relies on Aksjon's comparison operators to decide the
        outcome.
        """
        self.action1 = Aksjon(self.spiller1.velg_aksjon())
        self.action2 = Aksjon(self.spiller2.velg_aksjon())
        if self.action1 == self.action2:
            # Draw: both players get half a point.
            self.spiller1.motta_resultat(0.5)
            self.spiller2.motta_resultat(0.5)
            self.vinner = None
            self.points = 0.5
        elif self.action1 > self.action2:
            # Player 1 wins.
            self.spiller1.motta_resultat(1)
            self.spiller2.motta_resultat(0)
            self.vinner = self.spiller1.oppgi_navn()
            self.points = 1
        else:
            # Player 2 wins.
            self.spiller1.motta_resultat(0)
            self.spiller2.motta_resultat(1)
            self.vinner = self.spiller2.oppgi_navn()
            self.points = 0
    def __str__(self):
        # NOTE(review): __str__ has the side effect of playing a round, so
        # formatting the object twice plays two games — confirm intended.
        self.gjennomfoer_spill()
        return(self.spiller1.oppgi_navn() +
               " valgte " + str(self.action1.__str__()) +
               " mens " + self.spiller2.oppgi_navn() + " valgte "
               + str(self.action2.__str__()) + " og vinneren ble " + str(self.vinner))
|
import unittest
from pycolorizer import (
Color,
NoStyleFoundError,
InvalidStyleNameError,
RecursionInThemeError
)
class ColorTest(unittest.TestCase):
    """Unit tests for pycolorizer's Color: styling, themes, tags, 256 colors."""

    def setUp(self):
        # Fresh Color instance per test; several tests mutate its themes.
        self.color = Color()

    # --- basic styling -----------------------------------------------------
    def test_given_string_should_apply_style(self):
        self.assertEqual(
            "\033[31mfoo\033[0m",
            str(self.color('foo').red())
        )
    def test_given_string_should_apply_more_than_one_style(self):
        # Styles nest: each applied style wraps the previous escape sequence.
        self.assertEqual(
            "\033[1m\033[97mfoo\033[0m\033[0m",
            str(self.color('foo').white().bold())
        )
    def test_style_name_is_not_case_sensitive(self):
        self.assertEqual(
            "\033[31mfoo\033[0m",
            str(self.color('foo').RED())
        )
    def test_state_is_initialized(self):
        self.assertEqual('foo', str(self.color('foo')))
        self.assertEqual('bar', str(self.color('bar')))
    def test_given_styled_string_should_be_able_to_reused(self):
        # reset() strips previously applied styling.
        self.assertEqual('foo', str(self.color('foo').blue().reset()))
    def test_raise_error_when_style_not_found(self):
        self.assertRaises(
            NoStyleFoundError,
            lambda: self.color.color('foo bar').foo()
        )
    def test_style_can_contain_text(self):
        # Text may be given to the style call instead of the constructor.
        self.assertEqual(
            str(self.color('foo').blue()),
            self.color().blue('foo')
        )

    # --- shortcuts ---------------------------------------------------------
    def test_shortcut_foreground(self):
        self.assertEqual(
            str(self.color('Hello').blue()),
            str(self.color('Hello').fg('blue'))
        )
    def test_shortcut_background(self):
        self.assertEqual(
            str(self.color('Hello').bg_red()),
            str(self.color('Hello').bg('red'))
        )
    def test_has_highlight_shortcut_for_background(self):
        self.assertEqual(
            str(self.color('Hello').bg_blue()),
            str(self.color('Hello').highlight('blue'))
        )

    # --- themes ------------------------------------------------------------
    def test_should_support_themes(self):
        self.color.set_themes({'error': 'red'})
        self.assertEqual(
            str(self.color('Error...').red()),
            str(self.color('Error...').error())
        )
    def test_thmes_can_override_default_styles(self):
        self.color.set_themes({'white': 'red'})
        self.assertEqual(
            str(self.color('Warning...').red()),
            str(self.color('Warning...').white())
        )
    def test_given_invalid_them_name_should_raise_error(self):
        self.assertRaises(
            InvalidStyleNameError,
            lambda: self.color('foo bar').set_themes({'&é""': "white"})
        )

    # --- cleaning and tag interpretation -----------------------------------
    def test_given_styled_string_can_be_cleaned(self):
        self.assertEqual(
            'some text',
            str(self.color(str(self.color('some text').red())).clean())
        )
    def test_given_string_with_style_tags_should_be_interpret(self):
        text = 'This is <red>some text</red>'
        self.assertEqual(
            'This is ' + str(self.color('some text').red()),
            str(self.color(text).colorize())
        )
    def test_given_string_with_nested_tags_should_be_interpret(self):
        actual = str(
            self.color('<cyan>Hello <bold>World!</bold></cyan>')
            .colorize()
        )
        expected = str(
            self.color('Hello ' + str(self.color('World!').bold()))
            .cyan()
        )
        self.assertEqual(expected, actual)

    # --- apply / center ----------------------------------------------------
    def test_apply(self):
        self.assertEqual(
            str(self.color('foo').blue()),
            str(self.color().apply('blue', 'foo'))
        )
    def test_apply_center(self):
        # Centering must pad to the requested width, including for
        # multi-byte characters, with or without styling applied.
        width = 80
        for text in ('', 'hello', 'hellow world', '✩'):
            current_width = len(
                str(self.color(text).center(width))
            )
            self.assertEqual(width, current_width)
            current_width = len(
                str(
                    self.color(text)
                    .center(width)
                    .bg('blue')
                    .clean()
                )
            )
            self.assertEqual(width, current_width)
    def test_apply_center_multiline(self):
        # Each line of a multi-line string is centered independently.
        width = 80
        color = Color()
        text = 'hello' + "\n" + '✩' + "\n" + 'world'
        actual = str(color(text).center(width))
        for line in actual.split("\n"):
            self.assertEqual(width, len(line))

    # --- 256-color support and error cases ----------------------------------
    def test_should_support_256_colors(self):
        self.assertEqual(
            "\033[38;5;3mfoo\033[0m",
            self.color().apply('color[3]', 'foo')
        )
        self.assertEqual(
            "\033[48;5;3mfoo\033[0m",
            self.color().apply('bg_color[3]', 'foo')
        )
    def test_given_invalid_color_number_should_raise_error(self):
        # Valid 256-color indices are 0..255.
        self.assertRaises(
            NoStyleFoundError,
            lambda: self.color().apply('color[-1]', 'foo')
        )
        self.assertRaises(
            NoStyleFoundError,
            lambda: self.color().apply('color[256]', 'foo')
        )
    def test_should_handle_recursion_in_theme_with_list(self):
        self.assertRaises(
            RecursionInThemeError,
            lambda: self.color().set_themes(
                {
                    'green': ['green'],
                }
            )
        )
    def test_should_handle_recursion_in_theme_with_string(self):
        self.assertRaises(
            RecursionInThemeError,
            lambda: self.color().set_themes(
                {
                    'green': 'green',
                }
            )
        )
|
'''
This script will pass input values to a Boom Crane module to receive the
dynamic response of such a crane to the given parameters. It will
then plot the relevant results.
Created by: Daniel Newman
Date: 09-28-2016
'''
import warnings
warnings.simplefilter("ignore", UserWarning)
import sys
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
import numpy as np
import InputShaping as shaping
import trolley_pendulum as TP
import Generate_Plots as genplt
import os
# define constants
DEG_TO_RAD = np.pi / 180
G = 9.81  # m / s**2

# Time vector characteristics.
tmax = 35
t_step = .01
t = np.arange(0, tmax, t_step)
StartTime = np.array([0.])
Shaper = 'ZV Shaped'

# Geometry of the system.
mass = 10  # Mass of the payload in kg

# Initial conditions (angles entered in degrees, converted to radians).
x_init = 0.  # Luff angle
x_dot_init = 0.
theta_init = 0.
theta_dot_init = 0.
theta_init *= DEG_TO_RAD
theta_dot_init *= DEG_TO_RAD
l = 9.81
X0 = np.array([theta_init, theta_dot_init, x_init, x_dot_init])

# Actuator constraints.
Amax = 20.  # m / s^2
Vmax = 2.  # m / s
# C = array of actuator constraints
C = np.array([Amax, Vmax])

# Desired final angles.
x_fin = 20.
theta_fin = 0.
Distance = np.array([x_fin - x_init])

# Pack relevant variables for the trolley-pendulum model.
p = [[Amax, Vmax], l, StartTime, x_init, x_fin, t_step, t, X0, Distance]

# Call Boom crane response based on given values.
# FIX: use the previously unused Shaper constant instead of repeating the
# 'ZV Shaped' literal.
zv_shaped_response = TP.response(p, Shaper)
unshaped_response = TP.response(p, 'Unshaped')

figfold = 'Figures/{}/Distance={}'.format(sys.argv[0], x_fin - x_init)
genplt.compare_responses(t, zv_shaped_response[:, 0], Shaper,
                         title='Vibration Amplitude', xlabel='Time (sec)',
                         ylabel='Vibration Amplitude (rad)', folder=figfold)
genplt.compare_responses(t, zv_shaped_response[:, 2], Shaper,
                         title='Cart Position', xlabel='Time (sec)',
                         ylabel='Cart Position', folder=figfold)
# BUG FIX: the velocity plot was labelled 'Cart Position'.
genplt.compare_responses(t, zv_shaped_response[:, 3], Shaper,
                         title='Cart Velocity', xlabel='Time (sec)',
                         ylabel='Cart Velocity', folder=figfold)
|
#!/usr/bin/env python3
# SPDX-License-Identifier: CC0-1.0
import sys
import urllib.request
import urllib.parse
import cgi
from collections import namedtuple
from typing import Optional, Tuple
COUNT_DNC = 44053
COUNT_PODESTA = 59028
COUNT_CLINTON = 33727
#USER_AGENT = 'Mozilla/5.0'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0'
HeadInfo = namedtuple('HeadInfo', ['id', 'name', 'url', 'size'])
def get_clinton_pdf_url(id: int) -> Optional[str]:
    """Scrape the source-PDF URL from a Clinton-archive email page."""
    import bs4
    req = urllib.request.Request(
        url=f'https://wikileaks.org/clinton-emails/emailid/{id}',
        headers={'User-Agent': USER_AGENT}
    )
    with urllib.request.urlopen(req) as x:
        page = bs4.BeautifulSoup(x.read().decode('utf-8'), 'html5lib')
        # Walk div#source -> <a href=...>, bailing out with a log line at
        # each missing step.
        sourcediv = page.find('div', id='source')
        if not sourcediv:
            print(f'{id}: missing source <div>', file=sys.stderr)
            return None
        sourcea = sourcediv.find('a')
        if not sourcea:
            print(f'{id}: missing source <a>', file=sys.stderr)
            return None
        href = sourcea.get('href')
        if not href:
            print(f'{id}: <a> missing href', file=sys.stderr)
            return None
        return urllib.parse.urljoin('https://wikileaks.org/', href)
##
# $ curl --head https://wikileaks.org/clinton-emails/Clinton_Email_August_Release/C05777221.pdf
# HTTP/1.1 200 OK
# Server: nginx
# Date: Tue, 08 Dec 2020 06:34:29 GMT
# Content-Type: application/pdf
# Content-Length: 48833
# Connection: keep-alive
# Last-Modified: Wed, 02 Mar 2016 23:20:46 GMT
# X-Content-Type-Options: nosniff
# X-Cache: 0
# X-Content-Type-Options: nosniff
# X-XSS-PROTECTION: 1; mode=block
# Strict-Transport-Security: max-age=31536000; includeSubDomains; preload
##
# def get_clinton_pdf(url):
# req = urllib.request.Request(
# url=url,
# headers={'User-Agent': USER_AGENT}
# )
#
# with urllib.request.urlopen(req) as x:
# ct = x.getheader('Content-Type')
# if ct is None:
# print(f'{}')
def get_clinton_pdf(id: int) -> Optional[Tuple[bytes, str]]:
    """Download the source PDF for a Clinton-archive email.

    Returns (pdf_bytes, url), or None on any failure.
    NOTE(review): the original returned None even on success after reading
    the headers — it looked unfinished; it now returns the body. Confirm
    the intended return shape against callers.
    """
    url = get_clinton_pdf_url(id)
    if url is None:
        # BUG FIX: a failed URL lookup previously crashed urllib with
        # url=None instead of propagating the None.
        return None
    req = urllib.request.Request(
        url=url,
        headers={'User-Agent': USER_AGENT}
    )
    with urllib.request.urlopen(req) as x:
        ct = x.getheader('Content-Type')
        if ct is None:
            print(f'{id}: Missing Content-Type header', file=sys.stderr)
            return None
        # BUG FIX: str has no .tolower(); the old check always raised
        # AttributeError.  Use .lower().
        if 'application/pdf' != ct.lower():
            print(f'{id}: Content-Type is not application/pdf', file=sys.stderr)
            return None
        cl = x.getheader('Content-Length')
        if cl is None:
            print(f'{id}: Missing Content-Length header', file=sys.stderr)
            return None
        cl = int(cl)
        return (x.read(), url)
def _head_eml(id: int, urltag: str, logprefix: str) -> Optional[HeadInfo]:
    """HEAD an email download URL and return its metadata (no body).

    NOTE(review): `cgi` is deprecated (removed in Python 3.13); consider
    email.message or a manual header parse when upgrading.
    """
    url = f'https://wikileaks.org/{urltag}/get/{id}'
    req = urllib.request.Request(
        url=url,
        headers={'User-Agent': USER_AGENT},
        method='HEAD'
    )
    with urllib.request.urlopen(req) as x:
        cd = x.getheader('Content-Disposition')
        if cd is None:
            print(f'{logprefix} {id}: Missing Content-Disposition header', file=sys.stderr)
            return None
        value, params = cgi.parse_header(cd)
        if 'filename' not in params:
            print(f'{logprefix} {id}: No filename field in Content-Disposition')
            return None
        # BUG FIX: the header name was misspelled 'Contnet-Length', so the
        # reported size was always None.
        size = x.getheader('Content-Length')
        if size:
            size = int(size)
        return HeadInfo(id, params["filename"], url, size)
def _get_eml(id: int, urltag: str, logprefix: str) -> Optional[Tuple[bytes, HeadInfo]]:
    """GET an email download URL; return (raw bytes, metadata)."""
    url = f'https://wikileaks.org/{urltag}/get/{id}'
    req = urllib.request.Request(
        url=url,
        headers={'User-Agent': USER_AGENT}
    )
    with urllib.request.urlopen(req) as x:
        cd = x.getheader('Content-Disposition')
        if cd is None:
            print(f'{logprefix} {id}: Missing Content-Disposition header', file=sys.stderr)
            return None
        value, params = cgi.parse_header(cd)
        if 'filename' not in params:
            print(f'{logprefix} {id}: No filename field in Content-Disposition')
            return None
        # BUG FIX: HeadInfo has four fields (id, name, url, size); the old
        # three-argument call raised TypeError.  Mirror _head_eml's size
        # handling.
        size = x.getheader('Content-Length')
        if size:
            size = int(size)
        return (x.read(), HeadInfo(id, params["filename"], url, size))
def get_dnc_eml(id: int) -> Optional[Tuple[bytes, HeadInfo]]:
    """Download one DNC-leak email by id (raw bytes + metadata)."""
    return _get_eml(id, 'dnc-emails', 'DNC')
def get_podesta_eml(id: int) -> Optional[Tuple[bytes, HeadInfo]]:
    """Download one Podesta-leak email by id (raw bytes + metadata)."""
    return _get_eml(id, 'podesta-emails', 'Podesta')
def head_dnc_eml(id: int) -> Optional[HeadInfo]:
    """Fetch metadata only (HEAD) for one DNC-leak email."""
    return _head_eml(id, 'dnc-emails', 'DNC')
def head_podesta_eml(id: int) -> Optional[HeadInfo]:
    """Fetch metadata only (HEAD) for one Podesta-leak email."""
    return _head_eml(id, 'podesta-emails', 'Podesta')
|
import json
import socket
import sys
import threading
import time
from flask import Flask
from flask_restful import Api, Resource, reqparse, abort
app = Flask(__name__)
api = Api(app)


# REQUEST PARSING SECTION: one RequestParser per endpoint/verb.  Every
# parser requires a username identifying the querier; some add extra fields.
def _make_parser(*fields):
    """Build a RequestParser requiring each given (name, help) field."""
    parser = reqparse.RequestParser()
    for field_name, help_text in fields:
        parser.add_argument(field_name, type=str, help=help_text, required=True)
    return parser


_USERNAME = ("username", "Username is required")

user_get_args = _make_parser(_USERNAME)
user_delete_args = _make_parser(_USERNAME)
room_post_args = _make_parser(("roomname", "Roomname is required."), _USERNAME)
room_get_args = _make_parser(_USERNAME)
room_user_put_args = _make_parser(_USERNAME)
room_user_get_args = _make_parser(_USERNAME)
room_user_delete_args = _make_parser(_USERNAME)
message_put_args = _make_parser(_USERNAME, ("message", "Message cannot be empty"))

# Message GET is the one endpoint where the username is optional.
message_get_args = reqparse.RequestParser()
message_get_args.add_argument("username", type=str, help="Username is required", required=False)

# In-memory state (no persistence across restarts).
users = []  # flat list of usernames
rooms = {}
# Rooms structure:
# {
#     0 : {
#         'roomname': name,
#         'userlist' : [user0, user1]
#         'message_list': [msg, msg]
#     }
# }
"""ABORT SECTION"""
"""Identifies different situations in which program needs to abort and defines response."""
def abort_if_user_not_exists(username):
if username not in users:
abort(404, message=f"Could not find user \"{username}\"")
def abort_if_user_exists(username):
if username in users:
abort(409, message=f"User already exists with ID \"{username}\"")
def abort_if_querier_does_not_match_user(querier, username):
if querier != username:
abort(401, message=f"You can only perform this action for your own user.")
def abort_if_room_not_exists(room_id):
if room_id not in rooms:
abort(404, message=f"Could not find room {room_id}")
def abort_if_room_exists(room_id):
if room_id in rooms:
abort(409, message=f"Room already exists with ID {room_id}")
def abort_if_room_empty(room_id):
if len(rooms[room_id]["userlist"]) == 0:
abort(404, message=f"Could not find any users for room {room_id}.")
def abort_if_roomuser_not_exists(room_id, username):
if username not in rooms[room_id]["userlist"]:
abort(409, message=f"User \"{username}\" was not found in room {room_id}")
def abort_if_roomuser_exists(room_id, username):
if username in rooms[room_id]["userlist"]:
abort(409, message=f"User \"{username}\" is already in room {room_id}")
def abort_if_message_list_empty(room_id):
if len(rooms[room_id]["message_list"]) == 0:
abort(404, message=f"Could not find any messages for room {room_id}")
def abort_if_querier_not_in_room(querier, room_id):
if room_id not in rooms:
abort(404, message=f"Could not find room {room_id}")
if querier not in rooms[room_id]['userlist']:
abort(401, message=f"You must be part of this room to query it.")
"""RESOURCES SECTION"""
"""Defines resources"""
class User(Resource):
    """REST resource for the global user list."""

    def get(self, username=None):
        """Return every user, or confirm that one named user is online."""
        querier = user_get_args.parse_args()['username']
        abort_if_user_not_exists(querier)
        if username is None:
            return users, 200
        abort_if_user_not_exists(username)
        if username in users:
            return f"User \"{username}\" is online", 200

    def post(self, username):
        """Register a new user."""
        abort_if_user_exists(username)
        users.append(username)
        return username, 201

    def delete(self, username):
        """Remove a user (self only) and evict them from every room."""
        querier = user_delete_args.parse_args()['username']
        abort_if_querier_does_not_match_user(querier, username)
        abort_if_user_not_exists(username)
        users.remove(username)
        for room_id in rooms:
            if username in rooms[room_id]['userlist']:
                rooms[room_id]['userlist'].remove(username)
        return '', 204
class Room(Resource):
    """REST resource for chat rooms."""

    def get(self, room_id=None):
        """Return all rooms (without messages), or one room.

        Message history for a single room is only included when the querier
        is a member of that room.
        """
        querier = room_get_args.parse_args()['username']
        abort_if_user_not_exists(querier)
        if room_id is None:
            formatted_rooms = {}
            for key in rooms:
                formatted_rooms[key] = {"roomname": rooms[key]['roomname'], "userlist": rooms[key]['userlist']}
            return formatted_rooms, 200
        room_id = str(room_id)
        abort_if_room_not_exists(room_id)
        if querier in rooms[room_id]['userlist']:
            # Members see the full room record, messages included.
            room_resp = rooms[room_id]
        else:
            # Non-members only see the name and user list.
            room_resp = {room_id: {"roomname": rooms[room_id]['roomname'], "userlist": rooms[room_id]['userlist']}}
        return room_resp, 200

    def post(self, room_id):
        """Create a new room (room_id must be unused)."""
        # FIX: the request was parsed twice (once for the querier, once for
        # the fields); parse it a single time and reuse the result.
        args = room_post_args.parse_args()
        abort_if_user_not_exists(args['username'])
        room_id = str(room_id)
        abort_if_room_exists(room_id)
        rooms[room_id] = {"roomname": args["roomname"], "userlist": [], "message_list": []}
        return room_id, 201
class RoomUser(Resource):  # RoomUser = user in relation to a room
    """REST resource for a user's membership in a room."""

    def get(self, room_id, username=None):
        """Return a room's member list, or confirm one user's membership."""
        querier = room_user_get_args.parse_args()['username']
        abort_if_user_not_exists(querier)
        room_id = str(room_id)
        abort_if_room_not_exists(room_id)
        if username is None:
            # reads return 200; the original's 201 implies a resource was created
            return rooms[room_id]["userlist"], 200
        abort_if_room_empty(room_id)
        abort_if_user_not_exists(username)
        abort_if_roomuser_not_exists(room_id, username)
        return username + " is in the room " + str(room_id), 200

    def put(self, room_id, username):
        """Add a user to a room (self-service only)."""
        querier = room_user_put_args.parse_args()['username']
        abort_if_querier_does_not_match_user(querier, username)
        room_id = str(room_id)
        abort_if_user_not_exists(username)
        abort_if_room_not_exists(room_id)
        abort_if_roomuser_exists(room_id, username)
        rooms[room_id]["userlist"].append(username)
        return username + " is connected to " + str(room_id), 201

    def delete(self, room_id, username):
        """Remove a user from a room (self-service only)."""
        querier = room_user_delete_args.parse_args()['username']
        abort_if_querier_does_not_match_user(querier, username)
        room_id = str(room_id)
        abort_if_user_not_exists(username)
        abort_if_room_not_exists(room_id)
        abort_if_roomuser_not_exists(room_id, username)
        rooms[room_id]["userlist"].remove(username)
        # 200 with a confirmation body; 201 is reserved for creation
        return username + " is removed from " + str(room_id), 200
class Message(Resource):
    """REST resource for messages posted in a room."""

    def get(self, room_id, username=None):
        """Return all messages in a room, or only one user's messages.

        Only room members may read messages (enforced by
        abort_if_querier_not_in_room).
        """
        room_id = str(room_id)
        querier = message_get_args.parse_args()["username"]
        abort_if_querier_not_in_room(querier, room_id)
        if username is not None:  # `is not None`, not `!= None`
            abort_if_roomuser_not_exists(room_id, username)
            abort_if_message_list_empty(room_id)
            usermsgs = [msg["message"]
                        for msg in rooms[room_id]["message_list"]
                        if msg["username"] == username]
            return usermsgs, 200
        abort_if_message_list_empty(room_id)
        return rooms[room_id]["message_list"], 200

    def put(self, room_id, username):
        """Append a message to a room on behalf of its author."""
        args = message_put_args.parse_args()
        querier = args['username']
        abort_if_querier_does_not_match_user(querier, username)
        room_id = str(room_id)
        abort_if_room_not_exists(room_id)
        abort_if_roomuser_not_exists(room_id, username)
        rooms[room_id]["message_list"].append(args)
        return args, 201
"""ROUTE SECTION"""
api.add_resource(User, "/api/user/<string:username>", "/api/users")
api.add_resource(Room, "/api/room/<int:room_id>", "/api/rooms")
api.add_resource(RoomUser, "/api/room/<int:room_id>/user/<string:username>", "/api/room/<int:room_id>/users")
api.add_resource(Message, "/api/room/<int:room_id>/user/<string:username>/messages",
"/api/room/<int:room_id>/messages")
@app.route('/')
def index():
    """Landing page for the service."""
    banner = "OBLIG 2"
    return banner
"""Part of attempt of implementing push notifications"""
# api_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if __name__ == "__main__":
"""Part of attempt of implementing push notifications"""
# ip = "127.0.0.2"
# port = 5001
# api_server.connect((ip, port))
# message = "api"
# api_server.send(message.encode())
app.run(debug=True) |
# Gmail API client configuration constants.
MAX_RESULTS = 10 # Max emails retrieved in a single list request
SCOPES = ["https://mail.google.com/"] # Full scope for read/write/modify/delete
AUTH_TOKEN = "token.json" # Oauth token that is generated after the first authentication flow
RETRIEVE_LABELS = ['INBOX', 'UNREAD'] # Labels fetched
CREDENTIALS_FILE = "credentials.json" # OAuth client credentials file (presumably downloaded from the API console — confirm)
|
from flask import Flask

app = Flask(__name__)


@app.route('/user/<username>')
def show_user_profile(username):
    """Render a trivial profile string for the given username."""
    profile = "User {}".format(username)
    return profile


@app.route('/post/<int:post_id>')
def show_post(post_id):
    """Render a trivial string for the given numeric post id."""
    body = "Post {}".format(post_id)
    return body


if __name__ == '__main__':
    # Listen on every interface with the debug reloader enabled.
    app.run(host='0.0.0.0', debug=True)
|
from management.models import appointment
from django.shortcuts import render,redirect
from django.contrib import messages
from django.contrib.auth.models import User,auth
from django.contrib.auth import authenticate,login,logout
from .forms import CreateUserForm
from django.contrib import messages
# Create your views here.
def appoint(request):
    """Render the appointment form; create an appointment record on POST.

    On POST the form fields are copied onto a new `appointment` model
    instance and saved, and a success flash message is queued.
    """
    if request.method == 'POST':
        ap = appointment()
        ap.name = request.POST.get('apname')
        ap.Phone_Number = request.POST.get('apnum')
        ap.problem = request.POST.get('approb')
        ap.appointment_Date = request.POST.get('time')
        ap.save()
        messages.success(request, "Appointment Sent Successfully")
    # Plain GET just shows the form. The original flashed an
    # "Appointment not sent" warning on every GET, which wrongly told
    # visitors that a submission had failed before they submitted anything.
    return render(request, "appointment.html")
def loginpage(request):
    """Authenticate a user from the login form; redirect home on success."""
    if request.method == "POST":
        submitted_username = request.POST.get('username')
        submitted_password = request.POST.get('password')
        user = authenticate(request, username=submitted_username, password=submitted_password)
        if user is None:
            # bad credentials: flash a notice and show the form again
            messages.info(request, 'invalid username/login')
            return redirect('login')
        login(request, user)
        return redirect("/")
    return render(request, 'login.html', {})
def register(request):
    """Create a new account via CreateUserForm and redirect to login.

    GET renders an empty form; POST validates and saves it. Invalid
    submissions flash a warning and restart the registration flow.
    """
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            user = form.cleaned_data.get('username')
            messages.success(request, "Account was created for " + user)
            return redirect('login')
        # invalid form: flash a warning and restart the flow
        messages.warning(request, "Account was not created")
        return redirect('register')
    # (A large commented-out manual-registration implementation that
    # duplicated CreateUserForm's behavior was removed here.)
    context = {'form': form}
    return render(request, 'register.html', context)
def logoutpage(request):
    """End the current session and return to the home page."""
    logout(request)
    return redirect('/')
import sys
from subprocess import Popen, PIPE
def insert_clickhouse(csv_file):
    """Stream a CSV file into heartbeat.versioned_probes via clickhouse-client.

    The file is fed to the client on stdin instead of through a
    `cat file | ...` shell pipeline. This removes shell=True (the original
    interpolated csv_file into a shell string, a shell-injection hazard)
    and the useless use of `cat`.
    """
    cmd = ['clickhouse-client', '-h132.227.123.200',
           '--query=INSERT INTO heartbeat.versioned_probes FORMAT CSV']
    # echo the equivalent command line for traceability, as before
    print(' '.join(cmd) + ' < ' + csv_file)
    with open(csv_file, 'rb') as f:
        insert_clickhouse_process = Popen(cmd, stdin=f, stdout=PIPE, stderr=PIPE)
        stdout, stderr = insert_clickhouse_process.communicate()
def remove_version(version, csv_file, ofile):
    """Rewrite csv_file into ofile, appending `,-1,1` columns to every row.

    Note: `version` is accepted for signature parity with add_version but
    is not used by the awk program (the value is hard-coded as -1).
    """
    remove_version_cmd = "time awk -F, '{$(NF+1)=-1 FS 1;}1' OFS=, " + csv_file + " > " + ofile
    proc = Popen(remove_version_cmd,
                 stdout=PIPE, stderr=PIPE, shell=True)
    # Wait for awk to finish. The original fired-and-forgot the process,
    # so callers could observe a partially written ofile (and leave a
    # zombie process behind).
    proc.communicate()
def add_version(version, csv_file, ofile):
    """Rewrite csv_file into ofile, appending `,1,<version>` to every row."""
    add_version_cmd = (
        "time awk -F, '{$(NF+1)=1 FS " + str(version) + ";}1' OFS=, "
        + csv_file + " > " + ofile
    )
    worker = Popen(add_version_cmd, stdout=PIPE, stderr=PIPE, shell=True)
    # block until the rewrite completes
    worker.communicate()
if __name__ == "__main__":
# csv_file_prefix = sys.argv[1]
# ofile_prefix = sys.argv[2]
# version = sys.argv[3]
# for i in reversed(range(0, 11)):
#
# csv_file = csv_file_prefix + str(i)+".csv"
# ofile = ofile_prefix + str(i) + ".csv"
# print("Processing " + csv_file + "...")
# add_version(version, csv_file, ofile)
snapshots = ["snapshot-0617/",
"snapshot-0703/",
"snapshot-0704/",
"snapshot-0705/",
"snapshot-0706/",
"snapshot-0707/"]
for snapshot in snapshots:
print("Inserting " + snapshot + "...")
for i in range(1, 3):
print("Inserting round " + str(i))
versioned_csv_file = "resources/" + snapshot + "versioned_replies_round_" + str(i) + ".csv"
insert_clickhouse(versioned_csv_file)
|
class MapSum:
    """Key/value store supporting sums of values over a key prefix."""

    def __init__(self):
        """Initialize an empty key -> value mapping."""
        self.cache = {}

    def insert(self, key, val):
        """Insert key with value val, overwriting any previous value.

        :type key: str
        :type val: int
        :rtype: void
        """
        self.cache[key] = val

    def sum(self, prefix):
        """Return the sum of values of all keys starting with prefix.

        :type prefix: str
        :rtype: int
        """
        # str.startswith already handles keys shorter than the prefix,
        # replacing the original manual length check plus slice compare.
        return sum(val for key, val in self.cache.items()
                   if key.startswith(prefix))
# Your MapSum object will be instantiated and called as such:
obj = MapSum()
obj.insert('apple', 3)
# sum over every key starting with 'ap'
param_2 = obj.sum('ap')
print(param_2)
"""
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
##############################
# Start - Global Code Block
def say(msg):
    """Write msg both to the playbook debug log and as a container comment."""
    phantom.debug(msg)
    phantom.comment(comment=msg)
def flatten_strlist(input_list):
    """Flatten nested lists/tuples of comma-separated strings into one flat list.

    Only string items survive the recursion (each split on ','); other leaf
    items are dropped. The final pass strips whitespace from strings and
    filters out falsy entries.
    NOTE: relies on `basestring` and list-returning map/filter — Python 2 only.
    """
    def _recurse(input_list):
        # depth-first flatten; lists/tuples are descended, strings split
        new_list = []
        for item in input_list:
            if isinstance(item, list) or isinstance(item, tuple):
                new_list += _recurse(item)
            else:
                if isinstance(item, basestring):
                    new_list += item.split(',')
        return new_list
    return filter(lambda x:x, map(lambda x:x.strip() if isinstance(x, basestring) else x, _recurse(input_list)))
def get_list(list_name):
    """Fetch a Phantom custom list and return its entries flattened, or [].

    phantom.get_list is treated as returning a (success, message, data)
    style sequence; only a result whose first element is True is used.
    Failures are logged via say() and yield an empty list.
    """
    custom_list = phantom.get_list(list_name=list_name)
    if (isinstance(custom_list, tuple) or isinstance(custom_list, list)) and len(custom_list) >= 3 and custom_list[0] == True:
        return flatten_strlist(custom_list[2])
    else:
        say("Unable to retrieve List from {}".format(list_name))
        return []
def get_custom_list_name(param):
    """Extract the list name from a "prefix:name" style parameter.

    Splits on the first ':' and returns the stripped remainder; when no
    ':' is present the whole stripped param is returned.
    """
    _, sep, tail = param.partition(":")
    if sep:
        return tail.strip()
    return param.strip()
def get_users_from_parsed_list(parsed_list, cef_field):
    """Collect distinct cef_field values from non-empty query results.

    Each entry in parsed_list is a parsed REST response dict; entries whose
    'count' is zero are skipped. Only the first record of each response is
    inspected, and first-appearance order is preserved.
    """
    seen = []
    for entry in parsed_list:
        if entry['count'] <= 0:
            continue
        candidate = entry['data'][0]['cef'][cef_field]
        # avoid duplicates while keeping order
        if candidate not in seen:
            seen.append(candidate)
    return seen
# End - Global Code block
##############################
def on_start(container):
    """Playbook entry point: start the chain at the validity gate."""
    phantom.debug('on_start() called')
    # call 'Is_event_valid' block
    Is_event_valid(container=container)
    return
def Set_event_status_open(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Mark the container Open, then continue to case promotion."""
    phantom.debug('Set_event_status_open() called')
    phantom.set_status(container=container, status="Open")
    Promote_to_case(container=container)
    return
def Is_event_valid(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Gate the playbook: proceed only when a user or host field is present.

    Any one non-empty user/host CEF field opens the event; otherwise
    processing stops with an explanatory comment.
    """
    phantom.debug('Is_event_valid() called')

    # check for 'if' condition 1
    matched = phantom.decision(
        container=container,
        conditions=[
            # BUG FIX: the original datapath was "artifact:*.cef. user"
            # (stray space after the dot), which can never match a field.
            ["artifact:*.cef.user", "!=", ""],
            ["artifact:*.cef.dest_host", "!=", ""],
            ["artifact:*.cef.src_host", "!=", ""],
            ["artifact:*.cef.dvc_host", "!=", ""],
            ["artifact:*.cef.destinationUserName", "!=", ""],
            ["artifact:*.cef.sourceUserName", "!=", ""],
        ],
        logical_operator='or')

    # call connected blocks if condition 1 matched
    if matched:
        Set_event_status_open(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # call connected blocks for 'else' condition 2
    Stop_processing(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
    return
def Assign_to_CSIRT(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Hand ownership of the container to the CSIRT role."""
    phantom.debug('Assign_to_CSIRT() called')
    phantom.set_owner(container=container, role="CSIRT")
    return
def Assign_to_SOC_L1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Hand ownership of the container to the SOC L1 role."""
    phantom.debug('Assign_to_SOC_L1() called')
    phantom.set_owner(container=container, role="SOC L1")
    return
def Assign_to_SOC_L2(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Hand ownership of the container to the SOC L2 role."""
    phantom.debug('Assign_to_SOC_L2() called')
    phantom.set_owner(container=container, role="SOC L2")
    return
def Stop_processing(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Terminal block: record why the playbook stopped on this container."""
    phantom.debug('Stop_processing() called')
    phantom.comment(container=container, comment="No user or host found, processing stopped")
    return
def Promote_to_case(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Promote the container to a case with the "Credential Access" template,
    then continue to severity assignment."""
    phantom.debug('Promote_to_case() called')
    phantom.promote(container=container, template="Credential Access")
    Set_severity(container=container)
    return
def Set_severity(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Set container severity to the most urgent artifact urgency seen.

    Lower mapping value = more urgent. Unknown urgency strings map to the
    default 99 and therefore never beat the current value, so severity
    stays "low" unless a recognized higher urgency appears.
    """
    phantom.debug('Set_severity() called')
    container_data = phantom.collect2(container=container, datapath=['artifact:*.cef.urgency', 'artifact:*.id'])
    container_item_0 = [item[0] for item in container_data]

    ################################################################################
    ## Custom Code Start
    ################################################################################
    urgency_mapping = {
        "high": 1,
        "medium": 2,
        "low": 3
    }
    # start from the least urgent and let any artifact raise it
    urgency = "low"
    for item in container_item_0:
        if urgency_mapping.get(item, 99) < urgency_mapping.get(urgency):
            urgency = item
    say("Setting severity to {}".format(urgency.capitalize()))
    phantom.set_severity(container=container, severity=urgency.capitalize())
    ################################################################################
    ## Custom Code End
    ################################################################################
    Find_VIP_users(container=container)
    return
def Assign_to_team(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Dispatch ownership based on Decide_on_assignment's saved assign_to value.

    Exactly one of the Assign_to_* blocks is invoked; if no condition
    matches, no assignment is made.
    """
    phantom.debug('Assign_to_team() called')

    # check for 'if' condition 1
    matched = phantom.decision(
        container=container,
        action_results=results,
        conditions=[
            ["Decide_on_assignment:custom_function:assign_to", "==", "SOC L2"],
        ])

    # call connected blocks if condition 1 matched
    if matched:
        Assign_to_SOC_L2(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # check for 'elif' condition 2
    matched = phantom.decision(
        container=container,
        action_results=results,
        conditions=[
            ["Decide_on_assignment:custom_function:assign_to", "==", "SOC L1"],
        ])

    # call connected blocks if condition 2 matched
    if matched:
        Assign_to_SOC_L1(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # check for 'elif' condition 3
    matched = phantom.decision(
        container=container,
        action_results=results,
        conditions=[
            ["Decide_on_assignment:custom_function:assign_to", "==", "CSIRT"],
        ])

    # call connected blocks if condition 3 matched
    if matched:
        Assign_to_CSIRT(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    return
def join_Assign_to_team(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
    """Join point: run Assign_to_team once, after both user queries finish."""
    phantom.debug('join_Assign_to_team() called')

    # if the joined function has already been called, do nothing
    if phantom.get_run_data(key='join_Assign_to_team_called'):
        return

    # check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed
    if phantom.completed(action_names=['Get_DestinationUserName_Users', 'Get_SourceUserName_Users']):
        # save the state that the joined function has now been called
        phantom.save_run_data(key='join_Assign_to_team_called', value='Assign_to_team')

        # call connected block "Assign_to_team"
        Assign_to_team(container=container, handle=handle)

    return
def Decide_if_VIP_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Pin a VIP HUD card when Find_VIP_users produced any matches,
    otherwise skip straight to the next decision."""
    phantom.debug('Decide_if_VIP_users() called')

    # check for 'if' condition 1
    matched = phantom.decision(
        container=container,
        conditions=[
            ["Find_VIP_users:custom_function:vip_users", "!=", ""],
        ])

    # call connected blocks if condition 1 matched
    if matched:
        VIP_users_HUD_card(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # call connected blocks for 'else' condition 2
    join_decision_5(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
    return
def Find_VIP_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Intersect the container's user names with the "vip_list" custom list.

    Saves two run-data keys: 'Find_VIP_users:vip_users' (comma-joined VIP
    matches, or None) and 'Find_VIP_users:users_list' (all normalized user
    names, or None).
    NOTE: relies on map() returning a list (len() is called on its result)
    — Python 2 semantics.
    """
    phantom.debug('Find_VIP_users() called')
    input_parameter_0 = "custom_list:vip_list"
    container_data = phantom.collect2(container=container, datapath=['artifact:*.cef.sourceUserName', 'artifact:*.cef.destinationUserName', 'artifact:*.id'])
    container_item_0 = [item[0] for item in container_data]
    container_item_1 = [item[1] for item in container_data]
    Find_VIP_users__vip_users = None
    Find_VIP_users__users_list = None

    ################################################################################
    ## Custom Code Start
    ################################################################################
    # flatten_strlist defined in global block
    users_list = flatten_strlist([container_item_0] + [container_item_1])
    # get_custom_list_name() defined in global block
    list_name = get_custom_list_name(input_parameter_0)
    # get_list() defined in global block
    vip_users = get_list(list_name)
    # normalize usernames to all uppercase, remove domain
    users_list = map(lambda x:x.split("\\")[-1].strip().upper(), users_list)
    vip_users = map(lambda x:x.split("\\")[-1].strip().upper(), vip_users)
    # VIPs actually present on this container, in stable order
    found_vip_users = sorted(set(users_list) & set(vip_users))
    if len(found_vip_users) > 0:
        Find_VIP_users__vip_users = ", ".join(found_vip_users)
        say("Found {} VIP users".format(len(found_vip_users)))
        say(Find_VIP_users__vip_users)
    else:
        say("No VIP users found")
    if len(users_list) > 0:
        Find_VIP_users__users_list = users_list
    else:
        say("No users found")
    ####
    ################################################################################
    ## Custom Code End
    ################################################################################
    phantom.save_run_data(key='Find_VIP_users:vip_users', value=json.dumps(Find_VIP_users__vip_users))
    phantom.save_run_data(key='Find_VIP_users:users_list', value=json.dumps(Find_VIP_users__users_list))
    Decide_if_VIP_users(container=container)
    return
def VIP_users_HUD_card(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Pin the saved VIP user list to the container HUD, then continue."""
    phantom.debug('VIP_users_HUD_card() called')
    Find_VIP_users__vip_users = json.loads(phantom.get_run_data(key='Find_VIP_users:vip_users'))
    phantom.pin(container=container, data=Find_VIP_users__vip_users, message="VIP Users", pin_type="card", pin_style="grey", name="VIP users")
    join_decision_5(container=container)
    return
def decision_5(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Branch on the container name: names containing "Rule" go through the
    recurring-user lookup (format_1); all others skip to assignment."""
    phantom.debug('decision_5() called')

    name_value = container.get('name', None)

    # check for 'if' condition 1
    matched = phantom.decision(
        container=container,
        conditions=[
            ["Rule", "in", name_value],
        ])

    # call connected blocks if condition 1 matched
    if matched:
        format_1(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # call connected blocks for 'else' condition 2
    join_Decide_on_assignment(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
    return
def join_decision_5(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
    """Join point with no upstream callbacks: record the call and proceed."""
    phantom.debug('join_decision_5() called')

    # no callbacks to check, call connected block "decision_5"
    phantom.save_run_data(key='join_decision_5_called', value='decision_5', auto=True)

    decision_5(container=container, handle=handle)
    return
def Find_recurring_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Cross-reference this container's users with past 30-day query results.

    Builds a comma-joined HUD-card string of users already seen in earlier
    containers, prefixing any VIPs with "VIP: ", and saves it under the
    run-data key 'Find_recurring_users:recurring_users_hudcard' (or None).
    """
    phantom.debug('Find_recurring_users() called')
    Find_VIP_users__vip_users = json.loads(phantom.get_run_data(key='Find_VIP_users:vip_users'))
    Find_VIP_users__users_list = json.loads(phantom.get_run_data(key='Find_VIP_users:users_list'))
    results_data_1 = phantom.collect2(container=container, datapath=['Get_DestinationUserName_Users:action_result.data.*.parsed_response_body'], action_results=results)
    results_data_2 = phantom.collect2(container=container, datapath=['Get_SourceUserName_Users:action_result.data.*.parsed_response_body'], action_results=results)
    results_item_1_0 = [item[0] for item in results_data_1]
    results_item_2_0 = [item[0] for item in results_data_2]
    Find_recurring_users__recurring_users_hudcard = None

    ################################################################################
    ## Custom Code Start
    ################################################################################
    # recurrent users list
    r_users_list = []
    # should be greater than 1 because query will return container being executed as well
    if len(results_item_1_0) > 1 or len(results_item_2_0) > 1:
        destinationUserName = get_users_from_parsed_list(results_item_1_0, 'destinationUserName')
        sourceUserName = get_users_from_parsed_list(results_item_2_0, 'sourceUserName')
        # BUG FIX: sets do not support '+' — the original
        # `set(a) + set(b)` raised TypeError here. Use set union.
        r_users_list = sorted(list(set(destinationUserName) | set(sourceUserName)))
    if len(r_users_list) > 0:
        # flatten recurrent users list
        r_users_list = flatten_strlist(r_users_list)
        # normalize usernames to all uppercase, remove domain
        r_users_list = map(lambda x:x.split("\\")[-1].strip().upper(), r_users_list)
        # find exact recurrent users
        found_r_users = sorted(set(r_users_list) & set(Find_VIP_users__users_list))
        if len(found_r_users) > 0:
            if Find_VIP_users__vip_users != None:
                # retrieve normalized vip users
                vip_users = Find_VIP_users__vip_users.split(",")
                # add suffix VIP: in case they are vip
                found_r_users = map(lambda x:"VIP: "+x if x in vip_users else x, found_r_users)
            Find_recurring_users__recurring_users_hudcard = ", ".join(found_r_users)
            say("Found Recurrent User(s): {}".format(Find_recurring_users__recurring_users_hudcard))
    ################################################################################
    ## Custom Code End
    ################################################################################
    phantom.save_run_data(key='Find_recurring_users:recurring_users_hudcard', value=json.dumps(Find_recurring_users__recurring_users_hudcard))
    decision_6(container=container)
    return
def join_Find_recurring_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
    """Join point: run Find_recurring_users once both HTTP queries finish."""
    phantom.debug('join_Find_recurring_users() called')

    # check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed
    if phantom.completed(action_names=['Get_DestinationUserName_Users', 'Get_SourceUserName_Users']):
        # call connected block "Find_recurring_users"
        Find_recurring_users(container=container, handle=handle)

    return
def format_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Build the two REST query URL templates for the recurring-user lookup.

    Both templates restrict to the "credentialaccess" label and to
    containers created in the 30 days before this container; the {{0}}
    placeholder is later expanded per user by phantom.format. The formatted
    results are stored as "format_1" (destinationUserName) and "format_12"
    (sourceUserName) before both HTTP queries are kicked off.
    """
    from datetime import datetime, timedelta
    phantom.debug('format_1() called')

    # users_list = json.loads(phantom.get_run_data(key='Find_VIP_users:users_list'))
    # container create_time looks like an ISO timestamp; keep only YYYY-MM-DD
    end_date = container.get('create_time', None)[:10]
    # create time filter for querying
    end_date_str = datetime.strptime(end_date, '%Y-%m-%d')
    # 30 days past time window from container created time
    start_date = (end_date_str - timedelta(days=30))
    start_date = datetime.strftime(start_date, '%Y-%m-%d')
    # say("Recurrent Users filters:\nLabel: credentialaccess\nUsernames: {}\nStart Date: {} End Date: {}".format(", ".join(users_list), start_date, end_date))
    # {{0}} survives .format() as a literal {0} for phantom.format to fill in
    template_1 = """%%
rest/artifact?_filter_container__label__icontains=\"credentialaccess\"&_filter_cef__destinationUserName__icontains=\"{{0}}\"&_filter_container__create_time__gt=\"{time}\"
%%""".format(time=start_date)
    template_2 = """%%
rest/artifact?_filter_container__label__icontains=\"credentialaccess\"&_filter_cef__sourceUserName__icontains=\"{{0}}\"&_filter_container__create_time__gt=\"{time}\"
%%""".format(time=start_date)
    # template without any time period filter (tab should be removed form the second and third lines)
    #template = """%%
    #rest/artifact?_filter_container__label__icontains=\"credentialaccess\"&_filter_cef__destinationUserName__icontains=\"{0}\"
    #%%"""
    # parameter list for template variable replacement
    parameters = [
        "Find_VIP_users:custom_function:users_list",
    ]
    phantom.format(container=container, template=template_1, parameters=parameters, name="format_1")
    phantom.format(container=container, template=template_2, parameters=parameters, name="format_12")
    Get_DestinationUserName_Users(container=container)
    Get_SourceUserName_Users(container=container)
    return
def Get_DestinationUserName_Users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Issue one "get data" REST call per formatted destinationUserName URL.

    Results flow into join_Find_recurring_users via the action callback.
    """
    phantom.debug('Get_DestinationUserName_Users() called')

    # collect data for 'Get_DestinationUserName_Users' call
    formatted_data_1 = phantom.get_format_data(name='format_1__as_list')

    parameters = []

    # build parameters list for 'Get_DestinationUserName_Users' call
    for formatted_part_1 in formatted_data_1:
        parameters.append({
            'headers': "",
            'location': formatted_part_1,
            # internal REST endpoint; certificate checking is disabled
            'verify_certificate': False,
        })

    phantom.act(action="get data", parameters=parameters, assets=['http_rest'], callback=join_Find_recurring_users, name="Get_DestinationUserName_Users")
    return
def Recurring_users_HUD_card(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Pin the saved recurring-user list to the container HUD, then continue."""
    phantom.debug('Recurring_users_HUD_card() called')
    Find_recurring_users__recurring_users_hudcard = json.loads(phantom.get_run_data(key='Find_recurring_users:recurring_users_hudcard'))
    phantom.pin(container=container, data=Find_recurring_users__recurring_users_hudcard, message="Recurring Users", pin_type="card", pin_style="grey", name="Recurring Users")
    join_Decide_on_assignment(container=container)
    return
def decision_6(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Pin a recurring-users HUD card only when some recurring users exist."""
    phantom.debug('decision_6() called')

    # check for 'if' condition 1
    matched = phantom.decision(
        container=container,
        action_results=results,
        conditions=[
            ["Find_recurring_users:custom_function:recurring_users_hudcard", "!=", None],
        ])

    # call connected blocks if condition 1 matched
    if matched:
        Recurring_users_HUD_card(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # call connected blocks for 'else' condition 2
    join_Decide_on_assignment(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
    return
def Decide_on_assignment(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Choose the owning team and the priority HUD card for the container.

    Recurring users => CSIRT / red "Highest Priority"; HIGH severity with a
    recognized host or user type => SOC L2 / red "High Priority"; MEDIUM
    severity keeps SOC L1 with "Medium Priority"; otherwise "Inconclusive".
    Results are saved as run data for decision_7 / Assign_to_team.
    """
    phantom.debug('Decide_on_assignment() called')

    name_value = container.get('name', None)
    severity_value = container.get('severity', None)
    input_parameter_0 = "custom_list:nominal_users"
    input_parameter_1 = "custom_list:service_users"
    container_data = phantom.collect2(container=container, datapath=['artifact:*.cef.user_type', 'artifact:*.cef.dest_host_type', 'artifact:*.cef.dvc_host', 'artifact:*.id'])
    container_item_0 = [item[0] for item in container_data]
    container_item_1 = [item[1] for item in container_data]
    container_item_2 = [item[2] for item in container_data]
    Decide_on_assignment__priority_hud_card_data = None
    Decide_on_assignment__priority_hud_card_colar = None
    Decide_on_assignment__assign_to = None

    ################################################################################
    ## Custom Code Start
    ################################################################################
    # get_list() and get_custom_list_name defined in global block
    nominal_users = get_list(get_custom_list_name(input_parameter_0))
    # get_list() and get_custom_list_name defined in global block
    service_users = get_list(get_custom_list_name(input_parameter_1))
    # user_type: NOMINAL or SERVICE
    user_type = flatten_strlist(container_item_0)
    user_type = map(lambda x: x.strip().upper(), user_type)
    user_type_options = nominal_users + service_users
    # dest_host_type: WORKSTATION or SERVER
    dest_host_type = flatten_strlist(container_item_1)
    dest_host_type = map(lambda x: x.strip().upper(), dest_host_type)
    dest_host_type_options = ["WORKSTATION", "SERVER"]
    # container severity
    severity = severity_value.strip().upper()
    # Inconclusive settings
    Decide_on_assignment__assign_to = "SOC L1"
    Decide_on_assignment__priority_hud_card_colar = "gray"
    Decide_on_assignment__priority_hud_card_data = "Inconclusive"
    # gather recurrent user information
    if "MXEARUN" in name_value:
        recurring_users = (phantom.get_run_data(key='Find_recurring_users:recurring_users_hudcard') != None)
    else:
        recurring_users = False
    if recurring_users == True:
        Decide_on_assignment__assign_to = "CSIRT"
        Decide_on_assignment__priority_hud_card_colar = "red"
        Decide_on_assignment__priority_hud_card_data = "Highest Priority"
    elif severity == "HIGH":
        # BUG FIX: the original tested `dest_host_type in dest_host_type`
        # (a list against itself) and `user_type in user_type_options`
        # (the whole list against a list of names) — both always False, so
        # HIGH severity events never escalated. Check for any overlap
        # between the observed values and the known options instead.
        if (set(dest_host_type) & set(dest_host_type_options)) or (set(user_type) & set(user_type_options)):
            Decide_on_assignment__assign_to = "SOC L2"
            Decide_on_assignment__priority_hud_card_colar = "red"
            Decide_on_assignment__priority_hud_card_data = "High Priority"
    elif severity == "MEDIUM":
        Decide_on_assignment__priority_hud_card_data = "Medium Priority"
    ################################################################################
    ## Custom Code End
    ################################################################################
    phantom.save_run_data(key='Decide_on_assignment:priority_hud_card_data', value=json.dumps(Decide_on_assignment__priority_hud_card_data))
    phantom.save_run_data(key='Decide_on_assignment:priority_hud_card_colar', value=json.dumps(Decide_on_assignment__priority_hud_card_colar))
    phantom.save_run_data(key='Decide_on_assignment:assign_to', value=json.dumps(Decide_on_assignment__assign_to))
    decision_7(container=container)
    return
def join_Decide_on_assignment(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
    """Join point: run Decide_on_assignment exactly once, after both upstream
    actions have completed (Phantom-generated playbook boilerplate)."""
    phantom.debug('join_Decide_on_assignment() called')

    # if the joined function has already been called, do nothing
    if phantom.get_run_data(key='join_Decide_on_assignment_called'):
        return

    # check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed
    if phantom.completed(action_names=['Get_DestinationUserName_Users', 'Get_SourceUserName_Users']):
        # save the state that the joined function has now been called
        phantom.save_run_data(key='join_Decide_on_assignment_called', value='Decide_on_assignment')

        # call connected block "Decide_on_assignment"
        Decide_on_assignment(container=container, handle=handle)

    return
def Priority_red_HUD_card(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Pin a red 'Priority' HUD card with the text saved by Decide_on_assignment,
    then continue to the join for Assign_to_team."""
    phantom.debug('Priority_red_HUD_card() called')

    # retrieve the card text stored earlier in run data (JSON-encoded string)
    Decide_on_assignment__priority_hud_card_data = json.loads(phantom.get_run_data(key='Decide_on_assignment:priority_hud_card_data'))
    phantom.pin(container=container, data=Decide_on_assignment__priority_hud_card_data, message="Priority", pin_type="card", pin_style="red", name="Priority")
    join_Assign_to_team(container=container)
    return
def decision_7(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Branch on the saved HUD-card color: red card vs. grey card.

    Note: the datapath key is spelled 'colar' — consistent with the key used
    when the value was saved, so it must not be "corrected" in isolation.
    """
    phantom.debug('decision_7() called')

    # check for 'if' condition 1
    matched = phantom.decision(
        container=container,
        action_results=results,
        conditions=[
            ["Decide_on_assignment:custom_function:priority_hud_card_colar", "==", "red"],
        ])

    # call connected blocks if condition 1 matched
    if matched:
        Priority_red_HUD_card(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
        return

    # call connected blocks for 'else' condition 2
    Priority_grey_HUD_card(action=action, success=success, container=container, results=results, handle=handle, custom_function=custom_function)
    return
def Priority_grey_HUD_card(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Pin a grey 'Priority' HUD card (the non-red / lower-priority branch),
    then continue to the join for Assign_to_team."""
    phantom.debug('Priority_grey_HUD_card() called')

    # retrieve the card text stored earlier in run data (JSON-encoded string)
    Decide_on_assignment__priority_hud_card_data = json.loads(phantom.get_run_data(key='Decide_on_assignment:priority_hud_card_data'))
    phantom.pin(container=container, data=Decide_on_assignment__priority_hud_card_data, message="Priority", pin_type="card", pin_style="grey", name="Priority")
    join_Assign_to_team(container=container)
    return
def Get_SourceUserName_Users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Run one 'get data' REST call per formatted URL from format_12, then
    continue to join_Find_recurring_users."""
    phantom.debug('Get_SourceUserName_Users() called')

    # collect data for 'Get_SourceUserName_Users' call
    formatted_data_1 = phantom.get_format_data(name='format_12__as_list')

    parameters = []

    # build parameters list for 'Get_SourceUserName_Users' call: one request per URL
    for formatted_part_1 in formatted_data_1:
        parameters.append({
            'headers': "",
            'location': formatted_part_1,
            # NOTE(review): certificate verification is disabled for this asset
            'verify_certificate': False,
        })

    phantom.act(action="get data", parameters=parameters, assets=['http_rest'], callback=join_Find_recurring_users, name="Get_SourceUserName_Users")
    return
def format_3(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Build REST query URLs that look up artifacts from the last 30 days whose
    sourceUserName matches each user from Find_VIP_users, then run
    Get_SourceUserName_Users."""
    from datetime import datetime, timedelta
    phantom.debug('format_3() called')

    # users_list = json.loads(phantom.get_run_data(key='Find_VIP_users:users_list'))
    # container create date (YYYY-MM-DD) is the end of the query window
    end_date = container.get('create_time', None)[:10]
    # create time filter for querying
    end_date_str = datetime.strptime(end_date, '%Y-%m-%d')
    # 30 days past time window from container created time
    start_date = (end_date_str - timedelta(days=30))
    start_date = datetime.strftime(start_date, '%Y-%m-%d')
    # say("Recurrent Users filters:\nLabel: credentialaccess\nUsernames: {}\nStart Date: {} End Date: {}".format(", ".join(users_list), start_date, end_date))

    # `{{0}}` survives .format() as `{0}` — it is the placeholder that
    # phantom.format fills from the `parameters` datapaths below.
    template = """%%
rest/artifact?_filter_container__label__icontains=\"credentialaccess\"&_filter_cef__sourceUserName__icontains=\"{{0}}\"&_filter_container__create_time__gt=\"{time}\"
%%""".format(time=start_date)
    # template without any time period filter (tab should be removed form the second and third lines)
    #template = """%%
    #rest/artifact?_filter_container__label__icontains=\"credentialaccess\"&_filter_cef__destinationUserName__icontains=\"{0}\"
    #%%"""

    # parameter list for template variable replacement
    parameters = [
        "Find_VIP_users:custom_function:users_list",
    ]

    phantom.format(container=container, template=template, parameters=parameters, name="format_3")

    Get_SourceUserName_Users(container=container)

    return
def on_finish(container, summary):
    """Playbook epilogue hook; currently a no-op (summary collection is stubbed out)."""
    phantom.debug('on_finish() called')

    # This function is called after all actions are completed.
    # summary of all the action and/or all details of actions
    # can be collected here.

    # summary_json = phantom.get_summary()
    # if 'result' in summary_json:
    #     for action_result in summary_json['result']:
    #         if 'action_run_id' in action_result:
    #             action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
    #             phantom.debug(action_results)

    return
from typing import Optional, Union
import numpy as np
from ._abstract import AbstractLinearProgram
from ._constraint import LinearConstraint
from ._equation import LinearEquation
from ._utilities import pad_right
class ObjectiveFunction:
    """A linear objective (coefficient vector, constant offset, and min/max sense).

    Build one via :func:`minimize` or :func:`maximize`, then attach constraints
    with :meth:`subject_to` to obtain an :class:`AbstractLinearProgram`.
    """

    def __init__(self, equation: LinearEquation, objective: str):
        # `objective` is the optimization sense, "min" or "max".
        self._c = equation.coefficients
        self._z = equation.constant
        self._objective = objective

    def __repr__(self):
        # Bug fix: the original body was `pass`, so __repr__ returned None and
        # repr(obj) raised TypeError.  Rendering of the full equation is TODO.
        return f"ObjectiveFunction(objective={self._objective!r}, constant={self._z!r})"

    # TODO for unconstrained programs, user can directly call .where or .compile
    def subject_to(self, *constraints: LinearConstraint) -> AbstractLinearProgram:
        """Attach one or more constraints and build the abstract program.

        Raises:
            ValueError: if called with no constraints.  (Bug fix: the original
                checked ``constraints is None``, which is never true for a
                ``*args`` tuple — an empty call previously crashed in max().)
        """
        if not constraints:
            raise ValueError("At least one constraint must be provided.")
        length = max(constraint.coefficients.shape[0] for constraint in constraints)
        inequalities = [constraint.inequality for constraint in constraints]
        # Pad every coefficient row to the common width and stack once, instead
        # of seeding A/b with uninitialized np.empty rows and slicing them off.
        A = np.vstack(
            [pad_right(constraint.coefficients, length) for constraint in constraints]
        ).astype(float, copy=False)
        b = np.asarray([constraint.constant for constraint in constraints], dtype=float)
        return AbstractLinearProgram(
            A, b, pad_right(self._c, length), self._z, self._objective, inequalities
        )

    def fill(self, length):
        pass  # TODO

    @property
    def objective(self) -> str:
        """Optimization sense: ``"min"`` or ``"max"``."""
        return self._objective

    @property
    def coefficients(self) -> Optional[np.ndarray]:
        """Objective coefficient vector (may be None for a constant objective)."""
        return self._c

    @property
    def constant(self) -> float:
        """Constant offset of the objective."""
        return self._z
def minimize(equation: Union[LinearEquation, float]) -> ObjectiveFunction:
    """Create a minimization objective from an equation or a bare constant.

    A plain number is wrapped in a constant LinearEquation first.
    """
    if isinstance(equation, (float, int)):  # idiom: single isinstance with a tuple
        equation = LinearEquation({}, equation)
    return ObjectiveFunction(equation, "min")
def maximize(equation: Union[LinearEquation, float]) -> ObjectiveFunction:
    """Create a maximization objective from an equation or a bare constant.

    A plain number is wrapped in a constant LinearEquation first.
    """
    if isinstance(equation, (float, int)):  # idiom: single isinstance with a tuple
        equation = LinearEquation({}, equation)
    return ObjectiveFunction(equation, "max")
|
# Read an upper bound and list every odd number below it.
numero = int(input("Digite seu número: "))
# Odd numbers in [0, numero) are exactly range(1, numero, 2).
lista = list(range(1, numero, 2))
print("Os número ímpares são: ", lista)
|
#!/usr/bin/env python
'''
Program : lha2isa.py
Version : 1.0
Author : b.k.gjelsten@fys.uio.no
Description :
TO FIX
x Need to deal with the decays of antiparticles
o Note also the hack for the top mass ... maybe look up the value
o There are some negative widths ... (from slha file?)
o Finally do the EWSB parameters at the end (can make optional)
o decays are not ordered after particle code here while they are in the original: simple to fix
-- NOTE: tried to use pyslha to write the wig file, but did not work out of the box
'''
import sys,os
import bkgjelstenArgReader
from kilelib import ReadPipeOrFile, FilenameReplace, WriteToFile
import pyslha_edited as pyslha
import math
#from isa2lha import isa2pdg # problem with importing
# ##################################################### GLOBAL METHODS
# ##################################################### GLOBAL METHODS
# ##################################################### GLOBAL METHODS
# ISAJET particle codes handled by the converter.
# Python 2: range() returns a list, so `+` concatenates the ranges.
isaIDs = range(401,457+1) + range(203,207+1) + [458] + [6,12]
# Codes written out only as dummy entries (right-handed sneutrino slots and gravitino).
badisaIDs = [438,440,442,444,446,448, 458]
# =========================================================================== # isa2pdg
def PdgID_of_AntiPart(pdg):
    """Return the PDG code of the antiparticle of `pdg`.

    Sign is flipped for particles with distinct antiparticles; self-conjugate
    particles are returned unchanged.  Exits the program on unknown codes.
    (Python 2: relies on list-returning range() concatenation.)
    """
    apdg = abs(pdg)
    # standard cases: sfermions, charginos, charged Higgs, SM fermions, W, pi+
    if apdg in range(1000001,1000006+1) + range(2000001,2000006+1) + range(1000011,1000016+1) + range(2000011,2000016+1) + [1000024,1000037] + [37] + range(1,6+1) + range(11,16+1) + [24] + [211]:
        return -pdg
    # its own antiparticle: gluino, neutralinos, gravitino, neutral Higgses, g/gamma/Z, pi0
    if pdg in [1000021, 1000022,1000023,1000025,1000035, 1000039, 25, 35, 36, 21,22,23, 111]:
        return pdg
    # should not land here
    out = 'PdgID_of_AntiPart::FATAL: unknown pdg: %i ' %(pdg)
    sys.exit(out)
# #####
# ISAJET internal particle code -> PDG ID (SUSY spectrum, Higgses, Standard Model).
isa2pdg = {
    401: 1000001, #dL
    402: 1000002,
    403: 1000003,
    404: 1000004,
    405: 1000005,
    406: 1000006,
    407: -1000001, #dLbar
    408: -1000002,
    409: -1000003,
    410: -1000004,
    411: -1000005,
    412: -1000006,
    413: 2000001, #dR
    414: 2000002,
    415: 2000003,
    416: 2000004,
    417: 2000005,
    418: 2000006,
    419: -2000001, #dRbar
    420: -2000002,
    421: -2000003,
    422: -2000004,
    423: -2000005,
    424: -2000006,
    425: 1000011, #eL -
    426: 1000012, #ve
    427: 1000013, #mL -
    428: 1000014, #vm
    429: 1000015, #TL -
    430: 1000016, #vT
    431: -1000011, #eL bar +
    432: -1000012, #ve
    433: -1000013, #mL +
    434: -1000014, #vm
    435: -1000015, #TL +
    436: -1000016, #vT
    437: 2000011, #eR -
    # 438: 2000012, #veR :)
    439: 2000013, #mR -
    # 440: 2000014, #vmR
    441: 2000015, #TR -
    # 442: 2000016, #vTR
    443: -2000011, #eR bar +
    # 444: -2000012, #veR
    445: -2000013, #mR +
    # 446: -2000014, #vmR
    447: -2000015, #TR +
    # 448: -2000016, #vTR
    449: 1000021, #gl
    450: 1000022, #N1
    451: 1000023, #N2
    452: 1000025, #N3
    453: 1000035, #N4
    454: 1000024, #C1 +
    455: 1000037, #C2 +
    456: -1000024, #C1 -
    457: -1000037, #C2 -
    458: 1000039, #gravitino (G32) # 2013-06-26: was 10000039 (on too many zeros)
    203: 25, #h0 (NB SMhiggs has code 25)
    #203: 26, #h0 (NB SMhiggs has code 25) # 2012-06-11: 26 didn't work with herwig++
    204: 35, #H0
    205: 36, #A0 # was always correct
    206: 37, #H+
    207: -37, #H-
    # Standard Model:
    1: 1, #d
    2: 2, #u
    3: 3, #s
    4: 4, #c
    5: 5, #b
    6: 6, #t
    7: -1, #dbar
    8: -2, #ubar
    9: -3, #sbar
    10: -4, #cbar
    11: -5, #bbar
    12: -6, #tbar
    121: 11, #e -
    122: 12, #ve
    123: 13, #m -
    124: 14, #vm
    125: 15, #T -
    126: 16, #vT
    127: -11, #ebar + # 2012-06-28: sign was wrong
    128: -12, #vebar # -"-
    129: -13, #mbar + # -"-
    130: -14, #vmbar # -"-
    131: -15, #Tbar + # -"-
    132: -16, #vTbar # -"-
    13: 21, #gluon
    59: 22, #gamma
    200: 23, #Z
    198: 24, #W+
    199: -24, #W-
    21: 111, #PI0
    38: 211, #PI+
    30: -211, #PI-
}
def pdg2isa(pdg):
    """Inverse lookup of isa2pdg: return the ISAJET code for a PDG ID.

    Linear scan over the dict; returns 0 (with a warning) when unknown.
    (Python 2 print statement.)
    """
    keys = isa2pdg.keys()
    for key in keys:
        if isa2pdg[key] == pdg: return key
    print 'Warning::pdg2isa pdg not in isa2pdg: %i' %(pdg)
    return 0
class lha2isa:
    """Translate SLHA spectrum/decay files into ISAWIG-format .wig files (and a
    minimal isajet-style .out summary) for use with (fortran) Herwig.

    Input filenames come from stdin (pipe mode) or a list file; output names
    are derived from the replacement rules in s.dict.  Python 2 code.
    Note: `s` is used in place of the conventional `self` throughout.
    """
    def __init__(s, cmd=[]):
        # NOTE(review): mutable default `cmd=[]` is only read, so it is benign here.
        s.cmd = cmd
        s.myname = sys.argv[0].split('/').pop()
        s.dict = {}
        s.VB = 1  # verbosity level
        s.HOME = os.getenv('HOME')
        #s.dir0 = '%s/XXX' %(s.HOME)
        s.dir0 = ''
        s.dict = {}
        # s.dict['test'] = 'testval'
        # s.dict['test2'] = 'testval2'
        # s.dict['testI'] = 3
        # s.dict['testF'] = 4.34
        s.pipe = 1
        s.fnfn_lha = '' # file with filenames (either this or pipe)
        s.fns_lha = [] # need to be set
        s.fns_wig = [] # derived (or set?)
        s.makewig = 1
        # filename replacement rules (tried in order) mapping <slha name> -> <wig name>
        s.dict['fn_repl_lha2wig'] = ['_susyhit_slha.out','.wig']
        s.dict['fn_repl_lha2wig'] += ['.slha','.wig']
        s.dict['fn_repl_lha2wig'] += ['.lha','.wig']
        s.dict['fn_repl_lha2wig'] += ['slha','wig']
        s.dict['fn_repl_lha2wig'] += ['lha','wig']
        s.makeout = 1
        # filename replacement rules mapping <slha name> -> <out name>
        s.dict['fn_repl_lha2out'] = ['_susyhit_slha.out','.out']
        s.dict['fn_repl_lha2out'] += ['.slha','.out']
        s.dict['fn_repl_lha2out'] += ['.lha','.out']
        s.fn_wig_in = '' # can specify on command line, otherwise will use a replacement formular (specified above)
        s.fn_out_in = ''
        s.forcedir = ''
        s.warn = []
        s.fn_warn = 'warnings.txt'
        if s.dir0: s.fn_warn = '%s/%s' %(s.dir0, s.fn_warn)
        # constructor can immediately parse args and run, driven by `cmd`
        if 'ReadArg' in s.cmd: s.ReadArg()
        if 'Main' in s.cmd: s.Main()

    # ##################################################### CLASS METHODS
    # ##################################################### CLASS METHODS
    # ##################################################### CLASS METHODS
    # ##########
    def showHelp(s):
        # Usage summary (Python 2 print statements).
        print ' Usage: %s [options]' %(s.myname)
        print ' echo myfile.slha | lha2isa.py'
        print ' lha2isa.py -f fn_with_list_of_slhas # NOT IMPLEMENTED'
        print ' echo myfile.slha | lha2isa.py -fnwig a.wig -fnout a.out'
        print ' ls *.slha | lha2isa.py'

    # ##########
    def DumpWarnings(s):
        # Flush accumulated warnings to s.fn_warn (overwrites the file each call).
        f = open(s.fn_warn,'w')
        for out in s.warn: f.write('%s\n' %(out))
        f.close()

    # ##########
    def translate_lha2out(s, lha, fnout='', ret=0):
        """Write a minimal isajet-style .out summary (tan(beta), M_1/M_2, mu, mA).

        `lha` is the (blocks, decays) pair returned by pyslha; writes to
        `fnout` if given, returns the lines if `ret` is truthy.
        """
        # only superminimal version is made
        # the reading algo, ReadIsaout, is flexible enough to understand and not fail
        outs = []
        if 'EXTPAR' not in lha[0].keys():
            print 'FATAL::lha2isa::translate_lha2out No EXTPAR in lha friend of %s ... could rewrite translate_lha2out to instead take (most) from MSOFT' %(fnout)
        msoft = lha[0]['MSOFT'].entries
        extpar = lha[0]['EXTPAR'].entries
        hmix = lha[0]['HMIX'].entries
        minpar = lha[0]['MINPAR'].entries
        # try the three possible tan(beta) sources in priority order
        TANB_Z = minpar.get(3,-1) # at MZ
        TANB_INPUT = extpar.get(25,-1) # at EW scale (Q)
        TANB_EW = hmix.get(2,-1) # at EW scale (Q) .. I think TANB_INPUT and EW are identical, while TANB_Z is different
        if TANB_Z > 0: TANB_use = TANB_Z
        elif TANB_INPUT > 0: TANB_use = TANB_INPUT
        elif TANB_EW > 0: TANB_use = TANB_EW
        else:
            # NOTE(review): typo — this sets TANBuse (no underscore), so on this
            # path TANB_use is unbound and the append below raises NameError.
            TANBuse = -1
            print 'warning::slha2wig DID not find TANBETA value, set to unphysical -1'
        outs.append(' TAN(BETA) = %9.3f' %(TANB_use))
        MU = extpar[23] # or hmix[1]
        MGLSS = extpar[3] # this is M_3 ...
        mA = extpar[26]
        outs.append(' M(GLSS) = %9.3f MU = %9.3f M(HA) = %9.3f' %(MGLSS, MU, mA))
        M1 = extpar[1] # or msoft[1]
        M2 = extpar[2] # or msoft[2]
        outs.append(' M_1 = %9.3f M_2 = %9.3f' %(M1,M2) )
        # Finalise:
        outs.append('')
        outs.append(' PARENT (this is keyword for libISAWIG::ReadIsaout)') # <-- this is what triggers exit in ReadIsaout
        # TO FILE
        if fnout: WriteToFile(fn=fnout, outs=outs, VB=s.VB)
        # RETURN
        if ret: return outs

    # ##########
    def translate_lha2wig(s, lha, fnwig='', ret=0):
        """Build the ISAWIG .wig text: particle masses/lifetimes, decay tables,
        and the mixing/input section; writes to `fnwig` and/or returns lines."""
        outs = []
        if s.VB>2: print len(isaIDs)
        # 0 INIT
        massobj = lha[0].get('MASS',{})
        massdict = massobj.entries
        nomass = -9e9  # sentinel for "mass not found"
        particledict = lha[1]
        # 1 PARTICLE SECTION
        outs.append('%4i' %(len(isaIDs)))
        for iisaID in range(len(isaIDs)):
            isaID = isaIDs[iisaID]
            # if iisaID > 3: continue
            # if s.VB>1: print isaID
            if isaID in badisaIDs:
                # dummy entry: zero mass, effectively stable
                outs.append("%5i %11.4f %11.5e" %(isaID, 0, 1e30))
                continue
            pdgID = isa2pdg.get(isaID,0)
            apdgID = abs(pdgID)
            if pdgID == 0:
                print 'WARNING: isaID: %i' %(isaID)
            #if mass == nomass:
            # s.warn.append('WARNING::translate_lha2wig No mass for isaID %i pdgID %i' %(isaID, pdgID))
            # print s.warn[-1]
            if apdgID in [1000022]: # fragile
                # LSP: take the mass from the MASS block, treat it as stable
                mass = massdict.get(apdgID,nomass)
                lifetime = 1e30 # tja
            else:
                particle = particledict[apdgID] # NB: for antiparticle will need to 'invert'
                mass = particle.mass
                width = particle.totalwidth
                # lifetime [s] from width [GeV] via hbar; stable if width is zero
                if width == 0: lifetime = 1e30 # 2014-01-20
                else: lifetime = 6.582e-25 / width
            if pdgID in [-6,6]:
                # hard-coded top mass hack (see module header TO FIX list)
                mass = 172.5 # ever? # fragile
            # print isaID
            # print mass, type(mass)
            # print width
            out = "%5i %11.4f %11.5e" %(isaID, mass, lifetime)
            outs.append(out)
        # 2 DECAY SECTION
        for iisaID in range(len(isaIDs)):
            isaID = isaIDs[iisaID]
            if isaID in badisaIDs:
                outs.append(' 0')
                continue
            pdgID = isa2pdg.get(isaID,0)
            apdgID = abs(pdgID)
            isAntiPart = False
            if pdgID < 0: isAntiPart = True
            try:
                particle = particledict[apdgID]
            except:
                # Necessary for LSP
                outs.append('%4i' %(0))
                continue
            decays = particle.decays
            ndecays = len(decays)
            outs.append('%4i' %(ndecays))
            #print 'DEBUG %3i %4i %8i' %(iisaID, isaID, pdgID)
            for idecay in range(len(decays)):
                decay = decays[idecay]
                whatisthis = 0
                # special flag for top / tbar entries — TODO confirm meaning
                if isaID in [6,12]: whatisthis = 100
                out = ' %5i %15.8e %3i' %(isaID, decay.br, whatisthis)
                for ida in range(decay.nda):
                    thispdgID = decay.ids[ida]
                    # decays are stored for the particle; flip codes for the antiparticle
                    if isAntiPart: thispdgID = PdgID_of_AntiPart(thispdgID)
                    thisisaID = pdg2isa(thispdgID)
                    out += ' %5i' %(thisisaID)
                for idum in range(5-decay.nda):
                    # pad the decay-product list to five entries
                    out += ' %5i' %(0)
                outs.append(out)
        # 3 INPUT SECTION
        tanbeta = lha[0]['MINPAR'].entries.get(3,-1) # tanbeta_Z: this one is not filled in slha from susyhit
        if tanbeta < 0: tanbeta = lha[0]['HMIX'].entries.get(2,-1) # tanbeta_EW: ... can deviate from input value
        if tanbeta < 0: tanbeta = lha[0]['EXTPAR'].entries.get(25,-1) # tanbeta_EW: ... can deviate from input value
        if tanbeta < 0:
            print 'Warning::slha2wig tanbeta not found. set to -1'
        # NOTE(review): `entries` of ALPHA is formatted below with %12.8f, i.e.
        # assumed to be a single float — confirm against the pyslha version used.
        alphah = lha[0]['ALPHA'].entries
        outs.append(' %12.8f %12.8f' %(tanbeta, alphah))
        nmix = lha[0]['NMIX']
        for ii in [1,2,3,4]:
            outs.append(' %12.8f %12.8f %12.8f %12.8f' %(nmix.entries[ii][1],nmix.entries[ii][2],nmix.entries[ii][3],nmix.entries[ii][4]))
        vmixs = lha[0]['VMIX'].entries
        outs.append(' %12.8f %12.8f %12.8f %12.8f' %(vmixs[1][1],vmixs[1][2],vmixs[2][1],vmixs[2][2]))
        umixs = lha[0]['UMIX'].entries
        outs.append(' %12.8f %12.8f %12.8f %12.8f' %(umixs[1][1],umixs[1][2],umixs[2][1],umixs[2][2]))
        # sfermion mixing angles from the (1,1) elements of the mixing matrices
        thetat = math.acos(lha[0]['STOPMIX'].entries[1][1])
        thetab = math.acos(lha[0]['SBOTMIX'].entries[1][1])
        thetatau = math.acos(lha[0]['STAUMIX'].entries[1][1])
        outs.append(' %12.8f %12.8f %12.8f' %(thetat, thetab, thetatau))
        a_t = lha[0]['AU'].entries[3][3]
        a_b = lha[0]['AD'].entries[3][3]
        a_tau = lha[0]['AE'].entries[3][3]
        outs.append(' %15.8f %15.8f %15.8f' %(a_t, a_b, a_tau))
        mu = lha[0]['HMIX'].entries[1]
        outs.append(' %15.8f' %(mu))
        outs.append(' T')
        # TO FILE
        if fnwig: WriteToFile(fn=fnwig, outs=outs, VB=s.VB)
        # RETURN
        if ret: return outs

    # ##########
    def Main(s):
        """Read slha filenames (pipe or list file) and translate each one to
        a .wig and/or .out file next to the input (or into s.forcedir)."""
        # if s.VB: print "INFO::%s Main" %(s.myname)
        # for key in s.dict.keys(): print 'dict: %-10s %s' %(key, s.dict[key])
        s.fns_lha = ReadPipeOrFile(pipe=s.pipe, f=s.fnfn_lha)
        # for out in s.fns_lha: print out
        if s.VB>0: print "INFO Number of slha files: %i" %(len(s.fns_lha))
        if len(s.fns_lha) == 0: print 'lha2isa: Zero slha files read. No action. (pipe=%i ; f=%s)' %(s.pipe, s.fnfn_lha)
        for ilha in range(len(s.fns_lha)):
            fn_lha = s.fns_lha[ilha]
            #print s.fns_lha
            lha = pyslha.readSLHAFile(fn_lha)
            #pyslha.writeISAWIGFile(fn_wig, blocks=lha[0], decays=lha[1]) # not out of the box
            # This small structure ensures flexibility in getting the output dir right
            outdir = ''
            if '/' in fn_lha:
                w = fn_lha.split('/')
                w.pop()
                for iw in range(len(w)):
                    #if iw > 0: outdir += '/'
                    ww = w[iw]
                    outdir += '%s/' %(ww)
            if s.forcedir: outdir = s.forcedir + '/'
            if s.makewig:
                # explicit output name wins; otherwise derive from input name
                if s.fn_wig_in: fn_wig = outdir + s.fn_wig_in
                else: fn_wig = FilenameReplace(fn_lha, repl=s.dict['fn_repl_lha2wig'], safeapp='.wig')
                s.translate_lha2wig(lha, fnwig=fn_wig)
            if s.makeout:
                if s.fn_out_in: fn_out = outdir + s.fn_out_in
                else: fn_out = FilenameReplace(fn_lha, repl=s.dict['fn_repl_lha2out'], safeapp='.out')
                s.translate_lha2out(lha, fnout=fn_out)

    # ##########
    def ReadArg(s):
        """Parse command-line options into instance attributes / s.dict."""
        # ################################### ARGUMENT READING
        Arg = bkgjelstenArgReader.ArgReader(sys.argv, VB=0)
        '''
        if Arg.hasget('-alist'): print 'a string list: ',Arg.list()
        if Arg.hasget('-alisti'): print 'an integer list: ',Arg.listI()
        if Arg.hasget('-alistf'): print 'a float list: ',Arg.listF()
        if Arg.hasget('-x'): print 'a string: ',Arg.val()
        if Arg.hasget('-xI'): print 'an integer: ',Arg.valI()
        if Arg.hasget('-xF'): print 'a float: ',Arg.valF()
        '''
        if Arg.has(['-h','--help','--h','-help']):
            s.showHelp()
            sys.exit()
        if Arg.hasget('-vb'):
            s.VB = Arg.valI()
            if s.VB: print 'Verbosity level: %i' %(s.VB)
        if Arg.has('--pipe'):
            s.pipe = 1
        if Arg.hasget(['-fnlha','-f']):
            # read filenames from a list file instead of stdin
            s.fnfn_lha = Arg.val()
            s.pipe = 0
        if Arg.hasget(['-fnwig','-fn_wig']):
            s.fn_wig_in = Arg.val()
        if Arg.hasget(['-fnout','-fn_out']):
            s.fn_out_in = Arg.val()
        # ----- The new general procedure for var input (should this be put into the ArgReader?)
        if Arg.hasget('-dict'):
            # syntax: -dict [I,|F,]key,val[,val...][:more entries]
            zs = Arg.list(':')
            # print zs
            for z in zs:
                zw = z.split(',')
                # First determine var type (default is string)
                ztype = 'string'
                if zw[0] in ['I']: ztype = zw.pop(0)
                elif zw[0] in ['F']: ztype = zw.pop(0)
                # Then get the key / var name and check
                key = zw.pop(0)
                if key not in s.dict:
                    # this restriction might be dropped
                    print s.dict
                    sys.exit('FATAL non-existing var set with -var: %s (%s)' %(key, zs))
                if len(zw) == 0: sys.exit('FATAL non-allowed arg for -var: %s' %(zs))
                # The fill the dict/var
                s.dict[key] = [] # First make a list. If only one entry, turn list into a plain value (bottom)
                for zw1 in zw:
                    zval = zw1
                    if ztype == 'I': zval = int(zw1)
                    elif ztype == 'F': zval = float(zw1)
                    s.dict[key].append(zval)
                if len(zw) == 1: s.dict[key] = s.dict[key][0] # if just one entry, don't use list
        # -----
        if not Arg.AllOk():
            print 'Problems...'
            s.showHelp()
            sys.exit("FATAL Ending due to problems of arguments")
        # ################################### POST-INIT
############################## EXECUTE IF RUN AS SCRIPT (NOT JUST IMPORTED)
if __name__ == '__main__':
    # construction parses arguments and immediately runs the conversion
    t = lha2isa(cmd=['ReadArg','Main'])
##############################
|
import media
import fresh_tomatoes
import errno
import csv
def fetch_data_file(file_path="data/movie_data.csv"):
    """Open the movie-data CSV and return the open file object.

    Args:
        file_path: path to the CSV file; the default preserves the original
            hard-coded location (generalized so other paths can be used).

    Returns:
        An open text-mode file object. The caller is responsible for closing it.

    Raises:
        IOError: if the file is missing or cannot be opened.  (Bug fix: the
            original *returned* an error string on ENOENT, which the caller
            then passed to csv.reader as if it were a file.)
    """
    print("Attempting to open file: " + file_path)
    try:
        return open(file_path, "r")  # open file for 'r' reading
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise IOError("movie data file " + file_path + " not found.") from e
        raise
def parse_data_file(file):
    """Parse an open CSV file into a list of media.Movie objects.

    Each row must provide at least four columns, in order: title, storyline,
    poster image URL, trailer YouTube URL.

    Fixes: dropped the unused enumerate() index and the stray debug
    print(reader), which only printed the reader object's repr.
    """
    movies = []
    reader = csv.reader(file)
    for row in reader:
        movie = media.Movie(row[0],  # movie_title
                            row[1],  # movie_storyline
                            row[2],  # poster_image
                            row[3])  # trailer_youtube
        movies.append(movie)
    return movies
def main():
    """Load the movie data, parse it, and render the movies page."""
    data_file = fetch_data_file()
    movie_list = parse_data_file(data_file)
    fresh_tomatoes.open_movies_page(movie_list)
# Script entry point: build the movie list and open the generated page.
if __name__ == '__main__':
    main()
|
def fahrenheit_to_celsius(f):
    """Convert degrees Fahrenheit to degrees Celsius.

    Bug fix: the original computed (f - 32) * 9 / 5, which applies the
    Celsius->Fahrenheit scale factor; F->C must multiply by 5/9
    (e.g. 212 F must give 100 C, not 324 C).
    """
    return (f - 32) * 5 / 9


if __name__ == "__main__":
    # Interactive use: prompt, convert, and show the result to 2 decimals.
    f = float(input("Enter degrees f°:"))
    c = fahrenheit_to_celsius(f)
    print("this is your degrees in c°", round(c, 2))
import enum
import Entry
class CategoryId(enum.Enum):
    """Enumeration of transaction category ids.

    NOTE(review): every assignment below ends with a trailing comma, so each
    member's .value is a 1-tuple, e.g. CategoryId.id_1.value == (1,).  Code
    elsewhere in this module depends on that via `m.value[0]` — do not remove
    the commas without updating those call sites.
    """
    id_1 = 1,
    id_2 = 2,
    id_3 = 3,
    id_4 = 4,
    id_5 = 5,
    id_6 = 6,
    id_7 = 7,
    id_8 = 8,
    id_9 = 9,
    id_10 = 10,
    id_11 = 11,
    id_12 = 12,
    id_13 = 13,
    id_14 = 14,
def build_category_description():
    """Load category descriptions from 'analytics\\category.str'.

    Each file line looks like '<number>: <description>'.  Returns a dict
    mapping CategoryId member *names* to their description strings.
    """
    descriptions = {}
    source = 'analytics\\category.str'
    with open(source, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            parts = raw_line.split(':')
            number = int(parts[0].strip())
            description = str(parts[1].strip())
            # find the enum member whose (tuple) value matches this number
            for member_name, member in CategoryId.__members__.items():
                if member.value[0] == number:
                    descriptions[member_name] = description
    return descriptions
# Built once at import time; maps CategoryId member names to descriptions.
# NOTE(review): reading 'analytics\category.str' is an import-time side effect.
_categories = build_category_description()
class CartTransactionCategory(object):
    """Matcher for card-transaction entries; no criteria implemented yet."""

    def match(self, entry):
        # No classification rules defined, so every entry is unmatched.
        return None
def build_transfer_category_critiria():
    """Load account-to-category rules from 'analytics\\TransferCategory.criteria'.

    Each file line looks like '<account number>: <category id>'.  Returns a dict
    mapping account-number strings to CategoryId members.  (The function name
    keeps the original spelling because callers reference it.)
    """
    criteria = {}
    source = 'analytics\\TransferCategory.criteria'
    with open(source, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            parts = raw_line.split(':')
            account = str(parts[0].strip())
            category_number = int(parts[1].strip())
            # find the enum member whose (tuple) value matches this id
            for _, member in CategoryId.__members__.items():
                if member.value[0] == category_number:
                    criteria[account] = member
    return criteria
# Built once at import time; maps account-number strings to CategoryId members.
_transfer_category_critiria = build_transfer_category_critiria()
class TransferCategory(object):
    """Categorizes transfer entries by their account number."""

    def match(self, entry):
        """Return the CategoryId for the entry's account number, or None.

        Idiom fix: dict.get replaces the try/except KeyError dance — same
        result (None on unknown accounts), clearer intent, single lookup.
        """
        return _transfer_category_critiria.get(entry.acc_number())
class Category(object):
    """Guesses a category for an Entry and resolves category descriptions."""

    def guess(self, entry):
        """Pick a matcher by the entry's operation type and delegate to it.

        Returns None for operation types with no matcher.
        """
        type_id = Entry.OperationMap[entry.type()]
        if type_id == Entry.OperationType.CART_TRANSACTION:
            matcher = CartTransactionCategory()
        elif type_id in (Entry.OperationType.TRANSFER_OUT, Entry.OperationType.TRANSFER_IN):
            matcher = TransferCategory()
        else:
            return None
        return matcher.match(entry)

    def description(self, category_id):
        """Return the human-readable description for a CategoryId member."""
        return _categories[category_id.name]
|
from extensions import DatabaseConnection as db_conn
from .models import Event
class EventManager(db_conn):
    """Data-access object for Event rows (raw-SQL CRUD over DatabaseConnection).

    SECURITY NOTE(review): every query below is built with f-string
    interpolation; if any event field originates from user input this is
    vulnerable to SQL injection.  Switch to parameterized queries as soon as
    the DatabaseConnection API supports passing query parameters.
    """

    def __init__(self):
        # NOTE(review): does not call super().__init__() — relies on the
        # DatabaseConnection base needing no per-instance setup; confirm.
        pass

    def deserialize(self, row):
        # Positional unpack: the row's column order must match Event's constructor.
        return Event(*row)

    def create(self, event):
        """INSERT a new Event row.

        The query RETURNs the new EventID but this method does not capture it.
        Rolls back and re-raises on any failure.
        """
        query = f"""
            INSERT
            INTO Event (
                ClientUserID,
                PlannerUserID,
                LocationID,
                InstitutionID,
                EventName,
                EventBudget,
                PlanningFee,
                StartTimestamp,
                EndTimestamp
            )
            VALUES(
                {event.client_user_id},
                {event.planner_user_id},
                {event.location_id},
                {event.institution_id},
                '{event.event_name}',
                {event.event_budget},
                {event.planning_fee},
                '{event.start_timestamp}',
                '{event.end_timestamp}'
            )
            RETURNING EventID;
        """
        try:
            self.execute_write_op(query)
        except Exception as e:
            # leave the connection usable before propagating the error
            self.rollback()
            raise e

    def update(self, event):
        """UPDATE every column of the Event row identified by event.event_id."""
        query = f"""
            UPDATE Event
            SET ClientUserID = {event.client_user_id},
                PlannerUserID = {event.planner_user_id},
                LocationID = {event.location_id},
                InstitutionID = {event.institution_id},
                EventName = '{event.event_name}',
                EventBudget = {event.event_budget},
                PlanningFee = {event.planning_fee},
                StartTimestamp = '{event.start_timestamp}',
                EndTimestamp = '{event.end_timestamp}'
            WHERE EventID = {event.event_id};
        """
        try:
            self.execute_write_op(query)
        except Exception as e:
            self.rollback()
            raise e

    def fetch_by_eventid(self, event_id):
        """Return the Event with the given id, deserialized into an Event object."""
        query = f"""
            SELECT *
            FROM Event
            WHERE EventID={event_id};
        """
        try:
            result = self.fetch_single_row(query)
        except Exception as e:
            self.rollback()
            raise e
        return self.deserialize(result)

    def fetch_by_eventid_special(self, event_id):
        """Return a dict of the event joined with location, planner, and loan
        provider details, plus its orders (with supplier and item names)."""
        event_query = f"""
            SELECT Event.*, Location.LocationName, LocationAddress, "user".FirstName, "user".LastName, Loan_Provider.Name
            FROM Event
            LEFT JOIN Location ON Event.LocationID = Location.LocationID
            LEFT JOIN "user" ON Event.PlannerUserID = "user".UserID
            LEFT JOIN Loan_Provider ON Event.InstitutionID = Loan_Provider.InstitutionID
            WHERE EventID={event_id};
        """
        order_query = f"""
            SELECT "order".*, "user".FirstName, "user".LastName, Supply.ItemName
            FROM "order"
            LEFT JOIN "user" ON "order".SupplierUserID = "user".UserID
            LEFT JOIN Supply ON "order".ItemID = Supply.ItemID
            WHERE EventID = {event_id};
        """
        try:
            event_result = self.fetch_single_row(event_query)
            order_result = self.fetch_all_rows(order_query)
        except Exception as e:
            self.rollback()
            raise e
        # Positional indices below follow the SELECT column order above.
        return {
            'event_id': event_result[0],
            'client_user_id': event_result[1],
            'planner_user_id': event_result[2],
            'location_id': event_result[3],
            'institution_id': event_result[4],
            'event_name': event_result[5],
            'event_budget': event_result[6],
            'planning_fee': event_result[7],
            'start_timestamp': event_result[8],
            'end_timestamp': event_result[9],
            'location_name': event_result[10],
            'location_address': event_result[11],
            'planner_name': f'{event_result[12]} {event_result[13]}',
            'loan_provider_name': event_result[14],
            'orders': [
                {
                    'item_id': row[0],
                    'supplier_user_id': row[1],
                    'client_user_id': row[2],
                    # row[3] (the order's EventID) is intentionally skipped
                    'quantity': row[4],
                    'supplier_name': f'{row[5]} {row[6]}',
                    'item_name': row[7]
                } for row in order_result
            ]
        }

    def fetch_by_clientid(self, client_id):
        """Return all of a client's events, each with its order count folded in."""
        query = f"""
            SELECT e.EventID, e.ClientUserID, e.PlannerUserID, e.LocationID, e.InstitutionID, e.EventName, e.EventBudget, e.PlanningFee, e.StartTimestamp, e.EndTimestamp,
                CASE WHEN o.count is NULL THEN 0 ELSE o.count END AS orders
            FROM (
                SELECT EventID, count(*) AS count
                FROM "order"
                GROUP BY EventID
            ) o
            RIGHT JOIN (SELECT * FROM event WHERE ClientUserID={client_id}) e
            ON e.EventID = o.EventID;
        """
        try:
            result = self.fetch_all_rows(query)
        except Exception as e:
            self.rollback()
            raise e
        return list(self.deserialize(row) for row in result)
|
##########################
# Strings, part two       #
# Author: Marlene Marchena
##########################
message = "Bonjour tout le monde"
print(message)
text1 = "Bonjour"
text2 = 'tout le monde'
print(text1 + " " + text2)
# you can do operations with strings
print(text1 * 3)
print(len(message))
# print(message.title())
# print(message.upper()) # converts the string to upper case
# print(message.lower())
# print(message.find("j")) # finds the position of the letter j
# print(message.count("o")) # counts how many times "o" appears in the string
#
# # Don't forget to create a new variable
# new_message = message.replace("o", "M")
# print(new_message)
# message_sep = message.split(" ")
# print(message_sep)
# Exercise 1: create a variable `mot` holding the string "banane".
# Then create a new variable `mot1` with "banane" in capital letters, i.e.
# "BANANE", using the upper() method.
# Exercise 2: replace the letter 'n' of "banane" with '$' using replace().
def tankvol(h, d, vt):
    """Volume of liquid in a horizontal cylindrical tank, rounded down.

    The original definition had no body.  This implements the standard
    circular-segment formula: for a tank of diameter `d` and total volume
    `vt`, filled to depth `h` (0 < h <= d), the liquid fraction equals the
    segment area divided by the full circle area.

    Args:
        h: liquid depth, same length unit as d (0 < h <= d).
        d: tank diameter.
        vt: total tank volume.

    Returns:
        int: the liquid volume, truncated toward zero (floor for positives).
    """
    r = d / 2.0
    # Area of the circular segment below the liquid surface.
    area_liquid = r * r * math.acos((r - h) / r) - (r - h) * math.sqrt(2.0 * r * h - h * h)
    area_total = r * r * math.pi
    return int(vt * area_liquid / area_total)
#!/usr/bin/python
class GrabzItWaterMark:
    """Value object describing a GrabzIt watermark and where to place it."""

    def __init__(self, identifier, xPosition, yPosition, format):
        # `format` mirrors the API field name and intentionally shadows the builtin.
        self.Format = format
        self.YPosition = yPosition
        self.XPosition = xPosition
        self.Identifier = identifier
# This file is only intended for development purposes
from kubeflow.kubeflow.ci import base_runner

# Kick off the "twa-tests" CI workflow for the twa_tests component.
base_runner.main(component_name="twa_tests",
                 workflow_name="twa-tests")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-31 20:19
from __future__ import unicode_literals
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares the cropping parameters of
    # Teacher.image_thumb (free crop on a 200x200 ratio over the 'image' field).

    dependencies = [
        ('training', '0010_course_detail_bg'),
    ]

    operations = [
        migrations.AlterField(
            model_name='teacher',
            name='image_thumb',
            field=image_cropping.fields.ImageRatioField('image', '200x200', adapt_rotation=False, allow_fullsize=False, free_crop=True, help_text=None, hide_image_field=False, size_warning=False, verbose_name='Эскиз'),
        ),
    ]
|
def name(a,*d):
    # NOTE(review): `d` is the varargs *tuple*, so `d=="a"` is always False and
    # the `and` chain short-circuits before `g` — an undefined name — is ever
    # evaluated.  The `return g` branch is therefore unreachable; if it were
    # reached it would raise NameError.  Confirm the intended condition
    # (perhaps `"a" in d`?) before changing behavior.  As written, this
    # function always returns `a`.
    if a==1 and d=="a" and g=="pass":
        return g
    else:
        return a
def call(**args):
    """Print each keyword-argument *name* on its own line; returns None."""
    for key in args:
        print(key)
# `call` prints the keyword names and returns None, so `a` is always None here
# and the else-branch below prints "None".
a=call(name='aloukik',passd='a',pas='s')
if a==1:
    print("accepted")
else:
    print(a)
"""
Utility functions for working with FASTA files. Includes various functions for
reading and writing FASTA files.
Written by Kevin Wu in Ortiz Lab, May 2015
"""
import genomeUtil as g
def writeFASTA(filename, header, sequence, trimHeaders = True, append = True):
    """Write one FASTA record (header plus 80-column-wrapped sequence).

    `filename` may be a path string or an already-open file object (Python 2
    `file` type); either way the file is (re)opened in append or write mode
    and closed before returning.
    """
    # Allows us to deal with both file inputs and strings as input.
    if type(filename) == file:
        fileOut = filename
        # NOTE(review): reopening by .name discards the caller's handle/position.
        if append == True:
            fileOut = open(fileOut.name, mode = "a")
        else:
            fileOut = open(fileOut.name, mode = "w")
    else:
        if append == True:
            fileOut = open(filename, mode = "a")
        else:
            fileOut = open(filename, mode = "w")
    # Wrap the sequence at 80 characters per FASTA convention.
    # NOTE(review): this calls g.splitSequence even though this module defines
    # its own splitSequence — confirm which implementation is intended.
    sequenceSplitted = g.splitSequence(sequence, 80)
    if trimHeaders == True:
        header = truncateFASTAHeader(header)
    fileOut.write(header + "\n")
    for seq in sequenceSplitted:
        fileOut.write(seq + "\n")
    fileOut.write("\n")
    fileOut.close()
def readFASTA(filename):
    """Read a FASTA file into {header line: concatenated sequence}.

    `filename` may be a path string or an open file object (Python 2 `file`).
    Assumes the first non-blank line is a header; otherwise `latestHeader`
    is used before assignment — TODO confirm inputs always start with '>'.
    """
    # Allows us to deal with both file objects and with strings as input
    if type(filename) == file:
        inFile = filename
    else:
        inFile = open(filename, 'r')
    content = inFile.readlines()
    inFile.close()
    fastaDict = {} # Stores fasta sequences. Key is header, entry is fasta sequence
    for line in content:
        line = line.rstrip('\n') # Remove newlines
        # NOTE(review): a '>' anywhere in the line marks it as a header,
        # not only at column 0 — confirm this is intended.
        if ">" in line:
            fastaDict[line] = ""
            latestHeader = line
        else:
            fastaDict[latestHeader] = fastaDict[latestHeader] + line
    return fastaDict
def splitSequence(line, n = 1):
    """Split a nucleotide sequence string into consecutive chunks of length n.

    The final chunk may be shorter when len(line) is not a multiple of n.
    """
    starts = range(0, len(line), n)
    chunks = [line[start:start + n] for start in starts]
    return chunks
def truncateFASTAHeader(fastaHeader, length = 80):
    """Return `fastaHeader` truncated to at most `length` characters.

    Bug fix: the original assigned to `splilitedHeader` but then indexed the
    undefined name `splitted`, so every call raised NameError; it also used
    index [1] where the *first* chunk was clearly intended (this function is
    used by writeFASTA to trim headers).  A plain slice is equivalent to
    splitSequence(fastaHeader, length)[0] and also handles the empty string.
    """
    return fastaHeader[:length]
|
import time, datetime
class MyTimer():
    """Simple wall-clock stopwatch started at construction time."""

    def __init__(self):
        # Remember the moment the timer was created (epoch seconds).
        self.startTime = time.time()

    def getNow(self, format):
        """Return the current local date/time rendered with `format` (strftime)."""
        now = datetime.datetime.today()
        return now.strftime(format)

    def getTime(self):
        """Return elapsed seconds since this timer was constructed."""
        elapsed = time.time() - self.startTime
        return elapsed
|
class Question:
    """Routes incoming containers into two buckets keyed by container name.

    Bucket 1 holds the first name seen; every other name goes to bucket 2.
    """

    def __init__(self):
        # Two buckets of containers plus a (currently unused) name registry.
        self.container1 = []
        self.container2 = []
        self.names = set()

    def addContainer(self, container):
        """Place `container` in bucket 1 or 2 based on its `name` attribute."""
        if not self.container1:
            # first container ever seen defines bucket 1's name
            self.container1.append(container)
            return
        if container.name == self.container1[0].name:
            self.container1.append(container)
            return
        self.container2.append(container)

    def printContainer1(self):
        """Print every container in bucket 1, one stanza per container."""
        for item in self.container1:
            print ("Name : " + str(item.name) + "\nEntity : " + str(item.entity) + "\nAttr : " + str(item.attribute) + "\nQuantity : " + str(item.quantity) + "\n")

    def printContainer2(self):
        """Print every container in bucket 2, one stanza per container."""
        for item in self.container2:
            print ("Name : " + str(item.name) + "\nEntity : " + str(item.entity) + "\nAttr : " + str(item.attribute) + "\nQuantity : " + str(item.quantity) + "\n")
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.views import APIView
from victory.models import Code
from victory.serializers import CodeSerializer
from victory.services import Services
# from rest_framework.renderers import JSONRenderer
# from rest_framework.parsers import JSONParser
# Create your views here.
class VictoryViews(APIView):
    """REST endpoints: deliver code chains to the app and register codes."""

    # Shared service layer used to look up already-stored codes.
    service = Services()

    def get(self, request):
        """I want to deliver code to test to app"""
        payload = {
            "chains" : ["1-2-3","4-5-6"]
        }
        return Response(payload)

    def post(self, request):
        """I want to add a new code in db only if it does not exists"""
        data = request.data
        print(f"Request POST : {data}")
        serializer = CodeSerializer(data=data)
        if not serializer.is_valid():
            # Validation failed: report serializer errors (no status override).
            error_body = {
                'msg': f"Une erreur a été rencontrée {serializer.errors}."
            }
            return Response(error_body)
        # Persist only successful codes that are not already stored.
        if data['success'] and not self.service.search_existing_code(data['code']):
            serializer.save()
            print(f"code {data['code']} est ajouté dans la base.")
            body = {
                'msg': f"Le code {data['code']} est ajouté dans la base."
            }
        else:
            print(f"Pas de nouvel ajout.")
            body = {
                'msg': f"Pas de nouvel ajout."
            }
        return Response(body, 201)
|
import os
# set a cron job to run the program
# open the music file
# NOTE(review): macOS-specific -- `open` hands the mp3 to the default player.
# The path is hard-coded and user-specific; confirm it exists on the target
# machine before scheduling this via cron.
os.system("open /Users/rahul/Desktop/to_u.mp3")
# exit program
exit()
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import logging
import os
import glob
import sys
import re
import time
import math
import pyutilib.services
from pyutilib.misc import Bunch, Options
import pyomo.util.plugin
from pyomo.opt.base import *
from pyomo.opt.base.solvers import _extract_version
from pyomo.opt.results import *
from pyomo.opt.solver import *
from pyomo.core.base import (SymbolMap,
ComponentMap,
NumericLabeler,
TextLabeler,
is_fixed,
value)
from pyomo.repn import generate_canonical_repn
from pyomo.solvers import wrappers
from pyomo.core.kernel.component_block import IBlockStorage
from six import itervalues, iterkeys, iteritems, advance_iterator
from six.moves import xrange
logger = logging.getLogger('pyomo.solvers')

# Python 2/3 compatibility: Python 3 has no `unicode` builtin, so probing it
# raises NameError and we alias `basestring` to `str`.
try:
    unicode
except NameError:
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only the missing-name case should trigger the shim.
    basestring = str
# Module-level probe state: None means "not probed yet"; after
# configure_cplex() runs these hold the probe results.
_cplex_version = None
cplex_import_available = None

def configure_cplex():
    """Probe for the CPLEX Python bindings once and cache the result.

    On success sets cplex_import_available=True, stashes the imported
    module on CPLEXDirect._cplex_module and records _cplex_version as a
    tuple of exactly four ints.  On any failure sets
    cplex_import_available=False.  Subsequent calls return immediately.
    """
    global _cplex_version
    global cplex_import_available
    if cplex_import_available is not None:
        # Already probed -- never repeat the (potentially expensive) import.
        return
    try:
        import cplex
        CPLEXDirect._cplex_module = cplex
        # create a version tuple of length 4 from the dotted version string
        _cplex_version = tuple(int(i) for i in cplex.Cplex().get_version().split('.'))
        while(len(_cplex_version) < 4):
            _cplex_version += (0,)
        _cplex_version = _cplex_version[:4]
        cplex_import_available=True
    except ImportError:
        cplex_import_available=False
    except Exception as e:
        # other forms of exceptions can be thrown by CPLEX python
        # import. For example, an error in code invoked by the module's
        # __init__. We should continue gracefully and not cause a fatal
        # error in Pyomo.
        print("Import of cplex failed - cplex message=%s\n" % (e,) )
        cplex_import_available=False
class CplexSolverWrapper(wrappers.MIPSolverWrapper):
    """Thin adapter exposing a CPLEX solver object behind the generic
    MIPSolverWrapper interface."""
    def __init__(self, solver):
        # The underlying CPLEX solver instance being wrapped.
        self.cplex = solver
    def add(self, constraint):
        """TODO"""
        # Not implemented yet -- constraints passed here are silently ignored.
        pass
class ModelSOS(object):
    """Accumulates SOS constraint data, one numbered block per
    SOSConstraint, in the form the CPLEX API expects."""

    def __init__(self):
        # Each dict maps block counter -> per-constraint data.
        self.sosType = {}
        self.sosName = {}
        self.varnames = {}
        self.varids = {}
        self.weights = {}
        self.block_cntr = 0

    def count_constraint(self,symbol_map,labeler,variable_symbol_map,soscondata):
        """Record one SOSConstraint's variables/weights under a fresh block id.

        Empty constraints are skipped without consuming a block id.
        Raises ValueError for SOS levels other than 1/2 and RuntimeError
        when a member variable is fixed.
        """
        if hasattr(soscondata, 'get_items'):
            members = list(soscondata.get_items())
        else:
            members = list(soscondata.items())
        sos_level = soscondata.level
        if not members:
            return
        self.block_cntr += 1
        names_entry = self.varnames[self.block_cntr] = []
        ids_entry = self.varids[self.block_cntr] = []
        weights_entry = self.weights[self.block_cntr] = []
        if sos_level == 1:
            self.sosType[self.block_cntr] = CPLEXDirect._cplex_module.Cplex.SOS.type.SOS1
        elif sos_level == 2:
            self.sosType[self.block_cntr] = CPLEXDirect._cplex_module.Cplex.SOS.type.SOS2
        else:
            raise ValueError("Unsupported SOSConstraint level %s" % sos_level)
        self.sosName[self.block_cntr] = symbol_map.getSymbol(soscondata,labeler)
        for member_var, member_weight in members:
            if member_var.fixed:
                raise RuntimeError("SOSConstraint '%s' includes a fixed variable '%s'. "
                                   "This is currently not supported. Deactivate this constraint "
                                   "in order to proceed" % (soscondata.name, member_var.name))
            ids_entry.append(id(member_var))
            names_entry.append(variable_symbol_map.getSymbol(member_var))
            weights_entry.append(member_weight)
class CPLEXDirect(OptSolver):
"""The CPLEX LP/MIP solver
"""
_cplex_module = None
pyomo.util.plugin.alias('_cplex_direct',
doc='Direct Python interface to the CPLEX LP/MIP solver')
    def __init__(self, **kwds):
        """Initialize the direct CPLEX interface plugin.

        Probes for the CPLEX Python bindings, forwards keyword arguments
        to the OptSolver base constructor (defaulting the solver type to
        'cplexdirect'), and initializes per-solve bookkeeping state.
        """
        configure_cplex()
        #
        # Call base class constructor
        #
        # This gets overridden by CPLEXPersistent
        if 'type' not in kwds:
            kwds['type'] = 'cplexdirect'
        OptSolver.__init__(self, **kwds)
        # this interface doesn't use files, but we can create a log
        # file if requested
        self._keepfiles = False
        # do we warmstart
        self._warm_start_solve = False
        # io_options
        self._symbolic_solver_labels = False
        self._output_fixed_variable_bounds = False
        self._skip_trivial_constraints = False
        # The working problem instance, via CPLEX python constructs.
        self._active_cplex_instance = None
        # Note: Undefined capabilities default to 'None'
        self._capabilities = Options()
        self._capabilities.linear = True
        self._capabilities.quadratic_objective = True
        self._capabilities.quadratic_constraint = True
        self._capabilities.integer = True
        self._capabilities.sos1 = True
        self._capabilities.sos2 = True
        # flag allowing for the use, during solves, of user-defined callbacks.
        self._allow_callbacks = True
        # the CPLEX python API doesn't provide a mechanism to track
        # user/system/wall clock time, so it's up to us. stored as an
        # attribute of the plugin to facilitate persistence across
        # various portions of the method invocations.
        self._solve_user_time = None
        # collection of id(_VarData) for variables referenced in the
        # compiled model.
        self._referenced_variable_ids = set()
def available(self, exception_flag=True):
""" True if the solver is available """
if exception_flag is False:
return cplex_import_available
else:
if cplex_import_available is False:
raise ApplicationError(
"No CPLEX <-> Python bindings available - CPLEX direct "
"solver functionality is not available")
else:
return True
    def _get_version(self):
        """Return the cached CPLEX version tuple.

        Falls back to the "unknown version" produced by
        _extract_version('') when the bindings never imported (the
        module-level _cplex_version is still None).
        """
        if _cplex_version is None:
            return _extract_version('')
        return _cplex_version
def _get_bound(self, exp):
if exp is None:
return None
if is_fixed(exp):
return value(exp)
raise ValueError("non-fixed bound: " + str(exp))
#
# CPLEX requires objective expressions to be specified via
# something other than a sparse pair!
# NOTE: The returned offset is guaranteed to be a float.
#
def _encode_constraint_body_linear(self, expression, labeler, as_pairs=False):
variables = [] # string names of variables
coefficients = [] # variable coefficients
pairs = []
hash_to_variable_map = expression[-1]
self_variable_symbol_map = self._variable_symbol_map
if 1 in expression:
for var_hash, var_coefficient in iteritems(expression[1]):
vardata = hash_to_variable_map[var_hash]
self._referenced_variable_ids.add(id(vardata))
variable_name = self_variable_symbol_map.getSymbol(vardata)
if as_pairs is True:
pairs.append((variable_name, var_coefficient))
else:
variables.append(variable_name)
coefficients.append(var_coefficient)
offset=0.0
if 0 in expression:
offset = expression[0][None]
if as_pairs is True:
return pairs, offset
else:
expr = CPLEXDirect._cplex_module.SparsePair(ind=variables, val=coefficients)
return expr, offset
#
# CPLEX requires objective expressions to be specified via
# something other than a sparse pair!
# NOTE: The returned offset is guaranteed to be a float.
# NOTE: This function is a variant of the above, specialized
# for LinearCanonicalRepn objects.
#
def _encode_constraint_body_linear_specialized(self,
linear_repn,
labeler,
use_variable_names=True,
cplex_variable_name_index_map=None,
as_pairs=False):
variable_identifiers = [] # strings if use_variable_names = True; integers otherwise.
variable_coefficients = []
pairs = []
# caching for efficiency
constant = linear_repn.constant
coefficients = linear_repn.linear
variables = linear_repn.variables
self_variable_symbol_map = self._variable_symbol_map
if (variables is not None) and \
(len(variables) > 0):
for var_value, var_coefficient in zip(variables, coefficients):
self._referenced_variable_ids.add(id(var_value))
variable_name = self_variable_symbol_map.getSymbol(var_value)
if use_variable_names == False:
cplex_variable_id = cplex_variable_name_index_map[variable_name]
if as_pairs == True:
if use_variable_names == True:
pairs.append((variable_name, var_coefficient))
else:
pairs.append((cplex_variable_id, var_coefficient))
else:
if use_variable_names == True:
variable_identifiers.append(variable_name)
else:
variable_identifiers.append(cplex_variable_id)
variable_coefficients.append(var_coefficient)
offset=0.0
if constant is not None:
offset = constant
if as_pairs is True:
return pairs, offset
else:
expr = CPLEXDirect._cplex_module.SparsePair(ind=variable_identifiers, val=variable_coefficients)
return expr, offset
#
# Handle quadratic constraints and objectives
#
def _encode_constraint_body_quadratic(self,
expression,
labeler,
as_triples=False,
is_obj=1.0):
variables1 = [] # string names of variables
variables2 = [] # string names of variables
coefficients = [] # variable coefficients
triples = []
hash_to_variable_map = expression[-1]
self_variable_symbol_map = self._variable_symbol_map
for vrs, coeff in iteritems(expression[2]):
variable_hash_iter = iterkeys(vrs)
vardata = hash_to_variable_map[advance_iterator(variable_hash_iter)]
self._referenced_variable_ids.add(id(vardata))
var1 = self_variable_symbol_map.getSymbol(vardata)
if len(vrs)==2:
vardata = hash_to_variable_map[advance_iterator(variable_hash_iter)]
self._referenced_variable_ids.add(id(vardata))
var2 = self_variable_symbol_map.getSymbol(vardata)
else:
var2 = var1
if as_triples is True:
triples.append((var1, var2, is_obj*coeff))
else:
variables1.append(var1)
variables2.append(var2)
coefficients.append(coeff)
if as_triples is True:
return triples
else:
expr = CPLEXDirect._cplex_module.SparseTriple(ind1=variables1,
ind2=variables2,
val=coefficients)
return expr
#
# method to populate the CPLEX problem instance (interface) from
# the supplied Pyomo problem instance.
#
def _populate_cplex_instance(self, pyomo_instance):
from pyomo.core.base import Var, Objective, Constraint, SOSConstraint
from pyomo.repn import canonical_is_constant
from pyomo.repn import LinearCanonicalRepn, canonical_degree
self._instance = pyomo_instance
quadratic_constraints = False
quadratic_objective = False
used_sos_constraints = False
cplex_instance = None
try:
cplex_instance = CPLEXDirect._cplex_module.Cplex()
except CPLEXDirect._cplex_module.exceptions.CplexError:
e = sys.exc_info()[1]
msg = 'Unable to create Cplex model. Have you installed the Python'\
'\n bindings for Cplex?\n\n\tError message: %s'
print(sys.exc_info()[1])
raise Exception(msg % e)
if self._symbolic_solver_labels:
labeler = TextLabeler()
else:
labeler = NumericLabeler('x')
symbol_map = SymbolMap()
self._smap_id = id(symbol_map)
if isinstance(pyomo_instance, IBlockStorage):
# BIG HACK (see pyomo.core.kernel write function)
if not hasattr(pyomo_instance, "._symbol_maps"):
setattr(pyomo_instance, "._symbol_maps", {})
getattr(pyomo_instance,
"._symbol_maps")[self._smap_id] = symbol_map
else:
pyomo_instance.solutions.add_symbol_map(symbol_map)
# we use this when iterating over the constraints because it
# will have a much smaller hash table, we also use this for
# the warm start code after it is cleaned to only contain
# variables referenced in the constraints
self_variable_symbol_map = self._variable_symbol_map = SymbolMap()
var_symbol_pairs = []
# cplex wants the caller to set the problem type, which is (for current
# purposes) strictly based on variable type counts.
num_binary_variables = 0
num_integer_variables = 0
num_continuous_variables = 0
# transfer the variables from pyomo to cplex.
var_names = []
var_lbs = []
var_ubs = []
var_types = []
self._referenced_variable_ids.clear()
for var in pyomo_instance.component_data_objects(Var, active=True):
if var.fixed and not self._output_fixed_variable_bounds:
# if a variable is fixed, and we're preprocessing
# fixed variables (as in not outputting them), there
# is no need to add them to the compiled model.
continue
varname = symbol_map.getSymbol( var, labeler )
var_names.append(symbol_map.getSymbol( var, labeler ))
var_symbol_pairs.append((var, varname))
if not var.has_lb():
var_lbs.append(-CPLEXDirect._cplex_module.infinity)
else:
var_lbs.append(value(var.lb))
if not var.has_ub():
var_ubs.append(CPLEXDirect._cplex_module.infinity)
else:
var_ubs.append(value(var.ub))
if var.is_binary():
var_types.append(cplex_instance.variables.type.binary)
num_binary_variables += 1
elif var.is_integer():
var_types.append(cplex_instance.variables.type.integer)
num_integer_variables += 1
elif var.is_continuous():
var_types.append(cplex_instance.variables.type.continuous)
num_continuous_variables += 1
else:
raise TypeError("Invalid domain type for variable with name '%s'. "
"Variable is not continuous, integer, or binary.")
self_variable_symbol_map.addSymbols(var_symbol_pairs)
cplex_instance.variables.add(names=var_names,
lb=var_lbs,
ub=var_ubs,
types=var_types)
# transfer the constraints.
expressions = []
senses = []
rhss = []
range_values = []
names = []
qexpressions = []
qlinears = []
qsenses = []
qrhss = []
qnames = []
# The next loop collects the following component types from the model:
# - SOSConstraint
# - Objective
# - Constraint
sos1 = self._capabilities.sos1
sos2 = self._capabilities.sos2
modelSOS = ModelSOS()
objective_cntr = 0
for block in pyomo_instance.block_data_objects(active=True):
gen_obj_canonical_repn = \
getattr(block, "_gen_obj_canonical_repn", True)
gen_con_canonical_repn = \
getattr(block, "_gen_con_canonical_repn", True)
# Get/Create the ComponentMap for the repn
if not hasattr(block,'_canonical_repn'):
block._canonical_repn = ComponentMap()
block_canonical_repn = block._canonical_repn
# SOSConstraints
for soscondata in block.component_data_objects(SOSConstraint,
active=True,
descend_into=False):
level = soscondata.level
if (level == 1 and not sos1) or \
(level == 2 and not sos2) or \
(level > 2):
raise RuntimeError(
"Solver does not support SOS level %s constraints" % (level))
modelSOS.count_constraint(symbol_map,
labeler,
self_variable_symbol_map,
soscondata)
# Objective
for obj_data in block.component_data_objects(Objective,
active=True,
descend_into=False):
objective_cntr += 1
if objective_cntr > 1:
raise ValueError(
"Multiple active objectives found on Pyomo instance '%s'. "
"Solver '%s' will only handle a single active objective" \
% (pyomo_instance.name, self.type))
if obj_data.is_minimizing():
cplex_instance.objective.\
set_sense(cplex_instance.objective.sense.minimize)
else:
cplex_instance.objective.\
set_sense(cplex_instance.objective.sense.maximize)
cplex_instance.objective.set_name(symbol_map.getSymbol(obj_data,
labeler))
if gen_obj_canonical_repn:
obj_repn = generate_canonical_repn(obj_data.expr)
block_canonical_repn[obj_data] = obj_repn
else:
obj_repn = block_canonical_repn[obj_data]
if (isinstance(obj_repn, LinearCanonicalRepn) and \
((obj_repn.linear is None) or \
(len(obj_repn.linear) == 0))) or \
canonical_is_constant(obj_repn):
print("Warning: Constant objective detected, replacing " + \
"with a placeholder to prevent solver failure.")
cplex_instance.variables.add(lb=[1],
ub=[1],
names=["ONE_VAR_CONSTANT"])
objective_expression = [("ONE_VAR_CONSTANT",obj_repn.constant)]
cplex_instance.objective.set_linear(objective_expression)
else:
if isinstance(obj_repn, LinearCanonicalRepn):
objective_expression, offset = \
self._encode_constraint_body_linear_specialized(
obj_repn,
labeler,
as_pairs=True)
if offset != 0:
cplex_instance.variables.add(lb=[1],
ub=[1],
names=["ONE_VAR_CONSTANT"])
objective_expression.append(("ONE_VAR_CONSTANT",offset))
cplex_instance.objective.set_linear(objective_expression)
else:
#Linear terms
if 1 in obj_repn:
objective_expression, offset = \
self._encode_constraint_body_linear(obj_repn,
labeler,
as_pairs=True)
if offset != 0:
cplex_instance.variables.add(lb=[1],
ub=[1],
names=["ONE_VAR_CONSTANT"])
objective_expression.append(("ONE_VAR_CONSTANT",offset))
cplex_instance.objective.set_linear(objective_expression)
#Quadratic terms
if 2 in obj_repn:
quadratic_objective = True
objective_expression = \
self._encode_constraint_body_quadratic(
obj_repn,
labeler,
as_triples=True,
is_obj=2.0)
cplex_instance.objective.\
set_quadratic_coefficients(objective_expression)
degree = canonical_degree(obj_repn)
if (degree is None) or (degree > 2):
raise ValueError(
"CPLEXDirect plugin does not support general nonlinear "
"objective expressions (only linear or quadratic).\n"
"Objective: %s" % (obj_data.name))
# Constraint
for con in block.component_data_objects(Constraint,
active=True,
descend_into=False):
if (not con.has_lb()) and \
(not con.has_ub()):
assert not con.equality
continue # not binding at all, don't bother
con_repn = None
if con._linear_canonical_form:
con_repn = con.canonical_form()
elif isinstance(con, LinearCanonicalRepn):
con_repn = con
else:
if gen_con_canonical_repn:
con_repn = generate_canonical_repn(con.body)
block_canonical_repn[con] = con_repn
else:
con_repn = block_canonical_repn[con]
# There are conditions, e.g., when fixing variables, under which
# a constraint block might be empty. Ignore these, for both
# practical reasons and the fact that the CPLEX LP format
# requires a variable in the constraint body. It is also
# possible that the body of the constraint consists of only a
# constant, in which case the "variable" of
if isinstance(con_repn, LinearCanonicalRepn):
if self._skip_trivial_constraints and \
((con_repn.linear is None) or \
(len(con_repn.linear) == 0)):
continue
else:
# we shouldn't come across a constant canonical repn
# that is not LinearCanonicalRepn
assert not canonical_is_constant(con_repn)
name = symbol_map.getSymbol(con, labeler)
expr=None
qexpr=None
quadratic = False
if isinstance(con_repn, LinearCanonicalRepn):
expr, offset = \
self._encode_constraint_body_linear_specialized(con_repn,
labeler)
else:
degree = canonical_degree(con_repn)
if degree == 2:
quadratic = True
elif (degree != 0) or (degree != 1):
raise ValueError(
"CPLEXDirect plugin does not support general nonlinear "
"constraint expressions (only linear or quadratic).\n"
"Constraint: %s" % (con.name))
expr, offset = self._encode_constraint_body_linear(con_repn,
labeler)
#Quadratic constraints
if quadratic:
if expr is None:
expr = CPLEXDirect._cplex_module.SparsePair(ind=[0],val=[0.0])
quadratic_constraints = True
qexpr = self._encode_constraint_body_quadratic(con_repn, labeler)
qnames.append(name)
if con.equality:
# equality constraint.
qsenses.append('E')
qrhss.append(self._get_bound(con.lower) - offset)
elif con.has_lb() and con.has_ub():
raise RuntimeError(
"The CPLEXDirect plugin can not translate range "
"constraints containing quadratic expressions.")
elif con.has_lb():
assert not con.has_ub()
qsenses.append('G')
qrhss.append(self._get_bound(con.lower) - offset)
else:
assert con.has_ub()
qsenses.append('L')
qrhss.append(self._get_bound(con.upper) - offset)
qlinears.append(expr)
qexpressions.append(qexpr)
else:
names.append(name)
expressions.append(expr)
if con.equality:
# equality constraint.
senses.append('E')
rhss.append(self._get_bound(con.lower) - offset)
range_values.append(0.0)
elif con.has_lb() and con.has_ub():
# ranged constraint.
senses.append('R')
lower_bound = self._get_bound(con.lower) - offset
upper_bound = self._get_bound(con.upper) - offset
rhss.append(lower_bound)
range_values.append(upper_bound - lower_bound)
elif con.has_lb():
senses.append('G')
rhss.append(self._get_bound(con.lower) - offset)
range_values.append(0.0)
else:
assert con.has_ub()
senses.append('L')
rhss.append(self._get_bound(con.upper) - offset)
range_values.append(0.0)
if modelSOS.sosType:
for key in modelSOS.sosType:
cplex_instance.SOS.add(type = modelSOS.sosType[key],
name = modelSOS.sosName[key],
SOS = [modelSOS.varnames[key],
modelSOS.weights[key]])
self._referenced_variable_ids.update(modelSOS.varids[key])
used_sos_constraints = True
fixed_upper_bounds = []
fixed_lower_bounds = []
for var_id in self._referenced_variable_ids:
varname = self._variable_symbol_map.byObject[var_id]
vardata = self._variable_symbol_map.bySymbol[varname]()
if vardata.fixed:
if not self._output_fixed_variable_bounds:
raise ValueError(
"Encountered a fixed variable (%s) inside an active objective"
" or constraint expression on model %s, which is usually "
"indicative of a preprocessing error. Use the IO-option "
"'output_fixed_variable_bounds=True' to suppress this error "
"and fix the variable by overwriting its bounds in the Cplex "
"instance." % (vardata.name,pyomo_instance.name))
fixed_lower_bounds.append((varname,vardata.value))
fixed_upper_bounds.append((varname,vardata.value))
if len(fixed_upper_bounds):
cplex_instance.variables.set_upper_bounds(fixed_upper_bounds)
if len(fixed_lower_bounds):
cplex_instance.variables.set_upper_bounds(fixed_lower_bounds)
cplex_instance.linear_constraints.add(lin_expr=expressions,
senses=senses,
rhs=rhss,
range_values=range_values,
names=names)
for index in xrange(len(qexpressions)):
cplex_instance.quadratic_constraints.add(lin_expr=qlinears[index],
quad_expr=qexpressions[index],
sense=qsenses[index],
rhs=qrhss[index],
name=qnames[index])
# This gets rid of the annoying "Freeing MIP data." message.
def _filter_freeing_mip_data(val):
if val.strip() == 'Freeing MIP data.':
return ""
return val
cplex_instance.set_warning_stream(sys.stderr,
fn=_filter_freeing_mip_data)
# set the problem type based on the variable counts.
if (quadratic_objective is True) or (quadratic_constraints is True):
if (num_integer_variables > 0) or \
(num_binary_variables > 0) or \
(used_sos_constraints):
if quadratic_constraints is True:
cplex_instance.set_problem_type(
cplex_instance.problem_type.MIQCP)
else:
cplex_instance.set_problem_type(
cplex_instance.problem_type.MIQP)
else:
if quadratic_constraints is True:
cplex_instance.set_problem_type(
cplex_instance.problem_type.QCP)
else:
cplex_instance.set_problem_type(
cplex_instance.problem_type.QP)
elif (num_integer_variables > 0) or \
(num_binary_variables > 0) or \
(used_sos_constraints):
cplex_instance.set_problem_type(
cplex_instance.problem_type.MILP)
else:
cplex_instance.set_problem_type(
cplex_instance.problem_type.LP)
# restore the warning stream without our filter function
cplex_instance.set_warning_stream(sys.stderr)
self._active_cplex_instance = cplex_instance
#
# cplex has a simple, easy-to-use warm-start capability.
#
    def warm_start_capable(self):
        """This interface always accepts warm starts (applied via MIP_starts)."""
        return True
#
# write a warm-start file in the CPLEX MST format.
#
def _warm_start(self, instance):
# the iteration order is identical to that used in generating
# the cplex instance, so all should be well.
variable_names = []
variable_values = []
for symbol, vardata in iteritems(self._variable_symbol_map.bySymbol):
if vardata().value is not None:
variable_names.append(symbol)
variable_values.append(vardata().value)
if len(variable_names):
self._active_cplex_instance.MIP_starts.add(
[variable_names, variable_values],
self._active_cplex_instance.MIP_starts.effort_level.auto)
    def _default_results_format(self, prob_format):
        # No file-based results format applies: solutions are read
        # directly from the in-memory CPLEX object (see _postsolve).
        return None
# over-ride presolve to extract the warm-start keyword, if specified.
    def _presolve(self, *args, **kwds):
        """Consume CPLEXDirect-specific keywords and compile the model.

        Accepts exactly one positional argument: the Pyomo Model (or
        IBlockStorage) to solve.  Recognized keywords: 'warmstart',
        'keepfiles', 'symbolic_solver_labels',
        'output_fixed_variable_bounds', 'skip_trivial_constraints';
        everything else is passed on to OptSolver._presolve.  Raises
        ValueError for a wrong argument count or instance type.
        """
        from pyomo.core.base.PyomoModel import Model
        # create a context in the temporary file manager for
        # this plugin - is "pop"ed in the _postsolve method.
        pyutilib.services.TempfileManager.push()
        self._warm_start_solve = kwds.pop('warmstart', False)
        self._keepfiles = kwds.pop('keepfiles', False)
        # extract io_options here as well, since there is no
        # way to tell what kwds were consumed inside
        # OptSolver._presolve. It will be up to that method
        # to decide if remaining kwds are error worthy
        self._symbolic_solver_labels = \
            kwds.pop('symbolic_solver_labels', False)
        self._output_fixed_variable_bounds = \
            kwds.pop('output_fixed_variable_bounds', False)
        # Skip writing constraints whose body section is fixed (i.e., no variables)
        self._skip_trivial_constraints = \
            kwds.pop("skip_trivial_constraints", False)
        # TODO: A bad name for it here, but possibly still
        # useful (perhaps generalize the name)
        #self._file_determinism = \
        #    kwds.pop('file_determinism', 1)
        # this implies we have a custom solution "parser",
        # preventing the OptSolver _presolve method from
        # creating one
        self._results_format = ResultsFormat.soln
        # use the base class _presolve to consume the
        # important keywords
        OptSolver._presolve(self, *args, **kwds)
        if self._log_file is None:
            self._log_file = pyutilib.services.TempfileManager.\
                             create_tempfile(suffix = '.cplex.log')
        # Possible TODOs
        if self._timelimit is not None:
            logger.warning("The 'timelimit' keyword will be ignored "
                           "for solver="+self.type)
        if self._soln_file is not None:
            logger.warning("The 'soln_file' keyword will be ignored "
                           "for solver="+self.type)
        # raises ApplicationError if the CPLEX bindings are missing
        self.available()
        # Step 1: extract the pyomo instance from the input arguments,
        # cache it, and create the corresponding (as of now empty)
        # CPLEX problem instance.
        if len(args) != 1:
            msg = "The CPLEXDirect plugin method '_presolve' must be supplied "\
                  "a single problem instance - %s were supplied"
            raise ValueError(msg % len(args))
        model = args[ 0 ]
        if not isinstance(model, (Model, IBlockStorage)):
            msg = "The problem instance supplied to the CPLEXDirect plugin " \
                  "method '_presolve' must be of type 'Model' - "\
                  "interface does not currently support file names"
            raise ValueError(msg)
        # TBD-document.
        self._populate_cplex_instance(model)
        # Clean up the symbol map to only contain variables referenced
        # in the constraints **NOTE**: The warmstart method (if called
        # below), relies on a "clean" symbol map
        vars_to_delete = set(self._variable_symbol_map.byObject.keys()) - \
                         self._referenced_variable_ids
        if isinstance(model, IBlockStorage):
            symbol_map = getattr(model,
                                 "._symbol_maps")[self._smap_id]
        else:
            symbol_map = model.solutions.symbol_map[self._smap_id]
        sm_byObject = symbol_map.byObject
        sm_bySymbol = symbol_map.bySymbol
        assert len(symbol_map.aliases) == 0
        var_sm_byObject = self._variable_symbol_map.byObject
        var_sm_bySymbol = self._variable_symbol_map.bySymbol
        for varid in vars_to_delete:
            symbol = var_sm_byObject[varid]
            del sm_byObject[varid]
            del sm_bySymbol[symbol]
            del var_sm_byObject[varid]
            del var_sm_bySymbol[symbol]
        if 'write' in self.options:
            fname = self.options.write
            self._active_cplex_instance.write(fname)
        # Handle other keywords
        # if the first argument is a string (representing a filename),
        # then we don't have an instance => the solver is being applied
        # to a file.
        # FIXME: This appears to be a bogus test: we raise an exception
        # above if len(args) != 1 or type(args[0]) != Model
        if (len(args) > 0) and not isinstance(model, basestring):
            # write the warm-start file - currently only supports MIPs.
            # we only know how to deal with a single problem instance.
            if self._warm_start_solve:
                if len(args) != 1:
                    msg = "CPLEX _presolve method can only handle a single " \
                          "problem instance - %s were supplied"
                    raise ValueError(msg % len(args))
                cplex_instance = self._active_cplex_instance
                cplex_problem_type = cplex_instance.get_problem_type()
                # warm starts only make sense for discrete problem types
                if (cplex_problem_type == cplex_instance.problem_type.MILP) or \
                   (cplex_problem_type == cplex_instance.problem_type.MIQP) or \
                   (cplex_problem_type == cplex_instance.problem_type.MIQCP):
                    start_time = time.time()
                    self._warm_start(model)
                    end_time = time.time()
                    if self._report_timing is True:
                        print("Warm start write time=%.2f seconds"
                              % (end_time-start_time))
#
# TBD
#
    def _apply_solver(self):
        """Transfer solver options to CPLEX and run the solve.

        Special-cased options: 'mipgap' (mapped to the MIP gap
        tolerance), 'relax_integrality' (forces the LP problem type) and
        'write' (already consumed in _presolve).  Any other option key is
        treated as an underscore-separated path into the CPLEX parameter
        tree.  Records solve time in self._solve_user_time when the
        bindings expose get_time, and returns a Bunch(rc=None, log=None).
        """
        # set up all user-specified parameters.
        if (self.options.mipgap is not None) and (self.options.mipgap > 0.0):
            self._active_cplex_instance.parameters.mip.\
                tolerances.mipgap.set(self.options.mipgap)
        for key in self.options:
            if key == 'relax_integrality' or key == 'mipgap' or key == 'write':
                continue
            else:
                # e.g. "mip_limits_nodes" walks parameters.mip.limits.nodes
                opt_cmd = self._active_cplex_instance.parameters
                key_pieces = key.split('_')
                for key_piece in key_pieces:
                    opt_cmd = getattr(opt_cmd,key_piece)
                opt_cmd.set(self.options[key])
        if 'relax_integrality' in self.options:
            self._active_cplex_instance.set_problem_type(
                self._active_cplex_instance.problem_type.LP)
        if self._tee:
            # echo solver output to stdout while still logging it
            def _process_stream(arg):
                sys.stdout.write(arg)
                return arg
            self._active_cplex_instance.set_results_stream(
                self._log_file,
                _process_stream)
        else:
            self._active_cplex_instance.set_results_stream(
                self._log_file)
        if self._keepfiles:
            print("Solver log file: "+self._log_file)
        #
        # Kick off the solve.
        #
        # apparently some versions of the CPLEX Python bindings do not
        # have the get_time - so check before accessing.
        if hasattr(self._active_cplex_instance, "get_time"):
            solve_start_time = self._active_cplex_instance.get_time()
            self._active_cplex_instance.solve()
            self._solve_user_time = \
                self._active_cplex_instance.get_time() - solve_start_time
        else:
            self._active_cplex_instance.solve()
            self._solve_user_time = None
        # FIXME: can we get a return code indicating if CPLEX had a
        # significant failure?
        return Bunch(rc=None, log=None)
    def _postsolve(self):
        """Build a SolverResults object from the active CPLEX instance.

        Populates problem statistics and solver/termination status, and —
        when a solution exists — variable values plus any requested
        dual/slack/reduced-cost suffixes.  Also removes CPLEX's stray
        log files and resets per-solve state before delegating to
        OptSolver._postsolve().

        Raises:
            RuntimeError: if the user requested a suffix other than
                dual, slack, or rc.
        """
        # the only suffixes that we extract from CPLEX are
        # constraint duals, constraint slacks, and variable
        # reduced-costs. scan through the solver suffix list
        # and throw an exception if the user has specified
        # any others.
        extract_duals = False
        extract_slacks = False
        extract_reduced_costs = False
        for suffix in self._suffixes:
            flag=False
            if re.match(suffix,"dual"):
                extract_duals = True
                flag=True
            if re.match(suffix,"slack"):
                extract_slacks = True
                flag=True
            if re.match(suffix,"rc"):
                extract_reduced_costs = True
                flag=True
            if not flag:
                raise RuntimeError(
                    "***The CPLEXDirect solver plugin cannot "
                    "extract solution suffix="+suffix)
        cplex_instance = self._active_cplex_instance
        # duals and reduced costs are not extracted for MIP-class problems
        if cplex_instance.get_problem_type() in [cplex_instance.problem_type.MILP,
                                                 cplex_instance.problem_type.MIQP,
                                                 cplex_instance.problem_type.MIQCP]:
            extract_reduced_costs = False
            extract_duals = False
        # Remove variables whose absolute value is smaller than
        # CPLEX's epsilon from the results data
        #cplex_instance.cleanup()
        results = SolverResults()
        results.problem.name = cplex_instance.get_problem_name()
        results.problem.lower_bound = None #cplex_instance.solution.
        results.problem.upper_bound = None
        results.problem.number_of_variables = cplex_instance.variables.get_num()
        # total constraint count: linear + quadratic + indicator + SOS
        results.problem.number_of_constraints = \
            cplex_instance.linear_constraints.get_num() \
            + cplex_instance.quadratic_constraints.get_num() \
            + cplex_instance.indicator_constraints.get_num() \
            + cplex_instance.SOS.get_num()
        results.problem.number_of_nonzeros = None
        results.problem.number_of_binary_variables = \
            cplex_instance.variables.get_num_binary()
        results.problem.number_of_integer_variables = \
            cplex_instance.variables.get_num_integer()
        results.problem.number_of_continuous_variables = \
            cplex_instance.variables.get_num() \
            - cplex_instance.variables.get_num_binary() \
            - cplex_instance.variables.get_num_integer() \
            - cplex_instance.variables.get_num_semiinteger()
        #TODO: Does this double-count semi-integers?
        #Should we also remove semi-continuous?
        results.problem.number_of_objectives = 1
        results.solver.name = "CPLEX "+cplex_instance.get_version()
        # results.solver.status = None
        results.solver.return_code = None
        results.solver.message = None
        results.solver.user_time = self._solve_user_time
        results.solver.system_time = None
        results.solver.wallclock_time = None
        results.solver.termination_message = None
        soln = Solution()
        soln_variable = soln.variable
        soln_constraint = soln.constraint
        soln.gap = None # until proven otherwise
        #Get solution status -- for now, if CPLEX returns anything we
        #don't recognize, mark as an error
        # NOTE(review): the integer codes below are CPLEX solution-status
        # codes (LP codes and their MIP 1xx counterparts) -- confirm
        # against the CPLEX Python API reference for the targeted version.
        soln_status = cplex_instance.solution.get_status()
        if soln_status in [1, 101, 102]:
            results.solver.termination_condition = TerminationCondition.optimal
            soln.status = SolutionStatus.optimal
        elif soln_status in [2, 118]:
            results.solver.termination_condition = TerminationCondition.unbounded
            soln.status = SolutionStatus.unbounded
        elif soln_status in [4, 119]:
            # Note: soln_status of 4 means infeasible or unbounded
            # and 119 means MIP infeasible or unbounded
            results.solver.termination_condition = TerminationCondition.infeasibleOrUnbounded
            soln.status = SolutionStatus.unsure
        elif soln_status in [3, 103]:
            results.solver.termination_condition = TerminationCondition.infeasible
            soln.status = SolutionStatus.infeasible
        else:
            soln.status = SolutionStatus.error
        # for MIP-class problems, record the absolute gap when available
        if cplex_instance.get_problem_type() in [cplex_instance.problem_type.MILP,
                                                 cplex_instance.problem_type.MIQP,
                                                 cplex_instance.problem_type.MIQCP]:
            try:
                upper_bound = cplex_instance.solution.get_objective_value()
                lower_bound = cplex_instance.solution.MIP.get_best_objective() # improperly named, IM(JPW)HO.
                relative_gap = cplex_instance.solution.MIP.get_mip_relative_gap()
                absolute_gap = upper_bound - lower_bound
                soln.gap = absolute_gap
            except CPLEXDirect._cplex_module.exceptions.CplexSolverError:
                # something went wrong during the solve and no solution
                # exists
                pass
        #Only try to get objective and variable values if a solution exists
        soln_type = cplex_instance.solution.get_solution_type()
        if soln_type > 0:
            soln.objective[cplex_instance.objective.get_name()] = \
                {'Value': cplex_instance.solution.get_objective_value()}
            num_variables = cplex_instance.variables.get_num()
            variable_names = cplex_instance.variables.get_names()
            variable_values = cplex_instance.solution.get_values()
            for i in xrange(num_variables):
                variable_name = variable_names[i]
                soln_variable[variable_name] = {"Value" : variable_values[i]}
            if extract_reduced_costs:
                # get variable reduced costs
                rc_values = cplex_instance.solution.get_reduced_costs()
                for i in xrange(num_variables):
                    soln_variable[variable_names[i]]["Rc"] = rc_values[i]
            if extract_slacks or extract_duals:
                num_linear_constraints = cplex_instance.linear_constraints.get_num()
                constraint_names = cplex_instance.linear_constraints.get_names()
                num_quadratic_constraints = cplex_instance.quadratic_constraints.get_num()
                q_constraint_names = cplex_instance.quadratic_constraints.get_names()
                # pre-create an (empty) record per linear constraint
                for i in xrange(num_linear_constraints):
                    soln_constraint[constraint_names[i]] = {}
            if extract_duals:
                # get duals (linear constraints only)
                dual_values = cplex_instance.solution.get_dual_values()
                for i in xrange(num_linear_constraints):
                    soln_constraint[constraint_names[i]]["Dual"] = dual_values[i]
                # CPLEX PYTHON API DOES NOT SUPPORT QUADRATIC DUAL COLLECTION
            if extract_slacks:
                # get linear slacks
                slack_values = cplex_instance.solution.get_linear_slacks()
                for i in xrange(num_linear_constraints):
                    # if both U and L exist (i.e., a range constraint) then
                    # R_ = U-L
                    R_ = cplex_instance.linear_constraints.get_range_values(i)
                    if R_ == 0.0:
                        soln_constraint[constraint_names[i]]["Slack"] = slack_values[i]
                    else:
                        # This is a range constraint for which cplex
                        # always returns the value of f(x)-L. In the
                        # spirit of conforming with the other writer,
                        # I will return the max (in absolute value) of
                        # L-f(x) and U-f(x)
                        Ls_ = slack_values[i]
                        Us_ = R_ - slack_values[i]
                        if Us_ > Ls_:
                            soln_constraint[constraint_names[i]]["Slack"] = Us_
                        else:
                            soln_constraint[constraint_names[i]]["Slack"] = -Ls_
                # get quadratic slacks
                slack_values = cplex_instance.solution.get_quadratic_slacks()
                for i in xrange(num_quadratic_constraints):
                    # if both U and L exist (i.e., a range constraint) then
                    # R_ = U-L
                    soln_constraint[q_constraint_names[i]] = \
                        {"Slack" : slack_values[i]}
        # locate the symbol map for this solve.  NOTE: "._symbol_maps"
        # (leading dot included) appears to be the literal attribute name
        # used for kernel (IBlockStorage) instances -- looks intentional,
        # not a typo; confirm against the pyomo kernel block storage code.
        if isinstance(self._instance, IBlockStorage):
            symbol_map = getattr(self._instance,
                                 "._symbol_maps")[self._smap_id]
        else:
            symbol_map = self._instance.solutions.\
                         symbol_map[self._smap_id]
        byObject = symbol_map.byObject
        # drop solution entries for variables that never appeared in the
        # constraints/objective actually sent to CPLEX
        referenced_varnames = set(byObject[varid]
                                  for varid in self._referenced_variable_ids)
        names_to_delete = set(soln_variable.keys())-referenced_varnames
        for varname in names_to_delete:
            del soln_variable[varname]
        results.solution.insert(soln)
        self.results = results
        # don't know if any of this is necessary!
        # take care of the annoying (and empty) CPLEX temporary files in
        # the current directory. this approach doesn't seem overly
        # efficient, but python os module functions don't accept regular
        # expression directly.
        try:
            filename_list = glob.glob("cplex.log") + \
                            glob.glob("clone*.log")
            clone_re = re.compile('clone\d+\.log')
            for filename in filename_list:
                # CPLEX temporary files come in two flavors - cplex.log and
                # clone*.log. the latter is the case for multi-processor
                # environments.
                #
                # IMPT: trap the possible exception raised by the file not existing.
                # this can occur in pyro environments where > 1 workers are
                # running CPLEX, and were started from the same directory.
                # these logs don't matter anyway (we redirect everything),
                # and are largely an annoyance.
                try:
                    if filename == 'cplex.log':
                        os.remove(filename)
                    elif clone_re.match(filename):
                        os.remove(filename)
                except OSError:
                    pass
        except OSError:
            pass
        # reset per-solve state
        self._active_cplex_instance = None
        self._variable_symbol_map = None
        self._instance = None
        # finally, clean any temporary files registered with the temp file
        # manager, created populated *directly* by this plugin.
        pyutilib.services.TempfileManager.pop(remove=not self._keepfiles)
        # let the base class deal with returning results.
        return OptSolver._postsolve(self)
def _initialize_callbacks(self, model):
#
# Called from OptSolver
#
cplex_callback = {
"node-callback": CPLEXDirect._cplex_module.callbacks.NodeCallback,
"solve-callback": CPLEXDirect._cplex_module.callbacks.SolveCallback,
"branch-callback": CPLEXDirect._cplex_module.callbacks.BranchCallback,
"heuristic-callback": CPLEXDirect._cplex_module.callbacks.HeuristicCallback,
"incumbent-callback": CPLEXDirect._cplex_module.callbacks.IncumbentCallback,
"cut-callback": CPLEXDirect._cplex_module.callbacks.UserCutCallback,
"lazycut-callback": CPLEXDirect._cplex_module.callbacks.LazyConstraintCallback,
"crossover-callback": CPLEXDirect._cplex_module.callbacks.CrossoverCallback,
"barrier-callback": CPLEXDirect._cplex_module.callbacks.BarrierCallback,
"simplex-callback": CPLEXDirect._cplex_module.callbacks.SimplexCallback,
"presolve-callback": CPLEXDirect._cplex_module.callbacks.PresolveCallback,
"tuning-callback": CPLEXDirect._cplex_module.callbacks.TuningCallback
}
#
for name in self._callback:
try:
cb_class = cplex_callback[name]
except KeyError:
raise ValueError("Unknown callback name: %s" % name)
#
def call_fn(self, *args, **kwds):
try:
self.solver = CplexSolverWrapper(self)
self._callback[self.name](self.solver, model)
except Exception(e):
# Should we raise this exception?
print("ERROR: "+str(e))
CallbackClass = type('CallbackClass_'+name.replace('-','_'),
(cb_class,object),
{"_callback":self._callback,
"name":name,
"__call__":call_fn})
self._active_cplex_instance.register_callback(CallbackClass)
|
#coding: utf-8
# Exponential moving average (EMA) demo: N (window size) is taken from the
# command line, prices are read from data.csv, and the raw series is plotted
# against its EMA.  NOTE: Python 2 source (print statements).
import numpy as np
import sys
from matplotlib.pyplot import plot
from matplotlib.pyplot import show
# builds the weights with np.exp() / np.linspace()
N = int (sys.argv[1])
#N = 10
# exponentially increasing weights, normalized to sum to 1
weights = np.exp(np.linspace(-1,0.,N))
weights /= weights.sum()
print "Weights :", weights
# read column 6 of data.csv -- presumably the price series; confirm
# against the data file layout
c = np.loadtxt('data.csv',delimiter =',',usecols=(6,),unpack = True)
print len(c)
# convolve, keeping only the fully-overlapping portion of the result
ema = np.convolve(weights,c)[N-1:-N+1]
t = np.arange(N-1,len(c))
plot(t,c[N-1:],lw = 1.0)
plot(t,ema,lw = 2.0)
show()
|
# read the input (prompt text is Portuguese: "Type a text or word")
texto = str(input("Digite um texto ou palavra: "))
# derive variations of the text
primeira_letra = texto[0]      # first character
ultima_letra = texto[-1]       # last character
tamanho_texto = len(texto)     # NOTE: counts all characters, spaces included
texto_invertido = texto[::-1]  # reversed text
texto_maiusculo = texto.upper()
texto_minusculo = texto.lower()
# present the results (user-facing messages intentionally kept in Portuguese)
print("\n\nA primeira e última letra do texto são: " + primeira_letra + " e " + ultima_letra + ".")
print("\nO texto possui " + str(tamanho_texto) + " letras.")
print("\nTexto invertido: " + texto_invertido + ".")
print("\nTexto maiúsculo: " + texto_maiusculo + ".")
print("\nTexto minúsculo: " + texto_minusculo + ".\n\n")
import pandas as pd
import torch
from PIL import Image
import os
from maskrcnn_benchmark.structures.bounding_box import BoxList
class giro(object):
    """Detection dataset over a directory of frame images plus a
    space-separated annotation file (maskrcnn-benchmark BoxList style).

    Annotation rows carry: frame number, track ID, box corners
    (X1, Y1, X2, Y2) and an object code, padded by three dropped columns.
    """
    def __init__(self, ann_file, root, transforms=None):
        self.root = root  # directory containing the frame images
        self.img_fnames = list(os.listdir(root))
        # dataset index -> filename (used by __getitem__)
        self.fname_to_index = {idx: item for idx, item in enumerate(self.img_fnames)}
        # BUG FIX: the parameter is `ann_file`; the original read the
        # undefined name `annfile`, raising NameError on construction.
        annot = pd.read_csv(ann_file, header=None, sep=' ')
        annot = annot.rename(columns={0: "dr1", 1: "frame", 2: "ID", 3: "X1", 4: "Y1", 5: "X2", 6: "Y2", 7: "dr2", 8: "object", 9: "dr3"})
        annot.drop(columns=["dr1", "dr2", "dr3"], inplace=True)
        self.boxes = annot
        self._transforms = transforms
        # object code -> human-readable category name
        self.object_to_cat = {
            '1F': 'Front View',
            '1B': 'Back View',
            '1L': 'Left View',
            '1R': 'Right View',
            '2': ' Bicycle Crowd',
            '5H': 'High-Density Human Crowd',
            '5L': 'Low-Density Human Crowd',
            '0': 'irrelevant TV graphics'}
        # object code -> integer class label
        self.object_to_id = {'1F': 0,
                             '1B': 1,
                             '1L': 2,
                             '1R': 3,
                             '2': 4,
                             '5H': 5,
                             '5L': 6,
                             '0': 7}
    def get_frame_no(self, fname):
        """Return the integer frame number from a '<prefix>_<number>.jpg'
        filename; print a warning and return None for other extensions."""
        tmp = fname.split('_')
        if tmp[1][-4:] == '.jpg':
            return int(tmp[1][:-4])
        else:
            print('invalid filename')
    def __getitem__(self, idx):
        """Return (image, boxlist, idx) for the idx-th image file."""
        # load the image as a PIL Image
        image = Image.open(os.path.join(self.root, self.fname_to_index[idx]))
        # select this frame's annotation rows; boxes are x1, y1, x2, y2
        M_ = self.boxes[self.boxes.frame == self.get_frame_no(self.fname_to_index[idx])]
        m__ = M_[["X1", "Y1", "X2", "Y2"]].values.tolist()
        labels = torch.tensor([self.object_to_id[obj] for obj in list(M_['object'])])
        boxlist = BoxList(m__, image.size, mode="xyxy")
        # add the labels to the boxlist
        boxlist.add_field("labels", labels)
        # BUG FIX: the attribute is `_transforms`; the original called the
        # nonexistent `self.transforms`, raising AttributeError whenever a
        # transform pipeline was supplied.
        if self._transforms:
            image, boxlist = self._transforms(image, boxlist)
        return image, boxlist, idx
|
# encoding: utf-8
import hashlib
from datetime import datetime
from werkzeug import generate_password_hash, check_password_hash, cached_property
from flask import Blueprint, url_for, redirect, g, \
flash, request, current_app, render_template, send_from_directory, views, session
from flask.ext.login import login_user, logout_user, \
login_required, current_user, confirm_login, UserMixin
from flask.ext.sqlalchemy import BaseQuery
from flask.ext.wtf import Form, TextField, PasswordField, BooleanField, HiddenField, SubmitField, SelectField
from english.extension import db, login as login_manager
# All user-related routes live under the /user prefix.
bp_users = Blueprint('users', __name__, url_prefix='/user')
#--------------------models---------------------------------------------------------------
class UserQuery(BaseQuery):
    """Query class for User adding credential-verification helpers."""

    def authenticate(self, login, password):
        """Look up a user by username or email.

        Returns a (user_or_None, authenticated) tuple; authenticated is
        False when no user matched or the password check failed.
        """
        user = self.filter(db.or_(User.username == login,
                                  User.email == login)).first()
        authenticated = user.check_password(password) if user else False
        return user, authenticated

    def authenticate_openid(self, email, openid):
        """Look up a user by email and verify the supplied openid.

        Returns a (user_or_None, authenticated) tuple mirroring
        authenticate().
        """
        user = self.filter(User.email == email).first()
        authenticated = user.check_openid(openid) if user else False
        return user, authenticated
class User(db.Model, UserMixin):
    """Account model: hashed password/openid, simple integer role levels."""
    query_class = UserQuery
    # user roles (higher number = more privilege)
    EDITOR = 100
    ADMIN = 400
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.Unicode(60), unique=True, nullable=False)
    email = db.Column(db.String(150))
    number = db.Column(db.String(150))
    date_joined = db.Column(db.DateTime, default=datetime.utcnow)
    role = db.Column(db.Integer, default=EDITOR)
    _password = db.Column("password", db.String(80))  # werkzeug hash, never plaintext
    _openid = db.Column("openid", db.String(80), unique=True)  # stored hashed too
    active = db.Column(db.Boolean, default=True)
    def __init__(self, *args, **kwargs):
        super(User, self).__init__(*args, **kwargs)
    def __str__(self):
        return self.username
    def __repr__(self):
        return "<%s>" % self
    def is_active(self):
        # Flask-Login hook: inactive accounts cannot log in.
        return self.active
    def _get_password(self):
        return self._password
    def _set_password(self, password):
        # Hash on assignment so plaintext is never persisted.
        self._password = generate_password_hash(password)
    # `user.password = x` hashes x; reading `user.password` returns the hash.
    password = db.synonym("_password",
                          descriptor=property(_get_password,
                                              _set_password))
    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        if self.password is None:
            return False
        return check_password_hash(self.password, password)
    def _get_openid(self):
        return self._openid
    def _set_openid(self, openid):
        # openid is hashed on assignment, same pattern as password
        self._openid = generate_password_hash(openid)
    openid = db.synonym("_openid",
                        descriptor=property(_get_openid,
                                            _set_openid))
    @property
    def is_admin(self):
        return self.role >= self.ADMIN
    @property
    def is_editor(self):
        return self.role >= self.EDITOR
    def check_openid(self, openid):
        """Return True if *openid* matches the stored hash."""
        if self.openid is None:
            return False
        return check_password_hash(self.openid, openid)
    @cached_property
    def gravatar(self):
        """MD5 hex digest of the normalized email; '' when no email is set.

        NOTE(review): md5.update() on a str only works on Python 2 (this
        module uses py2 print syntax elsewhere); would need .encode() on py3.
        """
        if not self.email:
            return ''
        md5 = hashlib.md5()
        md5.update(self.email.strip().lower())
        return md5.hexdigest()
    def gravatar_url(self, size=80):
        """Gravatar image URL sized *size* pixels; '' when no email is set."""
        if not self.gravatar:
            return ''
        return "http://www.gravatar.com/avatar/%s.jpg?s=%d" % (
            self.gravatar, size)
#--------------------forms---------------------------------------------------------------
class LoginForm(Form):
    """Login form; field labels are Chinese (username / password / remember)."""
    username = TextField('用户名')
    password = PasswordField('密码')
    remember = BooleanField('记住登录')
    # carries the post-login redirect target across the POST
    next = HiddenField()
    submit = SubmitField('登录')
class UserForm(Form):
    """Admin-side user edit form; labels are Chinese (username/password/role)."""
    username = TextField('用户名')
    password = TextField('密码')
    email = TextField('Email')
    # only the admin role is currently offered as a choice
    role = SelectField('角色', choices=[(User.ADMIN, '管理员')],
                       coerce=int)
#--------------------views---------------------------------------------------------------
@login_manager.user_loader
def load_user(userid):
    """Flask-Login user loader: fetch a User by primary key (None if absent)."""
    return User.query.get(userid)
@bp_users.route("/login", methods=("GET", "POST"))
def login():
    """Render and process the login form.

    On a valid POST, authenticates against UserQuery.authenticate and
    redirects to the `next` target (or the site root); otherwise
    re-renders the form with a flash message.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # SECURITY FIX: removed the debug `print` that wrote the submitted
        # username AND plaintext password to stdout/server logs.
        user, success = User.query.authenticate(form.username.data, form.password.data)
        if success:
            flash("Welcome back.")
            login_user(user, remember=form.remember.data)
            return redirect(request.args.get('next') or '/')
        else:
            flash("Eh, wrong name.", "error")
    return render_template('user/login.html', form=form)
@bp_users.route('/logout')
@login_required
def logout():
    """Log the current user out, then redirect to `next` or the site root."""
    logout_user()
    return redirect(request.args.get('next') or '/')
|
# Read N, then print the maximum of the first N space-separated integers
# on the next input line.
N = int(input())
# FIX: dropped the dead `number = []` that was immediately overwritten.
number = list(map(int, input().split()))[:N]
print(max(number))
from notesentry import NotesEntry
class NotesView:
    '''
    Wrapper for lotus.domino.View class
    '''

    def __init__(self, view):
        self.view = view
        # entry IDs observed on the previous get_new_entries() call
        self.known_entries = []

    def _entrycollection_to_list(self, collection):
        '''Walk an entry collection cursor-style, wrapping each in NotesEntry.'''
        wrapped = []
        cursor = collection.getFirstEntry()
        while cursor:
            wrapped.append(NotesEntry(cursor))
            cursor = collection.getNextEntry()
        return wrapped

    def get_entries(self):
        '''Return every entry in the view.'''
        return self._entrycollection_to_list(self.view.getAllEntries())

    def get_unread_entries(self):
        '''Return every unread entry in the view.'''
        return self._entrycollection_to_list(self.view.getAllUnreadEntries())

    def get_new_entries(self):
        '''
        Return the IDs of unread entries not seen on the previous call.

        Checks new unread entry IDs against previously known list of IDs.
        If an entry is marked as unread it will be picked up again as a new
        unread entry.
        TODO: not sure if I want to "fix" this
        '''
        fresh_ids = []
        seen_ids = []
        for entry in self.get_unread_entries():
            if entry.ID not in self.known_entries:
                fresh_ids.append(entry.ID)
            seen_ids.append(entry.ID)
        self.known_entries = seen_ids
        return fresh_ids
|
import os
import configargparse
# Repository root: the parent of the directory containing this file.
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def create_config():
    """Parse configuration from config files, environment variables and CLI.

    Uses configargparse with defaults searched in conf_local.ini, conf.ini
    (both under BASE_DIR) and /etc/cert-issuer/conf.ini; each option can
    also be supplied via the listed environment variable.
    Returns the parsed args namespace.
    """
    p = configargparse.getArgumentParser(default_config_files=[os.path.join(BASE_DIR, 'conf_local.ini'),
                                                               os.path.join(BASE_DIR, 'conf.ini'),
                                                               '/etc/cert-issuer/conf.ini'])
    p.add('-c', '--my-config', required=False, is_config_file=True, help='config file path')
    p.add_argument('--notifier_type', default='noop', type=str, env_var='NOTIFIER_TYPE',
                   help='type of notification on certificate introduction')
    p.add_argument('--mongodb_uri', default='mongodb://localhost:27017/test', type=str, env_var='MONGODB_URI',
                   help='mongo connection URI')
    p.add_argument('--mandrill_api_key', type=str, env_var='MANDRILL_API_KEY',
                   help='Mandrill API key; needed if notifier_type is mail')
    p.add_argument('--issuer_email', type=str, env_var='ISSUER_EMAIL',
                   help='email address from which notification should be sent')
    p.add_argument('--issuer_name', type=str, env_var='ISSUER_NAME', help='name from which notification should be sent')
    p.add_argument('--subject', type=str, env_var='SUBJECT', help='notification subject line')
    p.add_argument('--recent_certids', type=str, env_var='RECENT_CERTIDS', help='recent certificate ids')
    p.add_argument('--secret_key', type=str, env_var='SECRET_KEY',
                   help='Flask secret key, to enable cryptographically signed sessions')
    p.add_argument('--cert_store_type', type=str, env_var='CERT_STORE_TYPE',
                   help='type of key value store to use for Cert Store')
    p.add_argument('--cert_store_path', type=str, env_var='CERT_STORE_PATH', help='path to file system Cert Store')
    p.add_argument('--v1_aware', action='store_true', env_var='V1_AWARE', help='Whether to support v1 certs')
    p.add_argument('--site_description', env_var='SITE_DESCRIPTION',
                   help='Site description got issuer that is hosting the certificate viewer',
                   default='A Blockchain Certificate Issuer Site')
    p.add_argument('--issuer_logo_path', env_var='ISSUER_LOGO_PATH',
                   help='Issuer logo to display in certificate viewer', default='img/logo.png')
    p.add_argument('--theme', env_var='THEME', help='Flask theme to use for display', default='default')
    # parse_known_args ignores unrecognized options instead of erroring out
    args, _ = p.parse_known_args()
    return args
# Cached configuration, populated lazily by get_config().
parsed_config = None
def get_config():
    """Return the parsed configuration, creating and caching it on first use."""
    global parsed_config
    if not parsed_config:
        parsed_config = create_config()
    return parsed_config
|
#!/usr/bin/env python
# ==================================================================================== #
#
# Copyright (c) 2017 Raffaele Bua (buele)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==================================================================================== #
from src.data.logger.logger import logger
from src.core.Downloader import Downloader
from src.data.database.db import DB
import configparser
from src.data.database.services.products_service import ProductsService
from src.data.database.services.products_service import ProductStatus
# Module metadata.
__author__ = "Raffaele Bua (buele)"
__copyright__ = "Copyright 2017, Raffaele Bua"
__credits__ = ["Raffaele Bua"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Raffaele Bua"
__contact__ = "info@raffaelebua.eu"
__status__ = "Development"
class DownloaderManager:
    """Coordinates product downloads for the ichnosat platform.

    Wraps the Downloader and the products database service, repairs
    inconsistent product states after interrupted runs, and manages the
    first-installation flag stored in the manager's config file.
    (Docstring corrected: the old one was copy-pasted from the platform
    manager and described admin functionality that does not live here.)
    """
    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config_file_path = "src/downloader_manager/config/config.cfg"
        self.config.read(self.config_file_path)
        self.productService = ProductsService()
        self.downloader = Downloader()
    def trigger_downloader(self):
        """Start the downloader, logging (not raising) any failure."""
        logger.debug("(DownloaderManager trigger_downloader) ")
        logger.debug("(DownloaderManager trigger_downloader) call downloader ")
        try:
            self.downloader.start()
        except Exception as err:
            # FIX: log message previously misspelled "Unexpeted".
            logger.debug("(DownloaderManager trigger_downloader) Unexpected error:")
            logger.debug(err)
    def create_database(self):
        """Create the database schema; return True on success, False on failure."""
        try:
            db = DB()
            db.create_db()
            return True
        except Exception as err:
            logger.debug("(DownloaderManager create_database) Unexpected error:")
            logger.debug(err)
            return False
    def fix_inconsistent_data_in_db(self):
        """Reset products stuck in 'downloading' (e.g. after a crash) to pending."""
        downloading_products = self.productService.get_downloading_products()
        for product in downloading_products:
            self.productService.update_product_status(product.name, ProductStatus.pending)
    def get_pending_products(self):
        """Return products waiting to be downloaded."""
        logger.debug("(DownloaderManager get_pending_products) ")
        return self.productService.get_pending_products()
    def get_downloading_products(self):
        """Return products currently being downloaded."""
        return self.productService.get_downloading_products()
    def get_downloaded_products(self):
        """Return products that have finished downloading."""
        return self.productService.get_downloaded_products()
    def is_first_installation(self):
        """Return True when the config marks this as a first installation."""
        self.config.read(self.config_file_path)
        # Direct comparison replaces the redundant `True if ... else False`.
        return self.config['SYSTEM_STATUS']['first_installation'] == 'true'
    def set_first_installation_config(self, new_status):
        """Persist the first-installation flag; return True on success."""
        try:
            value = 'true' if new_status else 'false'
            config = configparser.RawConfigParser()
            config.read(self.config_file_path)
            config.set('SYSTEM_STATUS', 'first_installation', value)
            with open(self.config_file_path, 'w') as configfile:
                config.write(configfile)
            return True
        except Exception as err:
            logger.debug("(DownloaderManager set_first_installation_config) Unexpected error:")
            logger.debug(err)
            return False
|
# Generated by Django 3.0.5 on 2020-05-05 08:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.5, 2020-05-05).

    Replaces the single `weapons` field on `character` with three
    weapon/damage/damagetype column triples, and widens several
    free-text fields to max_length=3000.
    """
    dependencies = [
        ('mainapp', '0003_character_armor_class'),
    ]
    operations = [
        # drop the old single-field weapon storage
        migrations.RemoveField(
            model_name='character',
            name='weapons',
        ),
        migrations.AddField(
            model_name='character',
            name='damage1',
            field=models.CharField(blank=True, max_length=5),
        ),
        migrations.AddField(
            model_name='character',
            name='damage2',
            field=models.CharField(blank=True, max_length=5),
        ),
        migrations.AddField(
            model_name='character',
            name='damage3',
            field=models.CharField(blank=True, max_length=5),
        ),
        migrations.AddField(
            model_name='character',
            name='damagetype1',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='character',
            name='damagetype2',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='character',
            name='damagetype3',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='character',
            name='weapon1',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='character',
            name='weapon2',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='character',
            name='weapon3',
            field=models.CharField(blank=True, max_length=30),
        ),
        # widen free-text fields to 3000 characters
        migrations.AlterField(
            model_name='character',
            name='additional',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='backstory',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='bonds',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='flaws',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='ideals',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='organizations',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='traits',
            field=models.CharField(blank=True, max_length=3000),
        ),
        migrations.AlterField(
            model_name='character',
            name='treasure',
            field=models.CharField(blank=True, max_length=3000),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
from skimage.util.shape import view_as_windows
import time
# Reset any residual rcParams, then apply the seaborn-deep style globally.
plt.rcParams.update(plt.rcParamsDefault)
plt.style.use('seaborn-deep')
def disp(loci, locj):
    '''
    Euclidean distance between locations i and j, given as numpy
    coordinate vectors.
    '''
    delta = locj - loci
    return np.sqrt(np.dot(delta, delta))
def plot_pop(population, show=True, **kwargs):
    '''
    Scatter-plot the locations of a population object.

    population: object exposing a 2-D coordinate array `locCoords`
        (column 0 = x, column 1 = y)
    show: when True, call plt.show() after drawing
    **kwargs: accepted for call compatibility; currently unused

    TODO: area proportional to population?
    '''
    coords = population.locCoords
    plt.scatter(coords[:,0], coords[:,1], s=100, color='grey')
    plt.xlabel(r'$x$', fontsize=20)
    plt.ylabel(r'$y$', fontsize=20)
    plt.tick_params(axis='both', labelsize=15)
    plt.tight_layout()
    if show:
        plt.show()
    return
def sum4s(m):
    '''
    Splits NxN matrix m (N even) into non-overlapping 2x2 submatrices
    (chequerboard) and returns the sum of all elements in each submatrix
    as an (N/2)x(N/2) matrix.

    Implemented with a pure-numpy reshape instead of
    skimage.util.view_as_windows: reshaping to (N/2, 2, N/2, 2) puts each
    2x2 block on axes (1, 3), so summing those axes yields the block sums
    in the same row-major block order.
    '''
    half = len(m) // 2
    return m.reshape(half, 2, half, 2).sum(axis=(1, 3))
def gamma_est(S, exp=False):
    '''
    Estimate the gamma exponent from the average population unit area <S>
    (in m^2), following Lenormand et al. 2012.  With exp=True the
    exponential-form estimate is returned instead of the power-law form.
    '''
    area_km2 = S / 1000000
    if exp:
        # divide by 1000 to account for the change in distance units
        return 0.3 * area_km2 ** (-0.18) / 1000
    return 1.4 * area_km2 ** (0.11)
def time_label():
    '''
    Current datetime string (e.g. 'Jan-05-2024_1342') for labelling
    plots/data files.
    '''
    return time.strftime('%b-%d-%Y_%H%M', time.localtime())
|
# Generated by Django 3.2.5 on 2021-07-22 02:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.2.5, 2021-07-22): alter
    Task.note to CharField(max_length=255)."""
    dependencies = [
        ('project', '0005_task_note'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='note',
            field=models.CharField(max_length=255),
        ),
    ]
|
from auto_layer import auto_layer_core
import maya.cmds as cmds
class Core(object):
    """Artist tooling for setting the V-Ray Z-depth range on DTX shots
    via a movable cone used as a visual depth aid.  NOTE: Python 2
    source (print statements)."""
    def create_depth_cone(self):
        """
        Creates a visual Aid for large scale sets on DTX
        Returns:None
        """
        self.set_render_layer()
        # rebuild the cone from scratch every time
        if cmds.objExists('DTX_Depth_Cone'):
            cmds.delete('DTX_Depth_Cone')
        cmds.polyCone(name='DTX_Depth_Cone')
        cmds.scale(250, 250, 250, 'DTX_Depth_Cone')
        cmds.setAttr('DTX_Depth_Cone.castsShadows', 1)
        cmds.setAttr('DTX_Depth_Cone.primaryVisibility', 1)
        # flat red matte shader makes the cone obvious in the viewport
        auto_layer_core.Matte().create_RGB_shader('Red')
        auto_layer_core.Matte().force_shader('DTX_Depth_Cone', 'Red')
        # snap the cone to the camera rig's depth locator, then bake the
        # transform so it can be moved freely afterwards
        cmds.parentConstraint('*:DTX_Ren_CamRg_A:Depth_Locator_Ctrl', 'DTX_Depth_Cone')
        cmds.delete('DTX_Depth_Cone', constraints=True)
        cmds.makeIdentity('DTX_Depth_Cone', apply=True, translate=True, scale=True)
        # lock rotation so the artist can only translate the cone
        cmds.setAttr('DTX_Depth_Cone.rx', lock=True)
        cmds.setAttr('DTX_Depth_Cone.ry', lock=True)
        cmds.setAttr('DTX_Depth_Cone.rz', lock=True)
        # drive the locator from the cone so moving the cone sets the depth
        cmds.parentConstraint('DTX_Depth_Cone', '*:DTX_Ren_CamRg_A:Depth_Locator_Ctrl')
    @staticmethod
    def set_render_layer():
        """
        Sets the renderlayer to default
        Returns:None
        """
        for layer in cmds.ls(type='renderLayer'):
            if 'defaultRenderLayer' == layer:
                cmds.editRenderLayerGlobals(currentRenderLayer=layer)
    def set_up_display(self):
        """Configure modelPanel4 for cone placement and show a HUD button
        that captures the depth when pressed.
        Returns:None
        """
        cmds.modelEditor('modelPanel4', e=True, allObjects=True)
        cmds.modelEditor('modelPanel4', e=True, cameras=True, imagePlane=True,
                         lights=False, hud=True, nurbsCurves=False)
        # remove any stale HUD button before recreating it
        cmds.headsUpDisplay('HUD_Depth', remove=True)
        cmds.button('zdepthButton', e=True, vis=False)
        cmds.hudButton('HUD_Depth', s=7, b=6, vis=1, l='Set Zdepth', bw=800, bsh='roundRectangle',
                       pressCommand=lambda *args: self.get_depth(), releaseCommand=lambda *args: self.release())
    def release(self, *args):
        """Remove the HUD button when it is released.
        Returns:None
        """
        cmds.headsUpDisplay('HUD_Depth', remove=True)
    def set_depth_locator(self):
        """
        Configures the maya viewport to make it easier for the artist to move the depth cone
        Returns:None
        """
        self.create_depth_cone()
        self.set_up_display()
        cmds.lookThru('persp')
        cmds.refresh(cv=True)
        cmds.select('DTX_Depth_Cone')
        cmds.viewFit(f=0.2)
    def get_depth(self, *args):
        """
        Gets the depth information set by the artist
        Returns:None
        """
        render_camera_depth = cmds.ls('*:DTX_Ren_CamRg_A:Depth_Measure_Tool')
        zdepth_distance = cmds.getAttr('{}.distance'.format(render_camera_depth[0]))
        print zdepth_distance
        # hide the aid and return to the render camera before applying
        cmds.hide('DTX_Depth_Cone')
        cmds.lookThru('*:DTX_Ren_CamRg_A:Render_CamShape')
        cmds.refresh(cv=True)
        self.set_depth(zdepth_distance)
    def set_depth(self, zdepth_value):
        """
        Applies the measured depth distance to every V-Ray Z_Depth render
        element and reflects the value in the UI.
        Returns:None
        """
        # cmds.headsUpDisplay('HUD_Depth', remove=True)
        render_elements_list = cmds.ls(type='VRayRenderElement')
        for render_element in render_elements_list:
            if 'Z_Depth' in render_element:
                print render_element
                cmds.setAttr('{}.vray_depthBlack'.format(render_element), zdepth_value)
        cmds.floatFieldGrp('zdepthItem', e=True, value1=zdepth_value, enable=True)
        cmds.button('zdepthButton', e=True, vis=True)
# Core().set_depth_locator()
|
import nltk
from nltk import FreqDist
from nltk.corpus import PlaintextCorpusReader
import re
from nltk.collocations import *
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
def alpha_filter(w):
    # Return True when *w* consists entirely of characters outside a-z
    # (pure punctuation/digits/uppercase); False otherwise, including for
    # the empty string.  Idiom fix: return the boolean directly instead
    # of the if/else ladder.
    pattern = re.compile('^[^a-z]+$')
    return pattern.match(w) is not None
def get_part_content(filename):
    # Return the raw text of *filename*, read through a plaintext corpus
    # reader rooted at the current directory (matching .txt files).
    reader = PlaintextCorpusReader('.', '.*\.txt')
    return reader.raw(filename)
def get_word_pos(word_pos_tag):
    # Map a Penn Treebank POS tag prefix to the corresponding wordnet tag;
    # return None when there is no wordnet equivalent.
    for prefix, wn_tag in (('J', wordnet.ADJ),
                           ('V', wordnet.VERB),
                           ('N', wordnet.NOUN),
                           ('R', wordnet.ADV)):
        if word_pos_tag.startswith(prefix):
            return wn_tag
    return None
def do_word_tokenize(content):
    # Tokenize *content* into a list of word tokens.
    return nltk.word_tokenize(content)
def do_lower(content):
    # Lowercase every token in *content*, returning a new list.
    return [token.lower() for token in content]
def get_alphabetical_words(content):
    # Keep only tokens that are NOT purely non-alphabetical
    # (i.e. drop tokens alpha_filter flags).
    return [token for token in content if not alpha_filter(token)]
def get_stopwords():
    # Combine NLTK's English stopwords with the SMART stopword file plus a
    # few manual additions; de-duplicated (order not preserved).
    stop_words = nltk.corpus.stopwords.words('english')
    # FIX: use a context manager so the file handle is closed even if
    # reading fails (the original opened/closed manually).
    with open('./Smart.English.stop', 'r') as stop:
        stop_text = stop.read()
    c_stopwords = nltk.word_tokenize(stop_text)
    c_stopwords.extend(["'m", "n't", "'s", "make"])
    stop_words.extend(c_stopwords)
    stop_words = list(set(stop_words))
    return stop_words
def filter_content(content, filter_words):
    # Return tokens from *content* not present in *filter_words*.
    # PERF: convert filter_words to a set once, so membership is O(1)
    # instead of scanning the (potentially long) stopword list per token.
    excluded = set(filter_words)
    return [w for w in content if w not in excluded]
def do_print_top_50(content):
    # Print the 50 most frequent (token, count) pairs, then a separator.
    for pair in FreqDist(content).most_common(50):
        print(pair)
    print('-----------------------------------')
def get_bi_gram_association_measures():
    # Return a fresh nltk BigramAssocMeasures instance.
    return nltk.collocations.BigramAssocMeasures()
def finder_filter(content):
    # Build a bigram collocation finder over *content*, dropping bigrams
    # seen fewer than 5 times.
    bigram_finder = BigramCollocationFinder.from_words(content)
    bigram_finder.apply_freq_filter(5)
    return bigram_finder
def print_bigram_score(bigram_measures, finder):
    # Print the top-50 bigrams scored by raw frequency (sorted by the
    # finder, decreasing), then a separator line.
    for entry in finder.score_ngrams(bigram_measures.raw_freq)[:50]:
        print(entry)
    print('-----------------------------------')
def print_PMI_measures(bigram_measures, finder):
    """Print the top-50 bigrams ranked by pointwise mutual information."""
    for entry in finder.score_ngrams(bigram_measures.pmi)[:50]:
        print(entry)
# Pipeline driver: load one part of the State of the Union corpus, normalise
# it, then report unigram and bigram statistics.
raw_text = get_part_content('state_union_part1.txt')
# raw_text = get_part_content('state_union_part2.txt')
# Tokenise the raw text and POS-tag every token.
tagged_tokens = pos_tag(do_word_tokenize(raw_text))
# Lemmatise each token, defaulting to the noun POS when the Treebank tag has
# no WordNet counterpart.
lemmatizer = WordNetLemmatizer()
lemmas = [
    lemmatizer.lemmatize(token, pos=get_word_pos(tag) or wordnet.NOUN)
    for token, tag in tagged_tokens
]
# Normalise case, keep alphabetical tokens only, and strip stop words.
cleaned_tokens = filter_content(get_alphabetical_words(do_lower(lemmas)), get_stopwords())
# Unigram report: 50 most frequent tokens.
do_print_top_50(cleaned_tokens)
# Bigram reports: raw-frequency ranking over all bigrams, then PMI over the
# bigrams that occur at least 5 times.
measures = get_bi_gram_association_measures()
print_bigram_score(measures, BigramCollocationFinder.from_words(cleaned_tokens))
print_PMI_measures(measures, finder_filter(cleaned_tokens))
|
# https://www.hackerrank.com/challenges/diagonal-difference/problem
def diagonalDifference(arr):
    """
    Return the absolute difference between the sums of the two diagonals
    of the matrix *arr* (HackerRank "Diagonal Difference").
    """
    rows = len(arr)
    cols = len(arr[0])
    # Primary diagonal: top-left to bottom-right.
    primary = sum(arr[i][i] for i in range(rows))
    # Secondary diagonal: top-right to bottom-left.
    secondary = sum(arr[i][cols - 1 - i] for i in range(rows))
    return abs(primary - secondary)
|
from rfc3987 import parse
import socket
import re
import urllib2
class Validations:
    """
    Validation helpers for domain -> URL redirections.

    Combines syntactic checks (regular expressions, RFC 3987 URL parsing)
    with live checks (DNS resolution, HTTP reachability) and a duplicate
    lookup against the backing database.
    """

    def __init__(self, db, domain_config):
        self.db = db
        # Keep the DNS/HTTP checks from hanging a request indefinitely.
        socket.setdefaulttimeout(30)
        self.local_domain = domain_config.get('name', None)

    def check_valid_domain(self, domain):
        """Return a match object if *domain* is syntactically valid, else None."""
        pattern = re.compile(
            r'^(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|'
            r'([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|'
            r'([a-zA-Z0-9][-_.a-zA-Z0-9]{1,61}[a-zA-Z0-9]))\.'
            # The separator between two-label TLDs (e.g. "co.uk") was an
            # unescaped '.', which matched ANY character; escape it so only a
            # literal dot is accepted.
            r'([a-zA-Z]{2,13}|[a-zA-Z0-9-]{2,30}\.[a-zA-Z]{2,3})$'
        )
        return pattern.search(domain)

    def check_destination_url(self, url):
        """Return truthy if *url* parses per RFC 3987 with a scheme and an authority."""
        try:
            o = parse(url)
            return o.get('scheme') and o.get('authority')
        except ValueError:
            return False

    def check_domain_exists(self, domain):
        """Return True if *domain* currently resolves in DNS."""
        try:
            socket.gethostbyname(domain)
            return True
        except socket.gaierror:
            return False

    def check_local_domain(self, domain):
        """Return a match if *domain* is a subdomain of the configured parent domain."""
        pattern = re.compile(
            r'^(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|'
            r'([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|'
            r'([a-zA-Z0-9][-_.a-zA-Z0-9]{1,61}[a-zA-Z0-9]))\.'
            # re.escape so the dots of the parent domain match literally
            # instead of any character.
            r'%s$' % re.escape(self.local_domain)
        )
        return pattern.search(domain)

    def check_url_valid_status_code(self, url):
        """Return True if fetching *url* raises neither an HTTP nor a URL error."""
        try:
            urllib2.urlopen(url)
            return True
        # 'except X, e' is Python-2-only syntax and the bound name was unused;
        # a single tuple clause handles both failure types.
        except (urllib2.HTTPError, urllib2.URLError):
            return False

    def is_duplicated_in_db(self, domain):
        """Return True if the database already holds a redirection for *domain*."""
        return self.db.get_redirection_by_domain(domain) is not None

    def _common_checks(self, domain, url):
        """
        Run the validation steps shared by add-time and status-time checks.

        Returns:
            An error response dict, or None when every check passed.
        """
        if not self.check_valid_domain(domain):
            return { "status": 500, "message": "Invalid domain" }
        if not self.check_destination_url(url):
            return { "status": 500, "message": "Invalid redirection url" }
        if self.local_domain and not self.check_domain_exists(domain) and not self.check_local_domain(domain):
            return { "status": 500, "message": "The domain doesn't exists and doesn't pertain to the %s parent domain" % self.local_domain }
        if not self.check_url_valid_status_code(url):
            return { "status": 500, "message": "The redirection URL doesn't return a valid status code" }
        return None

    def check_redirection_can_be_added(self, domain, url):
        """Validate a new redirection; additionally rejects duplicate domains."""
        error = self._common_checks(domain, url)
        if error is not None:
            return error
        if self.is_duplicated_in_db(domain):
            return { "status": 500, "message": "The domain already has a redirection" }
        return { "status": 200, "message": "ok " }

    def check_redirection_status(self, redirection):
        """Re-validate an existing redirection record (dict with 'domain' and 'url')."""
        error = self._common_checks(redirection.get("domain"), redirection.get("url"))
        if error is not None:
            return error
        return { "status": 200, "message": "ok " }
|
__version__ = "2.3.0"
import abc
from enum import Enum
from typing import (
Dict,
Iterator,
List,
Tuple,
Union,
Any,
Optional,
Callable,
overload,
TypeVar,
Type,
)
import PyQt5.QtCore
import PyQt5.QtGui
import PyQt5.QtWidgets
# Variant value type: None, bool, int, str, or (possibly nested) lists/dicts.
MoVariant = Union[None, bool, int, str, List[Any], Dict[str, Any]]
# Type variable used to parametrize game-feature lookups.
GameFeatureType = TypeVar("GameFeatureType")
class InterfaceNotImplemented: ...
def getFileVersion(filepath: str) -> str:
    """
    Retrieve the file version of the given executable.
    Args:
        filepath: Absolute path to the executable.
    Returns:
        The file version, or an empty string if the file version could not be retrieved.
    """
    # Implementation elided in this stub.
    ...
def getIconForExecutable(executable: str) -> PyQt5.QtGui.QIcon:
    """
    Retrieve the icon of an executable. Currently this always extracts the biggest icon.
    Args:
        executable: Absolute path to the executable.
    Returns:
        The icon for this executable, if any.
    """
    # Implementation elided in this stub.
    ...
def getProductVersion(executable: str) -> str:
    """
    Retrieve the product version of the given executable.
    Args:
        executable: Absolute path to the executable.
    Returns:
        The product version, or an empty string if the product version could not be retrieved.
    """
    # Implementation elided in this stub.
    ...
class GuessQuality(Enum):
    """
    Describes how good the code considers a guess (i.e. for a mod name); this is used to
    determine whether a name from another source should overwrite the current one.
    """
    INVALID = ...
    FALLBACK = ...
    GOOD = ...
    META = ...
    PRESET = ...
    USER = ...
    # Bitwise-style operators exposed by the underlying enum type.
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is not a recognized dunder — likely a stub-generator
    # typo for "__ror__"; confirm against the runtime module before relying on it.
    def __ro__(self, other: int) -> bool: ...
class InstallResult(Enum):
    """
    Possible outcomes of a mod installation (concrete values elided in this stub).
    """
    SUCCESS = ...
    FAILED = ...
    CANCELED = ...
    MANUAL_REQUESTED = ...
    NOT_ATTEMPTED = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class LoadOrderMechanism(Enum):
    """
    Load-order mechanism identifiers: FILE_TIME vs. PLUGINS_TXT
    (concrete values elided in this stub).
    """
    FILE_TIME = ...
    PLUGINS_TXT = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class ModState(Enum):
    """
    Flag-like states a mod can carry (concrete values elided in this stub).
    """
    EXISTS = ...
    ACTIVE = ...
    ESSENTIAL = ...
    EMPTY = ...
    ENDORSED = ...
    VALID = ...
    ALTERNATE = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class PluginState(Enum):
    """
    State of a plugin: missing, inactive or active (values elided in this stub).
    """
    MISSING = ...
    INACTIVE = ...
    ACTIVE = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class ProfileSetting(Enum):
    """
    Per-profile setting switches (concrete values elided in this stub).
    """
    MODS = ...
    CONFIGURATION = ...
    SAVEGAMES = ...
    PREFER_DEFAULTS = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class ReleaseType(Enum):
    """
    Release maturity levels, from pre-alpha to final (values elided in this stub).
    """
    PRE_ALPHA = ...
    ALPHA = ...
    BETA = ...
    CANDIDATE = ...
    FINAL = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class SortMechanism(Enum):
    """
    Supported plugin sort mechanisms (NONE, MLOX, BOSS, LOOT);
    concrete values elided in this stub.
    """
    NONE = ...
    MLOX = ...
    BOSS = ...
    LOOT = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class VersionScheme(Enum):
    """
    Schemes used to interpret version strings (values elided in this stub).
    """
    DISCOVER = ...
    REGULAR = ...
    DECIMAL_MARK = ...
    NUMBERS_AND_LETTERS = ...
    DATE = ...
    LITERAL = ...
    def __and__(self, other: int) -> bool: ...
    def __or__(self, other: int) -> bool: ...
    def __rand__(self, other: int) -> bool: ...
    # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
    def __ro__(self, other: int) -> bool: ...
class BSAInvalidation(abc.ABC):
    """
    Abstract interface for toggling BSA invalidation on a profile.
    """
    def __init__(self): ...
    # Enable BSA invalidation for the given profile.
    @abc.abstractmethod
    def activate(self, profile: "IProfile"): ...
    # Disable BSA invalidation for the given profile.
    @abc.abstractmethod
    def deactivate(self, profile: "IProfile"): ...
    # Tell whether *name* designates an invalidation BSA.
    @abc.abstractmethod
    def isInvalidationBSA(self, name: str) -> bool: ...
class DataArchives(abc.ABC):
    """
    Abstract interface to the list of data archives associated with a profile.
    """
    def __init__(self): ...
    @abc.abstractmethod
    def addArchive(self, profile: "IProfile", index: int, name: str):
        """
        Add an archive to the archive list.
        Args:
            profile: Profile to add the archive to.
            index: Index to insert before. Use 0 for the beginning of the list or INT_MAX for the end of the list.
            name: Name of the archive to add.
        """
        ...
    @abc.abstractmethod
    def archives(self, profile: "IProfile") -> List[str]:
        """
        Retrieve the list of archives in the given profile.
        Args:
            profile: Profile to retrieve archives from.
        Returns:
            The list of archives in the given profile.
        """
        ...
    @abc.abstractmethod
    def removeArchive(self, profile: "IProfile", name: str):
        """
        Remove the given archive from the given profile.
        Args:
            profile: Profile to remove the archive from.
            name: Name of the archive to remove.
        """
        ...
    @abc.abstractmethod
    def vanillaArchives(self) -> List[str]:
        """
        Retrieve the list of vanilla archives.
        Vanilla archives are archive files that are shipped with the original
        game.
        Returns:
            The list of vanilla archives.
        """
        ...
class ExecutableForcedLoadSetting:
    """
    Setting pairing a process with a library to be force-loaded into it.
    Immutable-style: the with*() methods return updated copies.
    """
    def __init__(self, process: str, library: str): ...
    def enabled(self) -> bool: ...
    def forced(self) -> bool: ...
    def library(self) -> str: ...
    def process(self) -> str: ...
    def withEnabled(self, enabled: bool) -> "ExecutableForcedLoadSetting": ...
    def withForced(self, forced: bool) -> "ExecutableForcedLoadSetting": ...
class ExecutableInfo:
    """
    Information about an executable: title, binary, arguments, working
    directory and Steam app ID. The with*() methods return updated copies.
    """
    def __init__(self, title: str, binary: PyQt5.QtCore.QFileInfo): ...
    def arguments(self) -> List[str]: ...
    def asCustom(self) -> "ExecutableInfo": ...
    def binary(self) -> PyQt5.QtCore.QFileInfo: ...
    def isCustom(self) -> bool: ...
    def isValid(self) -> bool: ...
    def steamAppID(self) -> str: ...
    def title(self) -> str: ...
    def withArgument(self, argument: str) -> "ExecutableInfo": ...
    def withSteamAppId(self, app_id: str) -> "ExecutableInfo": ...
    def withWorkingDirectory(
        self, directory: PyQt5.QtCore.QDir
    ) -> "ExecutableInfo": ...
    def workingDirectory(self) -> PyQt5.QtCore.QDir: ...
class FileInfo:
    """
    Information about a virtualised file.
    """
    # Archive the file comes from (property bodies elided in this stub).
    @property
    def archive(self) -> str: ...
    @archive.setter
    def archive(self, arg0: str): ...
    # Path of the file.
    @property
    def filePath(self) -> str: ...
    @filePath.setter
    def filePath(self, arg0: str): ...
    # Origins of the file.
    @property
    def origins(self) -> List[str]: ...
    @origins.setter
    def origins(self, arg0: List[str]): ...
    def __init__(self):
        """
        Creates an uninitialized FileInfo.
        """
        ...
class FileTreeEntry:
    """
    Represents an entry in a file tree, either a file or a directory. This class
    is inherited by IFileTree so that operations on an entry are the same for a file or
    a directory.
    This class provides convenience methods to query information on the file, like its
    name or its last modification time. It also provides a convenience astree() method
    that can be used to retrieve the tree corresponding to its entry in case the entry
    represents a directory.
    """
    class FileTypes(Enum):
        """
        Enumeration of the different file types or combinations thereof.
        """
        DIRECTORY = ...
        FILE = ...
        FILE_OR_DIRECTORY = ...
        def __and__(self, other: int) -> bool: ...
        def __or__(self, other: int) -> bool: ...
        def __rand__(self, other: int) -> bool: ...
        # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
        def __ro__(self, other: int) -> bool: ...
    # Class-level shortcuts for the nested FileTypes members.
    DIRECTORY: "FileTreeEntry.FileTypes" = ...
    FILE: "FileTreeEntry.FileTypes" = ...
    FILE_OR_DIRECTORY: "FileTreeEntry.FileTypes" = ...
    @overload
    def __eq__(self, arg2: str) -> bool: ...
    @overload
    def __eq__(self, arg2: "FileTreeEntry") -> bool: ...
    @overload
    def __eq__(self, other: object) -> bool: ...
    def __repr__(self) -> str: ...
    def detach(self) -> bool:
        """
        Detach this entry from its parent tree.
        Returns:
            True if the entry was removed correctly, False otherwise.
        """
        ...
    def fileType(self) -> "FileTreeEntry.FileTypes":
        """
        Returns:
            The filetype of this entry.
        """
        ...
    @overload
    def hasSuffix(self, suffixes: List[str]) -> bool:
        """
        Check if this entry has one of the given suffixes.
        Args:
            suffixes: Suffixes to check.
        Returns:
            True if this entry is a file and has one of the given suffixes.
        """
        ...
    @overload
    def hasSuffix(self, suffix: str) -> bool:
        """
        Check if this entry has the given suffix.
        Args:
            suffix: Suffix to check.
        Returns:
            True if this entry is a file and has the given suffix.
        """
        ...
    def isDir(self) -> bool:
        """
        Returns:
            True if this entry is a directory, False otherwise.
        """
        ...
    def isFile(self) -> bool:
        """
        Returns:
            True if this entry is a file, False otherwise.
        """
        ...
    def moveTo(self, tree: "IFileTree") -> bool:
        """
        Move this entry to the given tree.
        Args:
            tree: The tree to move this entry to.
        Returns:
            True if the entry was moved correctly, False otherwise.
        """
        ...
    def name(self) -> str:
        """
        Returns:
            The name of this entry.
        """
        ...
    def parent(self) -> Optional["IFileTree"]:
        """
        Returns:
            The parent tree containing this entry, or a `None` if this entry is the root
            or the parent tree is unreachable.
        """
        ...
    def path(self, sep: str = "\\") -> str:
        """
        Retrieve the path from this entry up to the root of the tree.
        This method propagates up the tree, so it is not constant complexity, as
        the full path is never stored.
        Args:
            sep: The type of separator to use to create the path.
        Returns:
            The path from this entry to the root, including the name of this entry.
        """
        ...
    def pathFrom(self, tree: "IFileTree", sep: str = "\\") -> str:
        """
        Retrieve the path from the given tree to this entry.
        Args:
            tree: The tree to reach, must be a parent of this entry.
            sep: The type of separator to use to create the path.
        Returns:
            The path from the given tree to this entry, including the name of this entry, or
            an empty string if the given tree is not a parent of this entry.
        """
        ...
    def suffix(self) -> str:
        """
        Retrieve the "last" extension of this entry.
        The "last" extension is everything after the last dot in the file name.
        Returns:
            The last extension of this entry, or an empty string if the file has no extension
            or is a directory.
        """
        ...
class GamePlugins(abc.ABC):
    """
    Abstract interface to plugin-list management for a game.
    """
    def __init__(self): ...
    # Retrieve the current plugin load order (inferred from the name — confirm
    # against the runtime documentation).
    @abc.abstractmethod
    def getLoadOrder(self) -> List[str]: ...
    @abc.abstractmethod
    def lightPluginsAreSupported(self) -> bool:
        """
        Returns:
            True if light plugins are supported, False otherwise.
        """
        ...
    # Read/write the plugin lists from/to the given IPluginList (inferred from
    # the names — confirm against the runtime documentation).
    @abc.abstractmethod
    def readPluginLists(self, plugin_list: "IPluginList"): ...
    @abc.abstractmethod
    def writePluginLists(self, plugin_list: "IPluginList"): ...
class GuessedString:
    """
    Represents a string that may be set from different places. Each time the value is
    changed, a "quality" is specified to indicate how likely it is that the value is the
    best choice. Only the best choice should be used in the end but alternatives can be
    queried. This class also allows a filter to be set. If a "guess" doesn't pass the
    filter, it is ignored.
    """
    @overload
    def __init__(self):
        """
        Creates a GuessedString with no associated value.
        """
        ...
    @overload
    def __init__(self, value: str, quality: "GuessQuality"):
        """
        Creates a GuessedString with the given value and quality.
        Args:
            value: Initial value of the GuessedString.
            quality: Quality of the initial value.
        """
        ...
    def __str__(self) -> str: ...
    @overload
    def reset(self) -> "GuessedString":
        """
        Reset this GuessedString to an invalid state.
        Returns:
            This GuessedString object.
        """
        ...
    @overload
    def reset(self, value: str, quality: "GuessQuality") -> "GuessedString":
        """
        Reset this GuessedString object with the given value and quality, only
        if the given quality is better than the current one.
        Args:
            value: New value for this GuessedString.
            quality: Quality of the new value.
        Returns:
            This GuessedString object.
        """
        ...
    @overload
    def reset(self, other: "GuessedString") -> "GuessedString":
        """
        Reset this GuessedString object by copying the given one, only
        if the given one has better quality.
        Args:
            other: The GuessedString to copy.
        Returns:
            This GuessedString object.
        """
        ...
    def setFilter(self, filter: Callable[[str], Union[str, bool]]):
        """
        Set the filter for this GuessedString.
        The filter is applied on every `update()` and can reject the new value
        altogether or modify it (by returning a new value).
        Args:
            filter: The new filter.
        """
        ...
    @overload
    def update(self, value: str) -> "GuessedString":
        """
        Update this GuessedString by adding the given value to the list of variants
        and setting the actual value without changing the current quality of this
        GuessedString.
        The GuessedString is only updated if the given value passes the filter.
        Args:
            value: The new value for this string.
        Returns:
            This GuessedString object.
        """
        ...
    @overload
    def update(self, value: str, quality: "GuessQuality") -> "GuessedString":
        """
        Update this GuessedString by adding a new variant with the given quality.
        If the specified quality is better than the current one, the actual value of
        the GuessedString is also updated.
        The GuessedString is only updated if the given value passes the filter.
        Args:
            value: The new variant to add.
            quality: The quality of the variant.
        Returns:
            This GuessedString object.
        """
        ...
    def variants(self) -> List[str]:
        """
        Returns:
            The list of variants for this GuessedString.
        """
        ...
class IDownloadManager(PyQt5.QtCore.QObject):
    """
    Interface to the download manager.
    """
    # Qt signals emitted by the manager (stub declarations).
    downloadComplete: PyQt5.QtCore.pyqtSignal = ...
    downloadPaused: PyQt5.QtCore.pyqtSignal = ...
    downloadFailed: PyQt5.QtCore.pyqtSignal = ...
    downloadRemoved: PyQt5.QtCore.pyqtSignal = ...
    def _object(self) -> PyQt5.QtCore.QObject:
        """
        Returns:
            The underlying `QObject` for the manager.
        """
        ...
    def downloadPath(self, id: int) -> str:
        """
        Retrieve the (absolute) path of the specified download.
        Args:
            id: ID of the download.
        Returns:
            The absolute path to the file corresponding to the given download. This file
            may not exist yet if the download is incomplete.
        """
        ...
    def startDownloadNexusFile(self, mod_id: int, file_id: int) -> int:
        """
        Download a file from www.nexusmods.com/<game>. <game> is always the game
        currently being managed.
        Args:
            mod_id: ID of the mod to download the file from.
            file_id: ID of the file to download.
        Returns:
            An ID identifying the download.
        """
        ...
    def startDownloadURLs(self, urls: List[str]) -> int:
        """
        Download a file by url.
        The list can contain alternative URLs to allow the download manager to switch
        in case of download problems.
        Args:
            urls: List of urls to download from.
        Returns:
            An ID identifying the download.
        """
        ...
class IFileTree(FileTreeEntry):
    """
    Interface to classes that provide ways to visualize and alter file trees. The tree
    may not correspond to an actual file tree on the disk (e.g., inside an archive,
    from a QTree Widget, ...).
    Read-only operations on the tree are thread-safe, even when the tree has not been populated
    yet.
    In order to prevent wrong usage of the tree, implementing classes may throw
    UnsupportedOperationException if an operation is not supported. By default, all operations
    are supported, but some may not make sense in many situations.
    The goal of this is not to reflect the changes made to an IFileTree to the disk, but child
    classes may override relevant methods to do so.
    The tree is built upon FileTreeEntry. A given tree holds shared pointers to its entries
    while each entry holds a weak pointer to its parent; this means that the descending links
    are strong (shared pointers) but the uplinks are weak.
    Accessing the parent is always done by locking the weak pointer so that the returned pointer
    is either null or valid. This structure implies that as long as the initial root lives,
    entries should not be destroyed, unless the entries are detached from the root and no shared
    pointers are kept.
    However, it is not guaranteed that one can go up the tree from a single node entry. If the
    root node is destroyed, it will not be possible to go up the tree, even if we still have
    a valid shared pointer.
    """
    class InsertPolicy(Enum):
        """
        Conflict-resolution policies for insert/copy/move operations.
        """
        FAIL_IF_EXISTS = ...
        REPLACE = ...
        MERGE = ...
        def __and__(self, other: int) -> bool: ...
        def __or__(self, other: int) -> bool: ...
        def __rand__(self, other: int) -> bool: ...
        # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
        def __ro__(self, other: int) -> bool: ...
    class WalkReturn(Enum):
        """
        Enumeration that can be returned by the callback for the `walk()` method to stop the
        walking operation early.
        """
        CONTINUE = ...
        STOP = ...
        SKIP = ...
        def __and__(self, other: int) -> bool: ...
        def __or__(self, other: int) -> bool: ...
        def __rand__(self, other: int) -> bool: ...
        # NOTE(review): "__ro__" is likely a typo for "__ror__" — confirm at runtime.
        def __ro__(self, other: int) -> bool: ...
    # Class-level shortcuts for the nested enum members.
    CONTINUE: "IFileTree.WalkReturn" = ...
    FAIL_IF_EXISTS: "IFileTree.InsertPolicy" = ...
    MERGE: "IFileTree.InsertPolicy" = ...
    REPLACE: "IFileTree.InsertPolicy" = ...
    SKIP: "IFileTree.WalkReturn" = ...
    STOP: "IFileTree.WalkReturn" = ...
    def __bool__(self) -> bool:
        """
        Returns:
            True if this tree is not empty, False otherwise.
        """
        ...
    def __getitem__(self, index: int) -> "FileTreeEntry":
        """
        Retrieve the entry at the given index in this tree.
        Args:
            index: Index of the entry to retrieve, must be less than the size.
        Returns:
            The entry at the given index.
        Raises:
            IndexError: If the given index is not in range for this tree.
        """
        ...
    def __iter__(self) -> Iterator[FileTreeEntry]:
        """
        Retrieves an iterator for entries directly under this tree.
        This method does not recurse into subtrees, see `walk()` for this.
        Returns:
            An iterator object that can be used to iterate over entries in this tree.
        """
        ...
    def __len__(self) -> int:
        """
        Returns:
            The number of entries directly under this tree.
        """
        ...
    def __repr__(self) -> str: ...
    def addDirectory(self, path: str) -> "IFileTree":
        """
        Create a new directory tree under this tree.
        This method will create missing folders in the given path and will
        not fail if the directory already exists but will fail if the given
        path contains "." or "..".
        This method invalidates iterators to this tree and all the subtrees
        present in the given path.
        Args:
            path: Path to the directory to create.
        Returns:
            An IFileTree corresponding to the created directory.
        Raises:
            RuntimeError: If the directory could not be created.
        """
        ...
    def addFile(self, path: str, replace_if_exists: bool = False) -> "FileTreeEntry":
        """
        Create a new file directly under this tree.
        This method will fail if the file already exists and `replace_if_exists` is `False`.
        This method invalidates iterators to this tree and all the subtrees present in the
        given path.
        Args:
            path: Path to the file to create.
            replace_if_exists: If True and an entry already exists at the given location, it will be replaced by a new entry. This will replace both files and directories.
        Returns:
            A FileTreeEntry corresponding to the created file.
        Raises:
            RuntimeError: If the file could not be created.
        """
        ...
    def clear(self) -> bool:
        """
        Delete (detach) all the entries from this tree.
        This method will go through the entries in this tree and stop at the first
        entry that cannot be deleted, this means that the tree can be partially cleared.
        Returns:
            True if all entries have been detached, False otherwise.
        """
        ...
    def copy(
        self,
        entry: "FileTreeEntry",
        path: str = "",
        policy: "IFileTree.InsertPolicy" = InsertPolicy.FAIL_IF_EXISTS,
    ) -> "FileTreeEntry":
        """
        Copy the given entry to the given path under this tree.
        The entry must not be a parent tree of this tree. This method can also be used
        to rename entries.
        If the insert policy is FAIL_IF_EXISTS, the call will fail if an entry
        at the same location already exists. If the policy is REPLACE, an existing
        entry will be replaced. If MERGE, the entry will be merged with the existing
        one (if the entry is a file, and a file exists, the file will be replaced).
        This method invalidates iterators to this tree, to the parent tree of the given
        entry, and to subtrees of this tree if the insert policy is MERGE.
        Args:
            entry: Entry to copy.
            path: The path to copy the entry to. If the path ends with / or \\, the entry will be copied in the corresponding directory instead of replacing it. If the
                given path is empty (`""`), the entry is copied directly under this tree.
            policy: Policy to use to resolve conflicts.
        Returns:
            The new entry (copy of the specified entry).
        Raises:
            RuntimeError: If the entry could not be copied.
        """
        ...
    def createOrphanTree(self, name: str = "") -> "IFileTree":
        """
        Create a new orphan empty tree.
        Args:
            name: Name of the tree.
        Returns:
            A new tree without any parent.
        """
        ...
    def exists(
        self,
        path: str,
        type: "FileTreeEntry.FileTypes" = FileTreeEntry.FileTypes.FILE_OR_DIRECTORY,
    ) -> bool:
        """
        Check if the given entry exists.
        Args:
            path: Path to the entry, separated by / or \\.
            type: The type of the entry to check.
        Returns:
            True if the entry was found, False otherwise.
        """
        ...
    def find(
        self,
        path: str,
        type: "FileTreeEntry.FileTypes" = FileTreeEntry.FileTypes.FILE_OR_DIRECTORY,
    ) -> Optional[Union["IFileTree", "FileTreeEntry"]]:
        """
        Retrieve the given entry.
        If no entry exists at the given path, or if the entry is not of the right
        type, `None` is returned.
        Args:
            path: Path to the entry, separated by / or \\.
            type: The type of the entry to check.
        Returns:
            The entry at the given location, or `None` if the entry was not found or
            was not of the correct type.
        """
        ...
    def insert(
        self,
        entry: "FileTreeEntry",
        policy: "IFileTree.InsertPolicy" = InsertPolicy.FAIL_IF_EXISTS,
    ) -> bool:
        """
        Insert the given entry in this tree, removing it from its
        previous parent.
        The entry must not be this tree or a parent entry of this tree.
        - If the insert policy is `FAIL_IF_EXISTS`, the call will fail if an entry
          with the same name already exists.
        - If the policy is `REPLACE`, an existing entry will be replaced by the given entry.
        - If the policy is `MERGE`:
            - If there is no entry with the same name, the new entry is inserted.
            - If there is an entry with the same name:
                - If both entries are files, the old file is replaced by the given entry.
                - If both entries are directories, a merge is performed as if using merge().
                - Otherwise the insertion fails (two entries with different types).
        This method invalidates iterators to this tree, to the parent tree of the given
        entry, and to subtrees of this tree if the insert policy is MERGE.
        Args:
            entry: Entry to insert.
            policy: Policy to use to resolve conflicts.
        Returns:
            True if the entry was inserted, False otherwise.
        """
        ...
    def merge(
        self, other: "IFileTree", overwrites: bool = False
    ) -> Union[Dict["FileTreeEntry", "FileTreeEntry"], int]:
        """
        Merge the given tree with this tree, i.e., insert all entries
        of the given tree into this tree.
        The tree must not be this tree or a parent entry of this tree. Files present in both trees
        will be replaced by files in the given tree. After a merge, the source tree will be
        empty but still attached to its parent.
        If `overwrites` is `True`, a map from overridden files to new files will be returned.
        Note that the merge process makes no distinction between files and directories
        when merging: if a directory is present in this tree and a file from source
        is in conflict with it, the tree will be removed and the file inserted; if a file
        is in this tree and a directory from source is in conflict with it, the file will
        be replaced with the directory.
        This method invalidates iterators to this tree, all the subtrees under this tree
        present in the given path, and all the subtrees of the given source.
        Args:
            other: Tree to merge.
            overwrites: If True, a mapping from overridden files to new files will be returned.
        Returns:
            If `overwrites` is True, a mapping from overridden files to new files, otherwise
            the number of overwritten entries.
        Raises:
            RuntimeError: If the merge failed.
        """
        ...
    def move(
        self,
        entry: "FileTreeEntry",
        path: str,
        policy: "IFileTree.InsertPolicy" = InsertPolicy.FAIL_IF_EXISTS,
    ) -> bool:
        """
        Move the given entry to the given path under this tree.
        The entry must not be a parent tree of this tree. This method can also be used
        to rename entries.
        If the insert policy is FAIL_IF_EXISTS, the call will fail if an entry
        at the same location already exists. If the policy is REPLACE, an existing
        entry will be replaced. If MERGE, the entry will be merged with the existing
        one (if the entry is a file, and a file exists, the file will be replaced).
        This method invalidates iterators to this tree, to the parent tree of the given
        entry, and to subtrees of this tree if the insert policy is MERGE.
        Args:
            entry: Entry to move.
            path: The path to move the entry to. If the path ends with / or \\, the entry will be inserted in the corresponding directory instead of replacing it. If the
                given path is empty (`""`), this is equivalent to `insert()`.
            policy: Policy to use to resolve conflicts.
        Returns:
            True if the entry was moved correctly, False otherwise.
        """
        ...
    def pathTo(self, entry: "FileTreeEntry", sep: str = "\\") -> str:
        """
        Retrieve the path from this tree to the given entry.
        Args:
            entry: The entry to reach, must be in this tree.
            sep: The type of separator to use to create the path.
        Returns:
            The path from this tree to the given entry, including the name of the entry, or
            an empty string if the given entry was not found under this tree.
        """
        ...
    @overload
    def remove(self, name: str) -> bool:
        """
        Delete the entry with the given name.
        This method does not recurse into subtrees, so the entry should be
        accessible directly from this tree.
        Args:
            name: Name of the entry to delete.
        Returns:
            True if the entry was deleted, False otherwise.
        """
        ...
    @overload
    def remove(self, entry: "FileTreeEntry") -> bool:
        """
        Delete the given entry.
        Args:
            entry: Entry to delete. The entry must belong to this tree (and not to a subtree).
        Returns:
            True if the entry was deleted, False otherwise.
        """
        ...
    def removeAll(self, names: List[str]) -> int:
        """
        Delete the entries with the given names from the tree.
        This method does not recurse into subtrees, so only entries accessible
        directly from this tree will be removed. This method invalidates iterators.
        Args:
            names: Names of the entries to delete.
        Returns:
            The number of deleted entries.
        """
        ...
    def removeIf(self, filter: Callable[["FileTreeEntry"], bool]) -> int:
        """
        Delete entries matching the given predicate from the tree.
        This method does not recurse into subtrees, so only entries accessible
        directly from this tree will be removed. This method invalidates iterators.
        Args:
            filter: Predicate that should return true for entries to delete.
        Returns:
            The number of deleted entries.
        """
        ...
    def walk(
        self,
        callback: Callable[[str, "FileTreeEntry"], "IFileTree.WalkReturn"],
        sep: str = "\\",
    ):
        """
        Walk this tree, calling the given function for each entry in it.
        The given callback will be called with two parameters: the path from this tree to the given entry
        (with a trailing separator, not including the entry name), and the actual entry. The method returns
        a `WalkReturn` object to indicate what to do.
        Args:
            callback: Method to call for each entry in the tree.
            sep: Type of separator to use to construct the path.
        """
        ...
class IInstallationManager:
    """
    Interface to the installation manager, used by installer plugins to
    interact with the archive currently being installed (extract files,
    create new files, trigger installation of another archive, ...).
    """
    def createFile(self, entry: "FileTreeEntry") -> str:
        """
        Create a new file on the disk corresponding to the given entry.
        This method can be used by installers that need to create files that are not in the original
        archive. At the end of the installation, if there are entries in the final tree that were used
        to create files, the corresponding files will be moved to the mod folder.
        Temporary files corresponding to created files are automatically cleaned up at the end of
        the installation.
        Args:
            entry: The entry for which a temporary file should be created.
        Returns:
            The path to the created file, or an empty string if the file could not be created.
        """
        ...
    def extractFile(self, entry: "FileTreeEntry", silent: bool = False) -> str:
        """
        Extract the specified file from the currently opened archive to a temporary
        location.
        This method cannot be used to extract a directory.
        The call will fail with an exception if no archive is open (plugins deriving from
        IPluginInstallerSimple can rely on that, custom installers should not). The temporary
        file is automatically cleaned up after the installation. This call can be very slow
        if the archive is large and "solid".
        Args:
            entry: Entry corresponding to the file to extract.
            silent: If true, the dialog showing extraction progress will not be shown.
        Returns:
            The absolute path to the temporary file, or an empty string if the file was not extracted.
        """
        ...
    def extractFiles(
        self, entries: List["FileTreeEntry"], silent: bool = False
    ) -> List[str]:
        """
        Extract the specified files from the currently opened archive to a temporary
        location.
        This method cannot be used to extract directories.
        The call will fail with an exception if no archive is open (plugins deriving from
        IPluginInstallerSimple can rely on that, custom installers should not). The temporary
        files are automatically cleaned up after the installation. This call can be very slow
        if the archive is large and "solid".
        Args:
            entries: Entries corresponding to the files to extract.
            silent: If true, the dialog showing extraction progress will not be shown.
        Returns:
            A list containing absolute paths to the temporary files.
        """
        ...
    def getSupportedExtensions(self) -> List[str]:
        """
        Returns:
            The extensions of archives supported by this installation manager.
        """
        ...
    def installArchive(
        self, mod_name: "GuessedString", archive: str, mod_id: int
    ) -> "InstallResult":
        """
        Install the given archive.
        Args:
            mod_name: Suggested name of the mod.
            archive: Path to the archive to install.
            mod_id: ID of the mod, if available.
        Returns:
            The result of the installation.
        """
        ...
    def setURL(self, url: str):
        """
        Set the url associated with the mod being installed.
        Args:
            url: Url to set.
        """
        ...
class IModInterface:
    """
    Interface to an installed mod, providing access to its metadata
    (name, categories, versions, Nexus information) and basic operations
    (rename, remove, endorse).
    """
    def absolutePath(self) -> str:
        """
        Returns:
            Absolute path to the mod to be used in file system operations.
        """
        ...
    def addCategory(self, name: str):
        """
        Assign a category to the mod. If the named category does not exist it is created.
        Args:
            name: Name of the new category to assign.
        """
        ...
    def addNexusCategory(self, category_id: int):
        """
        Set the category id from a nexus category id. Conversion to MO ID happens internally.
        If a mapping is not possible, the category is set to the default value.
        Args:
            category_id: The Nexus category ID.
        """
        ...
    def categories(self) -> List[str]:
        """
        Returns:
            The list of categories assigned to this mod.
        """
        ...
    def name(self) -> str:
        """
        Returns:
            The name of this mod.
        """
        ...
    def remove(self) -> bool:
        """
        Delete the mod from the disk.
        This does not update the global ModInfo structure or indices.
        Returns:
            True if the mod was deleted, False otherwise.
        """
        ...
    def removeCategory(self, name: str) -> bool:
        """
        Unassign a category from this mod.
        Args:
            name: Name of the category to remove.
        Returns:
            True if the category was removed, False otherwise (e.g. if no such category
            was assigned).
        """
        ...
    def setGameName(self, name: str):
        """
        Set the source game of this mod.
        Args:
            name: The new source game short name of this mod.
        """
        ...
    def setIsEndorsed(self, endorsed: bool):
        """
        Set endorsement state of the mod.
        Args:
            endorsed: New endorsement state of this mod.
        """
        ...
    def setName(self, name: str) -> bool:
        """
        Set the name of this mod.
        This will also update the name of the directory that contains this mod.
        Args:
            name: New name for this mod.
        Returns:
            True if the name was changed, False if an error occurred (e.g. if the name is not a valid
            directory name).
        """
        ...
    def setNewestVersion(self, version: "VersionInfo"):
        """
        Set the latest known version of this mod.
        Args:
            version: The latest known version of this mod.
        """
        ...
    def setNexusID(self, nexus_id: int):
        """
        Set the Nexus ID of this mod.
        Args:
            nexus_id: The new Nexus ID of this mod.
        """
        ...
    def setVersion(self, version: "VersionInfo"):
        """
        Set the version of this mod.
        Args:
            version: The new version of this mod.
        """
        ...
class IModList:
    """
    Interface to the mod-list.
    All API functions in this interface need the internal name of a mod to find a
    mod. For regular mods (mods the user installed) the display name (as shown to the user)
    and internal name are identical. For other mods (non-MO mods) there is currently no way
    to translate from display name to internal name because the display name might not be unambiguous.
    """
    def allMods(self) -> List[str]:
        """
        Returns:
            A list containing the internal names of all installed mods.
        """
        ...
    def displayName(self, name: str) -> str:
        """
        Retrieve the display name of a mod from its internal name.
        If you received an internal name from the API (e.g. `IPluginList.origin`) then you should use
        that name to identify the mod in all other api calls but use this function to retrieve the name
        to show to the user.
        Args:
            name: Internal name of the mod.
        Returns:
            The display name of the given mod.
        """
        ...
    def onModMoved(self, callback: Callable[[str, int, int], None]) -> bool:
        """
        Install a handler to be called when a mod is moved.
        Args:
            callback: The function to call when a mod is moved. The first argument is the internal name of the mod, the second argument the old priority and the third argument the new priority.
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def onModStateChanged(self, callback: Callable[[str, int], None]) -> bool:
        """
        Install a handler to be called when a mod state changes (enabled/disabled, endorsed, ...).
        Args:
            callback: The function to call when the state of a mod changes. The first argument is the internal mod name, and the second one the new state of the mod.
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def priority(self, name: str) -> int:
        """
        Retrieve the priority of a mod.
        Args:
            name: Internal name of the mod.
        Returns:
            The priority of the given mod.
        """
        ...
    def setActive(self, name: str, active: bool) -> bool:
        """
        Enable or disable a mod.
        Calling this will cause MO to re-evaluate its virtual file system so this is
        a fairly expensive call.
        Args:
            name: Internal name of the mod to enable or disable.
            active: True to enable the mod, False to disable it.
        Returns:
            True on success, False otherwise.
        """
        ...
    def setPriority(self, name: str, priority: int) -> bool:
        """
        Change the priority of a mod.
        `priority` is the new priority after the move. Keep in mind that the mod disappears from its
        old location and all mods with higher priority than the moved mod decrease in priority by one.
        Args:
            name: Internal name of the mod.
            priority: The new priority of the mod.
        Returns:
            True if the priority was changed, False otherwise (if the name or priority were invalid).
        """
        ...
    def state(self, name: str) -> int:
        """
        Retrieve the state of a mod.
        Args:
            name: Internal name of the mod.
        Returns:
            The state of the given mod.
        """
        ...
class IModRepositoryBridge(PyQt5.QtCore.QObject):
    """
    Bridge to a mod repository (e.g., Nexus).
    The `request*` methods do not return results directly: each call carries a
    `user_data` value that is returned with the result through one of the Qt
    signals below (e.g. `descriptionAvailable`, `requestFailed`).
    """
    descriptionAvailable: PyQt5.QtCore.pyqtSignal = ...
    filesAvailable: PyQt5.QtCore.pyqtSignal = ...
    fileInfoAvailable: PyQt5.QtCore.pyqtSignal = ...
    downloadURLsAvailable: PyQt5.QtCore.pyqtSignal = ...
    endorsementsAvailable: PyQt5.QtCore.pyqtSignal = ...
    endorsementToggled: PyQt5.QtCore.pyqtSignal = ...
    trackedModsAvailable: PyQt5.QtCore.pyqtSignal = ...
    trackingToggled: PyQt5.QtCore.pyqtSignal = ...
    requestFailed: PyQt5.QtCore.pyqtSignal = ...
    def _object(self) -> PyQt5.QtCore.QObject:
        """
        Returns:
            The underlying `QObject` for the bridge.
        """
        ...
    def requestDescription(self, game_name: str, mod_id: int, user_data: MoVariant):
        """
        Request description of a mod.
        Args:
            game_name: Name of the game containing the mod.
            mod_id: Nexus ID of the mod.
            user_data: User data to be returned with the result.
        """
        ...
    def requestDownloadURL(
        self, game_name: str, mod_id: int, file_id: int, user_data: MoVariant
    ):
        """
        Request the download URL of a mod file.
        Args:
            game_name: Name of the game containing the mod.
            mod_id: Nexus ID of the mod.
            file_id: ID of the file for which a URL should be returned.
            user_data: User data to be returned with the result.
        """
        ...
    def requestFileInfo(
        self, game_name: str, mod_id: int, file_id: int, user_data: MoVariant
    ):
        """
        Request information about a mod file.
        Args:
            game_name: Name of the game containing the mod.
            mod_id: Nexus ID of the mod.
            file_id: ID of the file for which information is requested.
            user_data: User data to be returned with the result.
        """
        ...
    def requestFiles(self, game_name: str, mod_id: int, user_data: MoVariant):
        """
        Request the list of files belonging to a mod.
        Args:
            game_name: Name of the game containing the mod.
            mod_id: Nexus ID of the mod.
            user_data: User data to be returned with the result.
        """
        ...
    def requestToggleEndorsement(
        self,
        game_name: str,
        mod_id: int,
        mod_version: str,
        endorse: bool,
        user_data: MoVariant,
    ):
        """
        Request a change of the endorsement state of a mod.
        Args:
            game_name: Name of the game containing the mod.
            mod_id: Nexus ID of the mod.
            mod_version: Version of the mod.
            endorse: True to endorse the mod, False to remove the endorsement.
            user_data: User data to be returned with the result.
        """
        ...
class IOrganizer:
    """
    Interface to class that provides information about the running session
    of Mod Organizer to be used by plugins.
    """
    def appVersion(self) -> "VersionInfo":
        """
        Returns:
            The running version of Mod Organizer.
        """
        ...
    def basePath(self) -> str:
        """
        Returns:
            The absolute path to the base directory of Mod Organizer.
        """
        ...
    def createMod(self, name: "GuessedString") -> "IModInterface":
        """
        Create a new mod with the specified name.
        If a mod with the same name already exists, the user will be queried. If the user chooses
        to merge or replace, the call will succeed, otherwise the call will fail.
        Args:
            name: Name of the mod to create.
        Returns:
            An interface to the newly created mod that can be used to modify it, or `None` if the mod
            could not be created.
        """
        ...
    def createNexusBridge(self) -> "IModRepositoryBridge":
        """
        Create a new Nexus interface.
        Returns:
            The newly created Nexus interface.
        """
        ...
    def downloadManager(self) -> "IDownloadManager":
        """
        Returns:
            The interface to the download manager.
        """
        ...
    def downloadsPath(self) -> str:
        """
        Returns:
            The absolute path to the download directory.
        """
        ...
    def findFileInfos(
        self, path: str, filter: Callable[["FileInfo"], bool]
    ) -> List["FileInfo"]:
        """
        Find files in the virtual directory matching the specified filter.
        Args:
            path: The path to search in (relative to the 'data' folder).
            filter: The function to use to filter files. Should return True for the files to keep.
        Returns:
            The list of `QFileInfo` corresponding to the matching files.
        """
        ...
    @overload
    def findFiles(self, path: str, filter: Callable[[str], bool]) -> List[str]:
        """
        Find files in the given folder that matches the given filter.
        Args:
            path: The path to search in (relative to the 'data' folder).
            filter: The function to use to filter files. Should return True for the files to keep.
        Returns:
            The list of matching files.
        """
        ...
    @overload
    def findFiles(self, path: str, patterns: List[str]) -> List[str]:
        """
        Find files in the given folder that matches one of the given glob patterns.
        Args:
            path: The path to search in (relative to the 'data' folder).
            patterns: List of glob patterns to match against.
        Returns:
            The list of matching files.
        """
        ...
    @overload
    def findFiles(self, path: str, pattern: str) -> List[str]:
        """
        Find files in the given folder that matches the given glob pattern.
        Args:
            path: The path to search in (relative to the 'data' folder).
            pattern: The glob pattern to use to filter files.
        Returns:
            The list of matching files.
        """
        ...
    def getFileOrigins(self, filename: str) -> List[str]:
        """
        Retrieve the file origins for the specified file.
        The origins are listed with their internal name. The internal name of a mod can differ
        from the display name for disambiguation.
        Args:
            filename: Path to the file to retrieve origins for (relative to the 'data' folder).
        Returns:
            The list of origins that contain the specified file, sorted by their priority.
        """
        ...
    def getGame(self, name: str) -> "IPluginGame":
        """
        Retrieve the game plugin matching the given name.
        Args:
            name: Name of the game (short name).
        Returns:
            The plugin for the given game, or `None` if none was found.
        """
        ...
    def getMod(self, name: str) -> "IModInterface":
        """
        Retrieve an interface to a mod using its name.
        Args:
            name: Name of the mod to retrieve.
        Returns:
            An interface to the given mod, or `None` if there is no mod with this name
        """
        ...
    def installMod(self, filename: str, name_suggestion: str = "") -> "IModInterface":
        """
        Install a mod archive at the specified location.
        Args:
            filename: Absolute filepath to the archive to install.
            name_suggestion: Suggested name for this mod. This can still be changed by the user.
        Returns:
            An interface to the new installed mod, or `None` if no installation took place (canceled or failure).
        """
        ...
    def listDirectories(self, directory: str) -> List[str]:
        """
        Retrieve the list of (virtual) subdirectories in the given path.
        Args:
            directory: Path to the directory to list (relative to the 'data' folder).
        Returns:
            The list of directories in the given directory.
        """
        ...
    def managedGame(self) -> "IPluginGame":
        """
        Returns:
            The plugin corresponding to the current game.
        """
        ...
    def modDataChanged(self, mod: "IModInterface"):
        """
        Notify the organizer that the given mod has changed.
        Args:
            mod: The mod that has changed.
        """
        ...
    def modList(self) -> "IModList":
        """
        Returns:
            The interface to the mod list.
        """
        ...
    def modsPath(self) -> str:
        """
        Returns:
            The (absolute) path to the mods directory.
        """
        ...
    def modsSortedByProfilePriority(self) -> List[str]:
        """
        Returns:
            The list of mod names, sorted according to the current profile priorities.
        """
        ...
    def onAboutToRun(self, callback: Callable[[str], bool]) -> bool:
        """
        Install a new handler to be called when an application is about to run.
        Multiple handlers can be installed. If any of the handler returns `False`, the application will
        not run.
        Args:
            callback: The function to call when an application is about to run. The parameter is the absolute path to the application to run. The function can return False to prevent the application from running.
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def onFinishedRun(self, callback: Callable[[str, int], None]) -> bool:
        """
        Install a new handler to be called when an application has finished running.
        Args:
            callback: The function to call when an application has finished running. The first parameter is the absolute path to the application, and the second parameter is the exit code of the application.
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def onModInstalled(self, callback: Callable[[str], None]) -> bool:
        """
        Install a new handler to be called when a new mod is installed.
        Args:
            callback: The function to call when a mod is installed. The parameter of the function is the name of the installed mod.
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def onPluginSettingChanged(
        self, callback: Callable[[str, str, MoVariant, MoVariant], None]
    ) -> bool:
        """
        Install a new handler to be called when a plugin setting is changed.
        Args:
            callback: The function to call when a plugin setting is changed. The parameters are: The name of the plugin, the name of the setting, the old value (or `None` if the setting did not exist before) and the new value
                of the setting (or `None` if the setting has been removed).
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def onProfileChanged(
        self, callback: Callable[["IProfile", "IProfile"], None]
    ) -> bool:
        """
        Install a new handler to be called when the current profile is changed.
        The function is called when the profile is changed but some operations related to
        the profile might not be finished when this is called (e.g., the virtual file system
        might not be up-to-date).
        Args:
            callback: The function to call when the current profile is changed. The first parameter is the old profile (can be `None`, e.g. at startup), and the second parameter is the new profile (cannot be `None`).
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def onUserInterfaceInitialized(
        self, callback: Callable[[PyQt5.QtWidgets.QMainWindow], None]
    ) -> bool:
        """
        Install a new handler to be called when the UI has been fully initialized.
        Args:
            callback: The function to call when the user-interface has been fully initialized. The parameter is the main window of the application (`QMainWindow`).
        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...
    def overwritePath(self) -> str:
        """
        Returns:
            The (absolute) path to the overwrite directory.
        """
        ...
    def persistent(
        self, plugin_name: str, key: str, default: MoVariant = None
    ) -> MoVariant:
        """
        Retrieve the specified persistent value for a plugin.
        A persistent is an arbitrary value that the plugin can set and retrieve that is persistently stored
        by the main application. There is no UI for the user to change this value but they can directly access
        the storage.
        Args:
            plugin_name: Name of the plugin for which to retrieve the value. This should always be IPlugin::name() unless you have a really good reason to access data of another mod AND if you can verify that plugin is actually installed.
            key: Identifier of the setting.
            default: Default value to return if the key is not set (yet).
        Returns:
            The value corresponding to the given persistent setting, or `default` if the key is not found.
        """
        ...
    def pluginDataPath(self) -> str:
        """
        Retrieve the path to a directory where plugin data should be stored.
        For python plugins, it is recommended to use a dedicated folder (per plugin) if you need to
        store data (resources, or multiple python files).
        Returns:
            Path to a directory where plugin data should be stored.
        """
        ...
    def pluginList(self) -> "IPluginList":
        """
        Returns:
            The plugin list interface.
        """
        ...
    def pluginSetting(self, plugin_name: str, key: str) -> MoVariant:
        """
        Retrieve settings of plugins.
        Args:
            plugin_name: Name of the plugin to retrieve the setting for.
            key: Name of the setting to retrieve the value for.
        Returns:
            The value of the setting.
        """
        ...
    def profile(self) -> "IProfile":
        """
        Returns:
            The interface to the current profile.
        """
        ...
    def profileName(self) -> str:
        """
        Returns:
            The name of the current profile, or an empty string if no profile has been loaded (yet).
        """
        ...
    def profilePath(self) -> str:
        """
        Returns:
            The absolute path to the active profile or an empty string if no profile has been loaded (yet).
        """
        ...
    def refreshModList(self, save_changes: bool = True):
        """
        Refresh the mod list.
        Args:
            save_changes: If True, the relevant profile information is saved first (enabled mods and order of mods).
        """
        ...
    def removeMod(self, mod: "IModInterface") -> bool:
        """
        Remove a mod (from disc and from the UI).
        Args:
            mod: The mod to remove.
        Returns:
            True if the mod was removed, False otherwise.
        """
        ...
    def resolvePath(self, filename: str) -> str:
        """
        Resolves a path relative to the virtual data directory to its absolute real path.
        Args:
            filename: Path to resolve.
        Returns:
            The absolute real path, or an empty string if the path was not found.
        """
        ...
    def setPersistent(
        self, plugin_name: str, key: str, value: MoVariant, sync: bool = True
    ):
        """
        Set the specified persistent value for a plugin.
        This does not update the in-memory value for this setting, see `setPluginSetting()` for this.
        Args:
            plugin_name: Name of the plugin for which to change a value. This should always be IPlugin::name() unless you have a really good reason to access data of another mod AND if you can verify that plugin is actually installed.
            key: Identifier of the setting.
            value: New value for the setting.
            sync: If True, the storage is immediately written to disc. This costs performance but is safer against data loss.
        """
        ...
    def setPluginSetting(self, plugin_name: str, key: str, value: MoVariant):
        """
        Set the specified setting for a plugin.
        This automatically notifies handlers registered with `onPluginSettingChanged`, so you do not have to do it yourself.
        Args:
            plugin_name: Name of the plugin for which to change a value. This should always be IPlugin::name() unless you have a really good reason to access data of another mod AND if you can verify that plugin is actually installed.
            key: Identifier of the setting.
            value: New value for the setting.
        """
        ...
    def startApplication(
        self,
        executable: str,
        args: List[str] = [],
        cwd: str = "",
        profile: str = "",
        forcedCustomOverwrite: str = "",
        ignoreCustomOverwrite: bool = False,
    ) -> int:
        """
        Starts an application with virtual filesystem active.
        Args:
            executable: Name or path of the executable. If this is only a filename, it will only work if it has been configured in MO as an executable. If it is a relative path it is expected to be relative to the game directory.
            args: Arguments to pass to the executable. If the list is empty, and `executable` refers to a configured executable, the configured arguments are used.
            cwd: The working directory for the executable. If this is empty, the path to the executable is used unless `executable` referred to a configured MO executable, in which case the configured cwd is used.
            profile: Profile to use. If this is empty (the default) the current profile is used.
            forcedCustomOverwrite: The mod to set as the custom overwrite, regardless of what the profile has configured.
            ignoreCustomOverwrite: Set to true to ignore the profile's configured custom overwrite.
        Returns:
            The handle to the started application, or 0 if the application failed to start.
        """
        ...
    def waitForApplication(self, handle: int) -> Tuple[bool, int]:
        """
        Wait for the application corresponding to the given handle to finish.
        This will always show the lock overlay, regardless of whether the
        user has disabled locking in the setting, so use this with care.
        Note that the lock overlay will always allow the user to unlock, in
        which case this will return False.
        Args:
            handle: Handle of the application to wait for (as returned by `startApplication()`).
        Returns:
            A tuple `(result, exitcode)`, where `result` is a boolean indicating if the application
            completed successfully, and `exitcode` is the exit code of the application.
        """
        ...
class IPlugin(abc.ABC):
    """
    Base class for all plugins.
    """
    def __init__(self): ...
    @abc.abstractmethod
    def author(self) -> str:
        """
        Returns:
            The name of the plugin author.
        """
        ...
    @abc.abstractmethod
    def description(self) -> str:
        """
        Returns:
            The description for this plugin.
        """
        ...
    @abc.abstractmethod
    def init(self, organizer: "IOrganizer") -> bool:
        """
        Initialize this plugin.
        Args:
            organizer: The main organizer interface.
        Returns:
            True if the plugin was initialized correctly, False otherwise.
        """
        ...
    @abc.abstractmethod
    def isActive(self) -> bool:
        """
        Check if this plugin is active.
        It is possible to use a plugin setting (specified in `settings()`) here to allow
        users to manually enable/disable a plugin.
        Returns:
            True if this plugin is active, False otherwise.
        """
        ...
    @abc.abstractmethod
    def name(self) -> str:
        """
        Retrieve the name of the plugin.
        The name of the plugin is used for internal storage purpose so it should not change,
        and it should be static. In particular, you should not use a localized string (`tr()`)
        for the plugin name.
        In the future, we will provide a way to localize plugin names using a distinct method,
        such as `localizedName()`.
        Returns:
            The name of the plugin.
        """
        ...
    @abc.abstractmethod
    def settings(self) -> List["PluginSetting"]:
        """
        Returns:
            A list of settings for this plugin.
        """
        ...
    @abc.abstractmethod
    def version(self) -> "VersionInfo":
        """
        Returns:
            The version of this plugin.
        """
        ...
class IPluginDiagnose(IPlugin):
    """
    Plugins that create problem reports to be displayed in the UI.
    This can be used to report problems related to the same plugin (which implements further
    interfaces) or as a stand-alone diagnosis tool.
    """
    def __init__(self): ...
    def _invalidate(self):
        """
        Invalidate the problems corresponding to this plugin.
        """
        ...
    @abc.abstractmethod
    def activeProblems(self) -> List[int]:
        """
        Retrieve the list of active problems found by this plugin.
        This method returns a list of problem IDs, that are then used when calling other methods
        such as `shortDescription()` or `hasGuidedFix()`.
        Returns:
            The list of active problems for this plugin.
        """
        ...
    @abc.abstractmethod
    def fullDescription(self, key: int) -> str:
        """
        Retrieve the full description of the problem corresponding to the given key.
        Args:
            key: ID of the problem.
        Returns:
            The full description of the problem.
        Raises:
            IndexError: If the key is not valid.
        """
        ...
    @abc.abstractmethod
    def hasGuidedFix(self, key: int) -> bool:
        """
        Check if the problem corresponding to the given key has a guided fix.
        Args:
            key: ID of the problem.
        Returns:
            True if there is a guided fix for the problem, False otherwise.
        Raises:
            IndexError: If the key is not valid.
        """
        ...
    @abc.abstractmethod
    def shortDescription(self, key: int) -> str:
        """
        Retrieve the short description of the problem corresponding to the given key.
        Args:
            key: ID of the problem.
        Returns:
            The short description of the problem.
        Raises:
            IndexError: If the key is not valid.
        """
        ...
    @abc.abstractmethod
    def startGuidedFix(self, key: int):
        """
        Start a guided fix for the problem corresponding to the given key.
        This method should throw `ValueError` if there is no guided fix for the corresponding
        problem.
        Args:
            key: ID of the problem.
        Raises:
            IndexError: If the key is not valid.
            ValueError: If there is no guided fix for this problem.
        """
        ...
class IPluginFileMapper(IPlugin):
    """
    Plugins that add virtual file links.
    """
    def __init__(self): ...
    @abc.abstractmethod
    def mappings(self) -> List["Mapping"]:
        """
        Returns:
            Mapping for the virtual file system (VFS).
        """
        ...
class IPluginGame(IPlugin):
"""
Base classes for game plugins.
Each game requires a specific game plugin. These plugins were initially designed for
Bethesda games, so a lot of methods and attributes are irrelevant for other games. If
you wish to write a plugin for a much simpler game, please consider the `basic_games`
plugin: https://github.com/ModOrganizer2/modorganizer-basic_games
"""
def __init__(self): ...
@abc.abstractmethod
def CCPlugins(self) -> List[str]:
"""
Returns:
The current list of active Creation Club plugins.
"""
...
@abc.abstractmethod
def DLCPlugins(self) -> List[str]:
"""
Returns:
The list of esp/esm files that are part of known DLCs.
"""
...
@abc.abstractmethod
def binaryName(self) -> str:
"""
Returns:
The name of the default executable to run (relative to the game folder).
"""
...
@abc.abstractmethod
def dataDirectory(self) -> PyQt5.QtCore.QDir:
"""
Returns:
The name of the directory containing data (relative to the game folder).
"""
...
@abc.abstractmethod
def documentsDirectory(self) -> PyQt5.QtCore.QDir:
"""
Returns:
The directory of the documents folder where configuration files and such for this game reside.
"""
...
@abc.abstractmethod
def executableForcedLoads(self) -> List["ExecutableForcedLoadSetting"]:
"""
Returns:
A list of automatically discovered libraries that can be force loaded with executables.
"""
...
@abc.abstractmethod
def executables(self) -> List["ExecutableInfo"]:
"""
Returns:
A list of automatically discovered executables of the game itself and tools surrounding it.
"""
...
def feature(self, feature_type: Type[GameFeatureType]) -> GameFeatureType:
"""
Retrieve a specified game feature from this plugin.
Args:
feature_type: The class of feature to retrieve.
Returns:
The game feature corresponding to the given type, or `None` if the feature is
not implemented.
"""
...
def featureList(self) -> Dict[Type[GameFeatureType], GameFeatureType]:
"""
Retrieve the list of game features implemented for this plugin.
Python plugin should not implement this method but `_featureList()`.
Returns:
A mapping from feature type to actual game features.
"""
...
@abc.abstractmethod
def gameDirectory(self) -> PyQt5.QtCore.QDir:
"""
Returns:
The directory containing the game installation.
"""
...
@abc.abstractmethod
def gameIcon(self) -> PyQt5.QtGui.QIcon:
"""
Returns:
The icon representing the game.
"""
...
@abc.abstractmethod
def gameName(self) -> str:
"""
Returns:
The name of the game (as displayed to the user).
"""
...
@abc.abstractmethod
def gameNexusName(self) -> str:
"""
Returns:
The name of the game identifier for Nexus.
"""
...
@abc.abstractmethod
def gameShortName(self) -> str:
"""
Returns:
The short name of the game.
"""
...
@abc.abstractmethod
def gameVariants(self) -> List[str]:
"""
Retrieve the list of variants for this game.
If there are multiple variants of a game (and the variants make a difference to the
plugin), like a regular one and a GOTY-edition, the plugin can return a list of them
and the user gets to chose which one he owns.
Returns:
The list of variants of the game.
"""
...
@abc.abstractmethod
def gameVersion(self) -> str:
"""
Returns:
The version of the game.
"""
...
@abc.abstractmethod
def getLauncherName(self) -> str:
"""
Returns:
The name of the launcher executable to run (relative to the game folder), or an
empty string if there is no launcher.
"""
...
@abc.abstractmethod
def iniFiles(self) -> List[str]:
"""
Returns:
The list of INI files this game uses. The first file in the list should be the
'main' INI file.
"""
...
@abc.abstractmethod
def initializeProfile(self, directory: PyQt5.QtCore.QDir, settings: int):
"""
Initialize a profile for this game.
The MO app does not yet support virtualizing only specific aspects but plugins should be written
with this future functionality in mind.
This function will be used to initially create a profile, potentially to repair it or upgrade/downgrade
it so the implementations have to gracefully handle the case that the directory already contains files.
Args:
directory: The directory where the profile is to be initialized.
settings: The parameters for how the profile should be initialized.
"""
...
@abc.abstractmethod
def isInstalled(self) -> bool:
"""
Returns:
True if this game has been discovered as installed, False otherwise.
"""
...
@abc.abstractmethod
def loadOrderMechanism(self) -> "LoadOrderMechanism":
"""
Returns:
The load order mechanism used by this game.
"""
...
@abc.abstractmethod
def looksValid(self, directory: PyQt5.QtCore.QDir) -> bool:
"""
Check if the given directory looks like a valid game installation.
Args:
directory: Directory to check.
Returns:
True if the directory looks like a valid installation of this game, False otherwise.
"""
...
@abc.abstractmethod
def nexusGameID(self) -> int:
"""
Retrieve the Nexus game ID for this game.
Example: For Skyrim, the Nexus game ID is 110.
Returns:
The Nexus game ID for this game.
"""
...
@abc.abstractmethod
def nexusModOrganizerID(self) -> int:
"""
Retrieve the Nexus mod ID of Mod Organizer for this game.
Example: For Skyrim SE, the mod ID of MO2 is 6194. You can find the mod ID in the URL:
https://www.nexusmods.com/skyrimspecialedition/mods/6194
Returns:
The Nexus mod ID of Mod Organizer for this game.
"""
...
@abc.abstractmethod
def primaryPlugins(self) -> List[str]:
"""
Returns:
The list of plugins that are part of the game and not considered optional.
"""
...
@abc.abstractmethod
def primarySources(self) -> List[str]:
"""
Retrieve primary alternative 'short' names for this game.
This is used to determine if a Nexus (or other) download source should be considered
as a primary source for the game so that it is not flagged as an alternative one.
Returns:
The list of primary alternative 'short' names for this game, or an empty list.
"""
...
@abc.abstractmethod
def savegameExtension(self) -> str:
"""
Returns:
The file extension of save games for this game.
"""
...
@abc.abstractmethod
def savegameSEExtension(self) -> str:
    """
    Retrieve the file extension used for Script Extender co-saves.

    Returns:
        The file extension of Script Extender saves for this game.
    """
    ...
@abc.abstractmethod
def savesDirectory(self) -> PyQt5.QtCore.QDir:
    """
    Retrieve the save game location for this game.

    Returns:
        The directory where save games are stored.
    """
    ...
@abc.abstractmethod
def setGamePath(self, path: str):
    """
    Set the path to the managed game.

    This is called during instance creation if the game is not auto-detected and the user has
    to specify the installation location. This is not called if the game has been auto-detected,
    so `isInstalled()` should call this.

    Args:
        path: Path to the game installation.
    """
    ...
@abc.abstractmethod
def setGameVariant(self, variant: str):
    """
    Set the game variant.

    If there are multiple variants of game (as returned by `gameVariants()`), this will be
    called on start with the user-selected game variant.

    Args:
        variant: The game variant selected by the user.
    """
    ...
@abc.abstractmethod
def sortMechanism(self) -> "SortMechanism":
    """
    Retrieve how plugins are sorted for this game.

    Returns:
        The sort mechanism for this game.
    """
    ...
@abc.abstractmethod
def steamAPPId(self) -> str:
    """
    Retrieve the Steam app ID for this game.

    If the game is not available on Steam, this should return an empty string.

    If a game is available in multiple versions, those might have different app ids. The plugin
    should try to return the right one.

    Returns:
        The Steam app ID for this game. Should be empty for games not available on steam.
    """
    ...
@abc.abstractmethod
def validShortNames(self) -> List[str]:
    """
    Retrieve the valid 'short' names for this game.

    This is used to determine if a Nexus download is valid for the current game since not all
    game variants have their own nexus pages and others can handle downloads from other nexus
    game pages and should be allowed to do so (e.g., you can install some Skyrim LE mod even
    when using Skyrim SE).

    The short name should be considered the primary handler for a directly supported game
    for purposes of auto-launching an instance.

    Returns:
        The list of valid short names for this game.
    """
    ...
class IPluginInstaller(IPlugin):
    """
    This is the top-level class for installer. Actual installers should inherit either:

    - `IPluginInstallerSimple` if the installer can work directly with the archive. This is what
      most installers use.
    - `IPluginInstallerCustom` if the installer needs to perform custom operations. This is only
      used by the external NCC installer and the OMOD installer.
    """

    @abc.abstractmethod
    def isArchiveSupported(self, tree: "IFileTree") -> bool:
        """
        Check if the given file tree corresponds to a supported archive for this installer.

        Args:
            tree: The tree representing the content of the archive.

        Returns:
            True if this installer can handle the archive, False otherwise.
        """
        ...

    @abc.abstractmethod
    def isManualInstaller(self) -> bool:
        """
        Check if this installer is a manual installer.

        Returns:
            True if this installer is a manual installer, False otherwise.
        """
        ...

    @abc.abstractmethod
    def priority(self) -> int:
        """
        Retrieve the priority of this installer.

        If multiple installers are able to handle an archive, the one with the highest priority wins.

        Returns:
            The priority of this installer.
        """
        ...

    def setInstallationManager(self, manager: "IInstallationManager"):
        """
        Set the installation manager for this installer.

        Python plugins usually do not need to re-implement this and can directly access the installation
        manager using `_manager()`.

        Args:
            manager: The installation manager.
        """
        ...

    def setParentWidget(self, parent: PyQt5.QtWidgets.QWidget):
        """
        Set the parent widget for this installer.

        Python plugins usually do not need to re-implement this and can directly access the parent
        widget using `_parentWidget()` once the UI has been initialized.

        Args:
            parent: The parent widget.
        """
        ...
class IPluginInstallerCustom(IPluginInstaller):
    """
    Custom installer for mods. Custom installers receive the archive name and have to go
    from there. They have to be able to extract the archive themselves.

    Example of such installers are the external NCC installer or the OMOD installer.
    """

    def __init__(self): ...

    def _manager(self) -> "IInstallationManager":
        """
        Returns:
            The installation manager.
        """
        ...

    def _parentWidget(self) -> PyQt5.QtWidgets.QWidget:
        """
        Returns:
            The parent widget.
        """
        ...

    @abc.abstractmethod
    def install(
        self,
        mod_name: "GuessedString",
        game_name: str,
        archive_name: str,
        version: str,
        nexus_id: int,
    ) -> "InstallResult":
        """
        Install the given archive.

        The mod needs to be created by calling `IOrganizer.createMod` first.

        Args:
            mod_name: Name of the mod to install. As an input parameter this is the suggested name (e.g., from meta data). The installer may change this parameter to rename the mod.
            game_name: Name of the game for which the mod is installed.
            archive_name: Name of the archive to install.
            version: Version of the mod. May be empty if the version is not yet known. The plugin is responsible for setting the version on the created mod.
            nexus_id: ID of the mod or -1 if unknown. The plugin is responsible for setting the mod ID for the created mod.

        Returns:
            The result of the installation process.
        """
        ...

    @overload
    @abc.abstractmethod
    def isArchiveSupported(self, tree: "IFileTree") -> bool:
        """
        Check if the given file tree corresponds to a supported archive for this installer.

        Args:
            tree: The tree representing the content of the archive.

        Returns:
            True if this installer can handle the archive, False otherwise.
        """
        ...

    @overload
    @abc.abstractmethod
    def isArchiveSupported(self, archive_name: str) -> bool:
        """
        Check if the given file is a supported archive for this installer.

        Args:
            archive_name: Name of the archive.

        Returns:
            True if this installer can handle the archive, False otherwise.
        """
        ...

    @abc.abstractmethod
    def supportedExtensions(self) -> List[str]:
        """
        Returns:
            A list of file extensions that this installer can handle.
        """
        ...
class IPluginInstallerSimple(IPluginInstaller):
    """
    Simple installer for mods. Simple installers only deal with an in-memory structure
    representing the archive and can modify what to install and where by editing this structure.
    Actually extracting the archive is handled by the manager.
    """

    def __init__(self): ...

    def _manager(self) -> "IInstallationManager":
        """
        Returns:
            The installation manager.
        """
        ...

    def _parentWidget(self) -> PyQt5.QtWidgets.QWidget:
        """
        Returns:
            The parent widget.
        """
        ...

    @abc.abstractmethod
    def install(
        self, name: "GuessedString", tree: "IFileTree", version: str, nexus_id: int
    ) -> Union[
        "InstallResult", "IFileTree", Tuple["InstallResult", "IFileTree", str, int]
    ]:
        """
        Install a mod from an archive filetree.

        The installer can modify the given tree and use the manager to extract or create new
        files.

        This method returns different types of objects depending on the actual result of the
        installation. The C++ bindings for this method always return a tuple (result, tree,
        version, id).

        Args:
            name: Name of the mod to install. As an input parameter this is the suggested name (e.g., from meta data). The installer may change this parameter to rename the mod.
            tree: In-memory representation of the archive content.
            version: Version of the mod, or an empty string if unknown.
            nexus_id: ID of the mod, or -1 if unknown.

        Returns:
            In case of failure, the result of the installation, otherwise the modified tree or
            a tuple (result, tree, version, id) containing the result of the installation, the
            modified tree, the new version and the new ID. The tuple can be returned even if the
            installation did not succeed.
        """
        ...
class IPluginList:
    """
    Primary interface to the list of plugins.
    """

    def isMaster(self, name: str) -> bool:
        """
        Check if a plugin is a master file (basically a library, referenced by other plugins).

        In gamebryo games, a master file will usually have a .esm file extension but technically
        an esp can be flagged as master and an esm might not be.

        Args:
            name: Filename of the plugin (without path but with file extension).

        Returns:
            True if the given plugin is a master plugin, False otherwise or if the file does not exist.
        """
        ...

    def loadOrder(self, name: str) -> int:
        """
        Retrieve the load order of a plugin.

        Args:
            name: Filename of the plugin (without path but with file extension).

        Returns:
            The load order of the plugin (the order in which the game loads it). If all plugins are enabled this
            is the same as the priority but disabled plugins will have a load order of -1. This also returns -1
            if the plugin does not exist.
        """
        ...

    def masters(self, name: str) -> List[str]:
        """
        Retrieve the list of masters required for a plugin.

        Args:
            name: Filename of the plugin (without path but with file extension).

        Returns:
            The list of masters for the plugin (filenames with extension, no path).
        """
        ...

    def onPluginMoved(self, callback: Callable[[str, int, int], None]) -> bool:
        """
        Install a new handler to be called when a plugin is moved.

        Args:
            callback: The function to call when a plugin is moved. The first parameter is the plugin name, the second the old priority of the plugin and the third one the new priority.

        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...

    def onRefreshed(self, callback: Callable[[], None]) -> bool:
        """
        Install a new handler to be called when the list of plugins is refreshed.

        Args:
            callback: The function to call when the list of plugins is refreshed.

        Returns:
            True if the handler was installed properly (there are currently no reasons for this to fail).
        """
        ...

    def origin(self, name: str) -> str:
        """
        Retrieve the origin of a plugin. This is either the (internal) name of a mod, `"overwrite"` or `"data"`.

        The internal name of a mod can differ from the display name for disambiguation.

        Args:
            name: Filename of the plugin (without path but with file extension).

        Returns:
            The name of the origin of the plugin, or an empty string if the plugin does not exist.
        """
        ...

    def pluginNames(self) -> List[str]:
        """
        Returns:
            The list of all plugin names.
        """
        ...

    def priority(self, name: str) -> int:
        """
        Retrieve the priority of a plugin.

        The higher the priority, the more important.

        Args:
            name: Filename of the plugin (without path but with file extension).

        Returns:
            The priority of the given plugin, or -1 if the plugin does not exist.
        """
        ...

    def setLoadOrder(self, loadorder: List[str]):
        """
        Set the load order.

        Plugins not included in the list will be placed at highest priority in the order they
        were before.

        Args:
            loadorder: The new load order, specified by the list of plugin names, sorted.
        """
        ...

    def setState(self, name: str, state: int):
        """
        Set the state of a plugin.

        Args:
            name: Filename of the plugin (without path but with file extension).
            state: New state of the plugin (`INACTIVE` or `ACTIVE`).
        """
        ...

    def state(self, name: str) -> int:
        """
        Retrieve the state of a plugin.

        Args:
            name: Filename of the plugin (without path but with file extension).

        Returns:
            The state of the plugin.
        """
        ...
class IPluginModPage(IPlugin):
    """
    Plugin interface for a mod page (a web page from which mods can be downloaded).
    """

    def __init__(self): ...

    @abc.abstractmethod
    def _parentWidget(self) -> PyQt5.QtWidgets.QWidget:
        """
        Returns:
            The parent widget.
        """
        ...

    @abc.abstractmethod
    def displayName(self) -> str:
        """
        Returns:
            The name of the page as displayed in the UI.
        """
        ...

    @abc.abstractmethod
    def handlesDownload(
        self,
        page_url: PyQt5.QtCore.QUrl,
        download_url: PyQt5.QtCore.QUrl,
        fileinfo: "ModRepositoryFileInfo",
    ) -> bool:
        """
        Check if the plugin handles the specified download.

        Args:
            page_url: URL of the page that contains the download link.
            download_url: The download URL.
            fileinfo: Not usable in python.

        Returns:
            True if this plugin wants to handle the specified download, False otherwise.
        """
        ...

    @abc.abstractmethod
    def icon(self) -> PyQt5.QtGui.QIcon:
        """
        Returns:
            The icon to display with the page.
        """
        ...

    @abc.abstractmethod
    def pageURL(self) -> PyQt5.QtCore.QUrl:
        """
        Returns:
            The URL to open when the user wants to visit this mod page.
        """
        ...

    def setParentWidget(self, parent: PyQt5.QtWidgets.QWidget):
        """
        Set the parent widget for this mod page.

        Python plugins usually do not need to re-implement this and can directly access the parent
        widget using `_parentWidget()` once the UI has been initialized.

        Args:
            parent: The parent widget.
        """
        ...

    @abc.abstractmethod
    def useIntegratedBrowser(self) -> bool:
        """
        Indicates if the page should be displayed in the integrated browser.

        Unless the page provides a special means of starting downloads (like the nxm:// url schema
        on nexus), it will not be possible to handle downloads unless the integrated browser is used!

        Returns:
            True if the page should be opened in the integrated browser, False otherwise.
        """
        ...
class IPluginPreview(IPlugin):
    """
    These plugins add support for previewing files in the data pane. Right now all image formats supported
    by qt are implemented (including dds) but no audio files and no 3d mesh formats.
    """

    def __init__(self): ...

    @abc.abstractmethod
    def genFilePreview(
        self, filename: str, max_size: PyQt5.QtCore.QSize
    ) -> PyQt5.QtWidgets.QWidget:
        """
        Generate a preview for the specified file.

        Args:
            filename: Path to the file to preview.
            max_size: Maximum size of the generated widget.

        Returns:
            The widget showing a preview of the file.
        """
        ...

    @abc.abstractmethod
    def supportedExtensions(self) -> List[str]:
        """
        Returns:
            The list of file extensions that are supported by this preview plugin.
        """
        ...
class IPluginTool(IPlugin):
    """
    This is the simplest of plugin interfaces. Such plugins simply place an icon inside the tools submenu
    and get invoked when the user clicks it. They are expected to have a user interface of some sort. These
    are almost like independent applications except they can access all Mod Organizer interfaces like querying
    and modifying the current profile, mod list, load order, use MO to install mods and so on. A tool plugin
    can (and should!) integrate its UI as a window inside MO and thus doesn't have to initialize a windows
    application itself.
    """

    def __init__(self): ...

    @abc.abstractmethod
    def _parentWidget(self) -> PyQt5.QtWidgets.QWidget:
        """
        Returns:
            The parent widget.
        """
        ...

    @abc.abstractmethod
    def display(self):
        """
        Called when the user starts the tool.
        """
        ...

    @abc.abstractmethod
    def displayName(self) -> str:
        """
        Returns:
            The display name for this tool, as shown in the tool menu.
        """
        ...

    @abc.abstractmethod
    def icon(self) -> PyQt5.QtGui.QIcon:
        """
        Returns:
            The icon for this tool, or a default-constructed QIcon().
        """
        ...

    def setParentWidget(self, parent: PyQt5.QtWidgets.QWidget):
        """
        Set the parent widget for this tool.

        Python plugins usually do not need to re-implement this and can directly access the parent
        widget using `_parentWidget()` once the UI has been initialized.

        Args:
            parent: The parent widget.
        """
        ...

    @abc.abstractmethod
    def tooltip(self) -> str:
        """
        Returns:
            The tooltip for this tool.
        """
        ...
class IProfile:
    """
    Interface to a Mod Organizer profile.
    """

    def absolutePath(self) -> str:
        """
        Returns:
            The absolute path to the profile folder.
        """
        ...

    def invalidationActive(self) -> Tuple[bool, bool]:
        # NOTE(review): the meaning of the two booleans is not documented in this
        # stub — presumably (active, from this profile); confirm against the C++ API.
        ...

    def localSavesEnabled(self) -> bool:
        """
        Returns:
            True if local (profile-specific) saves are enabled, False otherwise.
        """
        ...

    def localSettingsEnabled(self) -> bool:
        """
        Returns:
            True if local (profile-specific) game settings are enabled, False otherwise.
        """
        ...

    def name(self) -> str:
        """
        Returns:
            The name of the profile.
        """
        ...
class ISaveGame:
    """
    Base class for information about what is in a save game.
    """

    def __init__(self): ...

    def allFiles(self) -> List[str]:
        """
        Returns:
            The list of all files related to this save.
        """
        ...

    def getCreationTime(self) -> PyQt5.QtCore.QDateTime:
        """
        Retrieve the creation time of the save.

        The creation time of a save is not always the same as the creation time of
        the file containing the save.

        Returns:
            The creation time of the save.
        """
        ...

    def getFilename(self) -> str:
        """
        Returns:
            The name of the (main) save file.
        """
        ...

    def getSaveGroupIdentifier(self) -> str:
        """
        Retrieve the name of the group these files belong to.

        The name can be used to identify sets of saves to transfer between profiles. For
        RPG games, this is usually the name of a character.

        Returns:
            The group identifier for this save game.
        """
        ...

    def hasScriptExtenderFile(self) -> bool:
        """
        Returns:
            True if this save game has an associated script extender save, False otherwise.
        """
        ...
class ISaveGameInfoWidget(PyQt5.QtWidgets.QWidget):
    """
    Base class for a save game info widget.
    """

    def __init__(self, parent: Optional[PyQt5.QtWidgets.QWidget] = None):
        """
        Args:
            parent: Parent widget.
        """
        ...

    def _widget(self) -> PyQt5.QtWidgets.QWidget:
        """
        Returns:
            The underlying `QWidget`.
        """
        ...

    @abc.abstractmethod
    def setSave(self, save: str):
        """
        Set the save file to display in this widget.

        Args:
            save: Path to the save file.
        """
        ...
class LocalSavegames(abc.ABC):
    """
    Game feature for games that support profile-local save games.
    """

    def __init__(self): ...

    @abc.abstractmethod
    def mappings(self, profile_save_dir: PyQt5.QtCore.QDir) -> List["Mapping"]:
        """
        Args:
            profile_save_dir: Save directory of the profile.

        Returns:
            The file mappings to use for local saves.
        """
        ...

    @abc.abstractmethod
    def prepareProfile(self, profile: "IProfile") -> bool:
        """
        Prepare the given profile for local saves.

        Args:
            profile: The profile to prepare.

        Returns:
            True on success, False otherwise.
        """
        ...
class Mapping:
    """
    Represents a mapping from a source file or directory to a destination.
    """

    # Whether the target should be created if it does not exist.
    @property
    def createTarget(self) -> bool: ...
    @createTarget.setter
    def createTarget(self, arg0: bool): ...

    # Destination path of the mapping.
    @property
    def destination(self) -> str: ...
    @destination.setter
    def destination(self, arg0: str): ...

    # Whether the mapping refers to a directory (True) or a file (False).
    @property
    def isDirectory(self) -> bool: ...
    @isDirectory.setter
    def isDirectory(self, arg0: bool): ...

    # Source path of the mapping.
    @property
    def source(self) -> str: ...
    @source.setter
    def source(self, arg0: str): ...

    def __init__(self):
        """
        Creates an empty Mapping.
        """
        ...
class ModDataChecker(abc.ABC):
    """
    Game feature that is used to check the content of a data tree.
    """

    class CheckReturn(Enum):
        INVALID = ...
        FIXABLE = ...
        VALID = ...
        def __and__(self, other: int) -> bool: ...
        def __or__(self, other: int) -> bool: ...
        def __rand__(self, other: int) -> bool: ...
        # NOTE(review): `__ro__` looks like a typo for `__ror__` — confirm
        # against the C++ bindings before relying on this name.
        def __ro__(self, other: int) -> bool: ...

    FIXABLE: "ModDataChecker.CheckReturn" = ...
    INVALID: "ModDataChecker.CheckReturn" = ...
    VALID: "ModDataChecker.CheckReturn" = ...

    def __init__(self): ...

    @abc.abstractmethod
    def dataLooksValid(self, filetree: "IFileTree") -> "ModDataChecker.CheckReturn":
        """
        Check that the given filetree represent a valid mod layout, or can be easily
        fixed.

        This method is mainly used during installation (to find which installer should
        be used or to recurse into multi-level archives), or to quickly indicates to a
        user if a mod looks valid.

        This method does not have to be exact, it only has to indicate if the given tree
        looks like a valid mod or not by quickly checking the structure (heavy operations
        should be avoided).

        If the tree can be fixed by the `fix()` method, this method should return `FIXABLE`.
        `FIXABLE` should only be returned when it is guaranteed that `fix()` can fix the tree.

        Args:
            filetree: The tree starting at the root of the "data" folder.

        Returns:
            Whether the tree is invalid, fixable or valid.
        """
        ...

    def fix(self, filetree: "IFileTree") -> Optional["IFileTree"]:
        """
        Try to fix the given tree.

        This method is used during installation to try to fix invalid archives and will only be
        called if dataLooksValid returned `FIXABLE`.

        Args:
            filetree: The tree to try to fix. Can be modified during the process.

        Returns:
            The fixed tree, or a null pointer if the tree could not be fixed.
        """
        ...
class ModDataContent(abc.ABC):
    """
    The ModDataContent feature is used (when available) to indicate to users the content
    of mods in the "Content" column.

    The feature exposes a list of possible content types, each associated with an ID, a name
    and an icon. The icon is the path to either:

    - A Qt resource or;
    - A file on the disk.

    In order to facilitate the implementation, MO2 already provides a set of icons that can
    be used. Those icons are all under ``:/MO/gui/content`` (e.g. ``:/MO/gui/content/plugin`` or ``:/MO/gui/content/music``).

    The list of available icons is:

    - ``plugin``: |plugin-icon|
    - ``skyproc``: |skyproc-icon|
    - ``texture``: |texture-icon|
    - ``music``: |music-icon|
    - ``sound``: |sound-icon|
    - ``interface``: |interface-icon|
    - ``skse``: |skse-icon|
    - ``script``: |script-icon|
    - ``mesh``: |mesh-icon|
    - ``string``: |string-icon|
    - ``bsa``: |bsa-icon|
    - ``menu``: |menu-icon|
    - ``inifile``: |inifile-icon|
    - ``modgroup``: |modgroup-icon|

    .. |plugin-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/jigsaw-piece.png
    .. |skyproc-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/hand-of-god.png
    .. |texture-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/empty-chessboard.png
    .. |music-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/double-quaver.png
    .. |sound-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/lyre.png
    .. |interface-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/usable.png
    .. |skse-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/checkbox-tree.png
    .. |script-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/tinker.png
    .. |mesh-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/breastplate.png
    .. |string-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/conversation.png
    .. |bsa-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/locked-chest.png
    .. |menu-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/config.png
    .. |inifile-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/feather-and-scroll.png
    .. |modgroup-icon| image:: https://raw.githubusercontent.com/ModOrganizer2/modorganizer/master/src/resources/contents/xedit.png
    """

    class Content:
        """
        A single content type (ID, name and icon) that can appear in the Content column.
        """

        @property
        def icon(self) -> str: ...
        @property
        def id(self) -> int: ...
        @property
        def name(self) -> str: ...

        def __init__(self, id: int, name: str, icon: str, filter_only: bool = False):
            """
            Args:
                id: ID of this content.
                name: Name of this content.
                icon: Path to the icon for this content. Can be either a path to an image on the disk, or to a resource. Can be an empty string if filterOnly
                    is true.
                filter_only: Indicates if the content should only be shown in the filter criteria and not in the actual Content column.
            """
            ...

        def isOnlyForFilter(self) -> bool:
            """
            Returns:
                True if this content is only meant to be used as a filter criteria.
            """
            ...

    def __init__(self): ...

    @abc.abstractmethod
    def getAllContents(self) -> List["ModDataContent.Content"]:
        """
        Returns:
            The list of all possible contents for the corresponding game.
        """
        ...

    @abc.abstractmethod
    def getContentsFor(self, filetree: "IFileTree") -> List[int]:
        """
        Retrieve the list of contents in the given tree.

        Args:
            filetree: The tree corresponding to the mod to retrieve contents for.

        Returns:
            The IDs of the content in the given tree.
        """
        ...
class ModRepositoryFileInfo:
    """
    Information about a file hosted on a mod repository (e.g. Nexus).
    """

    @property
    def categoryID(self) -> int: ...
    @categoryID.setter
    def categoryID(self, arg0: int): ...
    @property
    def description(self) -> str: ...
    @description.setter
    def description(self, arg0: str): ...
    @property
    def fileCategory(self) -> int: ...
    @fileCategory.setter
    def fileCategory(self, arg0: int): ...
    @property
    def fileID(self) -> int: ...
    @fileID.setter
    def fileID(self, arg0: int): ...
    @property
    def fileName(self) -> str: ...
    @fileName.setter
    def fileName(self, arg0: str): ...
    @property
    def fileSize(self) -> int: ...
    @fileSize.setter
    def fileSize(self, arg0: int): ...
    @property
    def fileTime(self) -> PyQt5.QtCore.QDateTime: ...
    @fileTime.setter
    def fileTime(self, arg0: PyQt5.QtCore.QDateTime): ...
    @property
    def gameName(self) -> str: ...
    @gameName.setter
    def gameName(self, arg0: str): ...
    @property
    def modID(self) -> int: ...
    @modID.setter
    def modID(self, arg0: int): ...
    @property
    def modName(self) -> str: ...
    @modName.setter
    def modName(self, arg0: str): ...
    @property
    def name(self) -> str: ...
    @name.setter
    def name(self, arg0: str): ...
    @property
    def newestVersion(self) -> "VersionInfo": ...
    @newestVersion.setter
    def newestVersion(self, arg0: "VersionInfo"): ...
    @property
    def repository(self) -> str: ...
    @repository.setter
    def repository(self, arg0: str): ...
    @property
    def uri(self) -> str: ...
    @uri.setter
    def uri(self, arg0: str): ...
    @property
    def userData(self) -> MoVariant: ...
    @userData.setter
    def userData(self, arg0: MoVariant): ...
    @property
    def version(self) -> "VersionInfo": ...
    @version.setter
    def version(self, arg0: "VersionInfo"): ...

    @overload
    def __init__(self, other: "ModRepositoryFileInfo"):
        # Copy constructor.
        ...

    @overload
    def __init__(
        self,
        game_name: Optional[str] = None,
        mod_id: Optional[int] = None,
        file_id: Optional[int] = None,
    ): ...

    def __str__(self) -> str: ...

    @staticmethod
    def createFromJson(data: str) -> "ModRepositoryFileInfo":
        """
        Create a ModRepositoryFileInfo from a JSON string.

        Args:
            data: The JSON string.

        Returns:
            The corresponding ModRepositoryFileInfo.
        """
        ...
class PluginSetting:
    """
    Class to hold the user-configurable parameters a plugin accepts. The purpose of this class is
    only to inform the application what settings to offer to the user, it does not hold the actual value.
    """

    # Default value of the setting.
    @property
    def default_value(self) -> MoVariant: ...
    @default_value.setter
    def default_value(self, arg0: MoVariant): ...

    # Human-readable description of the setting.
    @property
    def description(self) -> str: ...
    @description.setter
    def description(self, arg0: str): ...

    # Name (key) of the setting.
    @property
    def key(self) -> str: ...
    @key.setter
    def key(self, arg0: str): ...

    def __init__(self, key: str, description: str, default_value: MoVariant):
        """
        Args:
            key: Name of the setting.
            description: Description of the setting.
            default_value: Default value of the setting.
        """
        ...
class SaveGameInfo(abc.ABC):
    """
    Feature to get hold of stuff to do with save games.
    """

    def __init__(self): ...

    @abc.abstractmethod
    def getMissingAssets(self, filepath: str) -> Dict[str, List[str]]:
        """
        Retrieve missing assets from the save.

        Args:
            filepath: Path to the save file.

        Returns:
            A collection of missing assets and the modules that can supply those assets.
        """
        ...

    @abc.abstractmethod
    def getSaveGameInfo(self, filepath: str) -> "ISaveGame":
        """
        Retrieve the information about the supplied save game.

        Args:
            filepath: Path to the save file.

        Returns:
            A SaveGame corresponding to the given save file.
        """
        ...

    @abc.abstractmethod
    def getSaveGameWidget(
        self, parent: PyQt5.QtWidgets.QWidget
    ) -> Optional["ISaveGameInfoWidget"]:
        """
        Retrieve a widget to display over the save game list.

        This method is allowed to return `None` in case no widget has been implemented.

        Args:
            parent: The parent widget.

        Returns:
            A SaveGameInfoWidget to display information about save game.
        """
        ...

    @abc.abstractmethod
    def hasScriptExtenderSave(self, filepath: str) -> bool:
        """
        Check whether or not the save has a paired script extender save.

        Args:
            filepath: Path to the save file.

        Returns:
            True if the given save file has a paired script extender save, False otherwise.
        """
        ...
class ScriptExtender(abc.ABC):
    """
    Game feature describing the script extender of a game (e.g. SKSE for Skyrim).
    """

    def __init__(self): ...

    @abc.abstractmethod
    def BinaryName(self) -> str:
        """
        Returns:
            The name of the script extender binary.
        """
        ...

    @abc.abstractmethod
    def PluginPath(self) -> str:
        """
        Returns:
            The script extender plugin path, relative to the data folder.
        """
        ...

    @abc.abstractmethod
    def getArch(self) -> int:
        """
        Returns:
            The CPU platform of the extender.
        """
        ...

    @abc.abstractmethod
    def getExtenderVersion(self) -> str:
        """
        Returns:
            The version of the script extender.
        """
        ...

    @abc.abstractmethod
    def isInstalled(self) -> bool:
        """
        Returns:
            True if the script extender is installed, False otherwise.
        """
        ...

    @abc.abstractmethod
    def loaderName(self) -> str:
        """
        Returns:
            The loader to use to ensure the game runs with the script extender.
        """
        ...

    @abc.abstractmethod
    def loaderPath(self) -> str:
        """
        Returns:
            The fullpath to the script extender loader.
        """
        ...

    @abc.abstractmethod
    def saveGameAttachmentExtensions(self) -> List[str]:
        """
        Returns:
            Additional savegame attachments.
        """
        ...
class UnmanagedMods(abc.ABC):
    """
    Game feature to expose mods that exist outside Mod Organizer's management (e.g. DLCs).
    """

    def __init__(self): ...

    @abc.abstractmethod
    def displayName(self, mod_name: str) -> str:
        """
        Retrieve the display name of a given mod.

        Args:
            mod_name: Internal name of the mod.

        Returns:
            The display name of the mod.
        """
        ...

    @abc.abstractmethod
    def mods(self, official_only: bool) -> List[str]:
        """
        Retrieve the list of unmanaged mods for the corresponding game.

        Args:
            official_only: Retrieve only unmanaged official mods.

        Returns:
            The list of unmanaged mods (internal names).
        """
        ...

    @abc.abstractmethod
    def referenceFile(self, mod_name: str) -> PyQt5.QtCore.QFileInfo:
        """
        Retrieve the reference file for the requested mod.

        Example: For Bethesda games, the reference file may be the main
        plugin (esp or esm) for the game or a DLCs.

        Args:
            mod_name: Internal name of the mod.

        Returns:
            The reference file (absolute path) for the requested mod.
        """
        ...

    @abc.abstractmethod
    def secondaryFiles(self, mod_name: str) -> List[str]:
        """
        Retrieve the secondary files for the requested mod.

        Example: For Bethesda games, the secondary files may be the archives
        corresponding to the reference file.

        Args:
            mod_name: Internal name of the mod.

        Returns:
            The secondary files (absolute paths) for the requested mod.
        """
        ...
class VersionInfo:
    """
    Represents the version of a mod or plugin.
    """

    @overload
    def __init__(self):
        """
        Construct an invalid VersionInfo.
        """
        ...

    @overload
    def __init__(self, value: str, scheme: "VersionScheme" = VersionScheme.DISCOVER):
        """
        Construct a VersionInfo by parsing the given string according to the given scheme.

        Args:
            value: String to parse.
            scheme: Scheme to use to parse the string.
        """
        ...

    @overload
    def __init__(
        self,
        major: int,
        minor: int,
        subminor: int,
        subsubminor: int,
        release_type: "ReleaseType" = ReleaseType.FINAL,
    ):
        """
        Construct a VersionInfo using the given elements.

        Args:
            major: Major version.
            minor: Minor version.
            subminor: Subminor version.
            subsubminor: Subsubminor version.
            release_type: Type of release.
        """
        ...

    @overload
    def __init__(
        self,
        major: int,
        minor: int,
        subminor: int,
        release_type: "ReleaseType" = ReleaseType.FINAL,
    ):
        """
        Construct a VersionInfo using the given elements.

        Args:
            major: Major version.
            minor: Minor version.
            subminor: Subminor version.
            release_type: Type of release.
        """
        ...

    @overload
    def __eq__(self, arg2: "VersionInfo") -> bool: ...
    @overload
    def __eq__(self, other: object) -> bool: ...
    def __ge__(self, arg2: "VersionInfo") -> bool: ...
    def __gt__(self, arg2: "VersionInfo") -> bool: ...
    def __le__(self, arg2: "VersionInfo") -> bool: ...
    def __lt__(self, arg2: "VersionInfo") -> bool: ...
    @overload
    def __ne__(self, arg2: "VersionInfo") -> bool: ...
    @overload
    def __ne__(self, other: object) -> bool: ...

    def __str__(self) -> str:
        """
        Returns:
            See `canonicalString()`.
        """
        ...

    def canonicalString(self) -> str:
        """
        Returns:
            A canonical string representing this version, that can be stored and then parsed using the parse() method.
        """
        ...

    def clear(self):
        """
        Resets this VersionInfo to an invalid version.
        """
        ...

    def displayString(self, forced_segments: int = 2) -> str:
        """
        Args:
            forced_segments: The number of version segments to display even if the version is 0. 1 is major, 2 is major and minor, etc. The only implemented ranges are (-inf,2] for major/minor, [3] for major/minor/subminor,
                and [4,inf) for major/minor/subminor/subsubminor. This only applies to versions with a regular scheme.

        Returns:
            A string for display to the user. The returned string may not contain enough information to reconstruct this version info.
        """
        ...

    def isValid(self) -> bool:
        """
        Returns:
            True if this VersionInfo is valid, False otherwise.
        """
        ...

    def parse(
        self,
        value: str,
        scheme: "VersionScheme" = VersionScheme.DISCOVER,
        manual_input: bool = False,
    ):
        """
        Update this VersionInfo by parsing the given string using the given scheme.

        Args:
            value: String to parse.
            scheme: Scheme to use to parse the string.
            manual_input: True if the given string should be treated as user input.
        """
        ...

    def scheme(self) -> "VersionScheme":
        """
        Returns:
            The version scheme in effect for this VersionInfo.
        """
        ...
|
from .base_methods import BaseWebDriver
from .locators import LoginPageLocators, MainPageLocators
from .environment import ProdEnv
class LoginPage(BaseWebDriver):
    """Page object for the login page and the post-login landing page."""

    def go_to_login_page(self):
        """Navigate to the login page."""
        self.find_and_click_button(*LoginPageLocators.LOGIN_PAGE)

    def should_be_login_page(self):
        # Check if there is a login form frame on the page available.
        # Return the check result so callers/tests can assert on it.
        return self.should_be_login_form()

    def should_be_login_form(self):
        # Previously the presence-check result was silently discarded, making
        # this check a no-op; return it so it can actually be asserted.
        return self.is_element_present(*LoginPageLocators.LOGIN_FORM)

    def enter_login(self):
        """Type the configured e-mail into the login form."""
        self.find_and_send_key(*LoginPageLocators.EMAIL_FIELD, ProdEnv.EMAIL_VALUE)

    def enter_password(self):
        """Type the configured password into the login form."""
        self.find_and_send_key(*LoginPageLocators.PASSWORD_FIELD, ProdEnv.PASSWORD_VALUE)

    def click_subscribe(self):
        # NOTE(review): despite the name, this clicks the LOGIN button —
        # confirm whether the method should be renamed at its call sites.
        self.find_and_click_button(*LoginPageLocators.BUTTON_LOGIN)

    def is_page_open(self):
        # To make sure that the page is open, check that there is a button
        # "UPGRADE" on the page available; return the result for assertions.
        return self.is_element_present(*MainPageLocators.BUTTON_UPGRADE)

    def is_logged_user_correct(self):
        # To make sure that the correct user is logged in, compare the user's
        # email to the email entered into the login form.
        self.find_and_click_button(*MainPageLocators.MENU_ICON)
        return self.is_element_text_correct(*MainPageLocators.USER_VALID, ProdEnv.EMAIL_VALUE)
|
from database_helper import Database
class StatsHelper():
    """Thin query layer over Database for the stats dashboard."""

    def __init__(self):
        self.database = Database()
        print("Stats Helping initialising!")

    def select_all_employee(self):
        """Rows for the content table: every employee record."""
        return self.database.fetch_all("SELECT * FROM employeedata")

    def select_all(self):
        """Per-employee totals (in hours) for exercise, sleep, social time and work."""
        return self.database.fetch_all("SELECT b.employee_name, FORMAT(SUM(a.exercise_time)/60, 2) AS totalExerciseTime, FORMAT(SUM(a.sleep_time)/60, 2) AS totalSleep, FORMAT(SUM(a.social_interaction_time)/60, 2) AS totalSocialTime, FORMAT(SUM(a.work_time)/60, 2) AS totalWork FROM dayroutine as a join employeedata b on a.employee_id = b.employee_id GROUP BY b.employee_name")

    def join_all(self):
        """Raw day-routine rows joined with their employee records."""
        return self.database.fetch_all("SELECT * FROM dayroutine as a left join employeedata b on a.employee_id = b.employee_id")

    def calculate_total_exercise(self):
        """Grand total of exercise hours across all employees."""
        return self.database.fetch_all("SELECT FORMAT(SUM(exercise_time)/60,2) as totalExerciseTime FROM dayroutine")

    def highest_month(self):
        """Month with the most exercise time ("most active month")."""
        # DESC added: the original sorted ascending, so it returned the LEAST
        # active month despite the function's name.
        return self.database.fetch_all("SELECT month, SUM(exercise_time) as totalExerciseTime FROM dayroutine GROUP BY month ORDER BY totalExerciseTime DESC LIMIT 0,1")

    def totalStaff(self):
        """Head count of employees."""
        return self.database.fetch_all("SELECT COUNT(employee_name) FROM employeedata")

    def avgEmployeeAge(self):
        """Average employee age, formatted to two decimals."""
        return self.database.fetch_all("SELECT FORMAT(SUM(age)/COUNT(employee_name),2) FROM employeedata")

    def calculate_another_stat(self):
        """Placeholder for further statistics (combine queries with Python logic here)."""
        # all_rows = self.database.fetch_all("")
        return None
|
from django.db import models
from datetime import datetime
from product import Product
class Service(Product):
    """Inventory service offering: a Product persisted in its own 'services' table."""
    class Meta:
        db_table = 'services'
        app_label = 'inventory'
|
from orun.views.dashboard import Dashboard
from orun.contrib import admin
@admin.register('test.dashboard')
class TestDashboard(Dashboard):
    """Minimal dashboard registered under 'test.dashboard'."""
    def get(self, request):
        # Returns raw dashboard markup directly rather than rendering a template.
        return '<dashboard>teste</dashboard>'
|
# from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
class TestIndexView(APITestCase):
    """Smoke tests for the API root endpoint."""

    def test_index_view_status_code(self):
        """GET on the root path returns HTTP 200."""
        resp = self.client.get('')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_index_view_json_data(self):
        """GET on the root path returns the greeting payload."""
        resp = self.client.get('')
        self.assertEqual(
            resp.json(),
            {'message': 'Hello AWS Lambda From GitHub Actions!!!'},
        )
# class TestSklearnLinearRegressionView(APITestCase):
# def test_SklearnLinearRegression_view_status_code(self):
# response = self.client.get('/sklearn_LinearRegression/')
# self.assertEqual(response.status_code, status.HTTP_200_OK)
# response = self.client.get('/sklearn_LinearRegression/?GREScore=337&TOEFLScore=118&UniversityRating=4&SOP=4.5&LOR=4.5&CGPA=9.65&Research=1')
# self.assertEqual(response.status_code, status.HTTP_200_OK)
# def test_SklearnLinearRegression_view_json_data_success(self):
# response = self.client.get('/sklearn_LinearRegression/?GREScore=337&TOEFLScore=118&UniversityRating=4&SOP=4.5&LOR=4.5&CGPA=9.65&Research=1')
# self.assertEqual(response.json()['success'], True)
# def test_SklearnLinearRegression_view_json_data_Invalid(self):
# response = self.client.get('/sklearn_LinearRegression/?blablabla=ggff')
# self.assertEqual(response.json()['success'], False)
class TestTensorflowNeuralNetworksView(APITestCase):
    """Tests for the TensorFlow neural-network prediction endpoint."""

    def test_TensorflowNeuralNetworks_view_status_code(self):
        """The endpoint returns 200 both without and with a full query string."""
        bare = self.client.get('/tensorflow_neuralNetworks/')
        self.assertEqual(bare.status_code, status.HTTP_200_OK)
        full = self.client.get('/tensorflow_neuralNetworks/?GREScore=337&TOEFLScore=118&UniversityRating=4&SOP=4.5&LOR=4.5&CGPA=9.65&Research=1')
        self.assertEqual(full.status_code, status.HTTP_200_OK)

    def test_TensorflowNeuralNetworks_view_json_data_success(self):
        """A complete, valid query reports success."""
        resp = self.client.get('/tensorflow_neuralNetworks/?GREScore=337&TOEFLScore=118&UniversityRating=4&SOP=4.5&LOR=4.5&CGPA=9.65&Research=1')
        self.assertEqual(resp.json()['success'], True)

    def test_TensorflowNeuralNetworks_view_json_data_Invalid(self):
        """A nonsense query reports failure."""
        resp = self.client.get('/tensorflow_neuralNetworks/?blablabla=ggff')
        self.assertEqual(resp.json()['success'], False)
## Problem 16
def digitsum(n):
    """Return the sum of the decimal digits of the non-negative integer n."""
    # Iterate the digits directly instead of indexing by position.
    return sum(int(digit) for digit in str(n))
# print(digitsum(2**1000))
## Probelm 22
alphabet=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
def str_to_num(lettre):
    """Return the 1-based alphabet position of an uppercase letter (A=1 ... Z=26).

    Returns None for any other character, matching the original's implicit None.
    """
    # O(1) arithmetic instead of scanning the module-level alphabet list.
    if "A" <= lettre <= "Z":
        return ord(lettre) - ord("A") + 1
    return None
# print(str_to_num('D'))
def score_prenom(prenom):
    """Return the sum of the alphabet positions of the letters in an uppercase name."""
    return sum(str_to_num(lettre) for lettre in prenom)
# print(score_prenom("COLIN"))
def names_score(liste_noms):
    """Return the total score of a name list: alphabetical rank (1-based) times name score.

    Uses sorted() so the caller's list is no longer mutated (the original
    sorted it in place).
    """
    return sum((rank + 1) * score_prenom(nom)
               for rank, nom in enumerate(sorted(liste_noms)))
# Small sample list for names_score (Euler problem 22 uses the full names file).
L = ["MARY","PATRICIA","LINDA","BARBARA","ELIZABETH","JENNIFER"]
# print (names_score(L))
## Problem 55
def test_palindrome(n):
    """Return True if the decimal digits of n read the same forwards and backwards."""
    # Compare against the reversed slice; the original's two-pointer walk also
    # shadowed the builtin name `bool`.
    digits = str(n)
    return digits == digits[::-1]
#print(test_palindrome(15851))
def reverse(n):
    """Return the integer whose decimal digits are those of n reversed (trailing zeros drop)."""
    return int(str(n)[::-1])
# print (reverse(1984))
def test_lychrel(n, num_iteration):
    """Reverse-and-add from n; True if no palindrome appears before the budget (>50 steps) runs out."""
    current = n
    iteration = num_iteration
    # Iterative form of the original tail recursion: same sequence of
    # reverse-and-add, palindrome check, then budget check.
    while True:
        current += reverse(current)
        if test_palindrome(current):
            return False
        if iteration > 50:
            return True
        iteration += 1
# print(test_lychrel(349,1))
def compte_lych(n):
    """Count the candidate Lychrel numbers strictly below n."""
    return sum(1 for candidate in range(n) if test_lychrel(candidate, 1))
# print(compte_lych(10000))
|
from crontab import CronTab
# Remove every cron entry tagged with the 'checkstatus' comment from this
# user's crontab, then persist the change.
my_cron = CronTab(user='user')
my_cron.remove_all(comment='checkstatus')
my_cron.write()
|
def load_config(config_data):
    """Validate that config_data has a 'swift' section containing all required keys.

    Raises:
        Exception: if the 'swift' section is missing or incomplete.
    """
    if 'swift' not in config_data:
        raise Exception("swift section is mandatory in the configuration")
    required_parameters = ('auth_url', 'user_id', 'project_id', 'password', 'region')
    # Guard clause replaces the original's empty `pass` branch.
    if not set(required_parameters) <= set(config_data['swift']):
        raise Exception('You must provide {} to access to Swift'.format(required_parameters))
|
#!/usr/bin/env python
"""
From http://stefaanlippens.net/svnignorescript
"""
import optparse
import os
import subprocess
def svn_propget(svnprop, path):
    """Fetch a svn property of a file or directory.

    Returns the property value as text, stripped of surrounding whitespace.
    """
    subproc = subprocess.Popen(
        ["svn", "propget", svnprop, path],
        stdout=subprocess.PIPE,
        universal_newlines=True,
    )
    # communicate() replaces wait()+read(): waiting first can deadlock if the
    # child fills the stdout pipe buffer, and read() returned bytes under
    # Python 3 while callers treat the result as str.
    stdout, _ = subproc.communicate()
    return stdout.strip()
def svn_propset(svnprop, value, path):
    """Set a svn property on a file or directory."""
    process = subprocess.Popen(["svn", "propset", svnprop, value, path])
    process.wait()
def main():
    """Add (or remove, with -u) each given path to its directory's svn:ignore property."""
    option_parser = optparse.OptionParser(
        usage="usage: %prog FILE|DIR [FILE|DIR...]\n" +
        " Ignore (or unignore) subversion files and dirs."
    )
    option_parser.add_option(
        "-u",
        "--unignore",
        action="store_true",
        dest="unignore",
        default=False,
        help="unignore the file"
    )
    option_parser.add_option(
        "--no-sort",
        action="store_false",
        dest="sort",
        default=True,
        help="do not sort property values"
    )
    (options, args) = option_parser.parse_args()
    if len(args) == 0:
        option_parser.error("no files or dirs specified")
    for path in args:
        # svn:ignore lives on the parent directory and lists one name per line.
        dirpath, filename = os.path.split(path)
        # Get svn:ignore property, with standardized line endings.
        svnignore_data = svn_propget("svn:ignore", dirpath).replace("\r", "")
        # Transform to a list.
        svnignore_data = svnignore_data.split("\n")
        # Add or remove filename.
        if options.unignore:
            if filename not in svnignore_data:
                continue  # nothing to unignore for this path
            svnignore_data.remove(filename)
        else:
            if filename in svnignore_data:
                continue  # already ignored
            svnignore_data.append(filename)
        # Optionally sort.
        if options.sort:
            svnignore_data = sorted(svnignore_data)
        svn_propset("svn:ignore", "\n".join(svnignore_data), dirpath)

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import nysol._nysolshell_core as n_core
from nysol.mcmd.nysollib.core import NysolMOD_CORE
from nysol.mcmd.nysollib import nysolutil as nutil
class Nysol_Mnormalize(NysolMOD_CORE):
    """nysol m-command wrapper for `mnormalize` (field normalization)."""
    # Keyword lists (parameters, input kwds, output kwds) come from the native core.
    _kwd ,_inkwd,_outkwd = n_core.getparalist("mnormalize",3)
    def __init__(self,*args, **kw_args) :
        super(Nysol_Mnormalize,self).__init__("mnormalize",nutil.args2dict(args,kw_args,Nysol_Mnormalize._kwd))
    def mnormalize(self,*args, **kw_args):
        # Chained form: someMod.mnormalize(...) appends this module after `self`.
        return Nysol_Mnormalize(nutil.args2dict(args,kw_args,Nysol_Mnormalize._kwd)).addPre(self)
# Expose mnormalize as a chainable method on every nysol module.
setattr(NysolMOD_CORE, "mnormalize", mnormalize)
|
import os
# If a block holds a single statement, it may share the line with its header.
if os.name == 'posix': print('You are cool')

def say_hello():
    """Return the fixed greeting string."""
    return 'hello'

print(say_hello())
#清单理解
class Employee:
    """Simple record of an employee's name and salary."""

    def __init__(self, name, salary):
        self.name = name
        self.salary = salary

    def __str__(self):
        # Name immediately followed by the salary, with no separator.
        return self.name + str(self.salary)
# Build a small staff list to compare list comprehensions with filter().
employees = []
employees.append(Employee('Tom',30000))
employees.append(Employee('Satish',40000))
employees.append(Employee('Harry',50000))
# List employees earning more than 40000 using a list comprehension.
sal_more_than_40k = [e for e in employees if e.salary >40000]
print("Listing " + str(len(sal_more_than_40k))+" employees with sal > 40K")
for emp in sal_more_than_40k:
    print(emp)
# Same idea with filter() and a lambda. filter() is a lazy iterator in
# Python 3, so materialize it: len() on a filter object raises TypeError and
# a second iteration would yield nothing.
sal_more_than_30k = list(filter(lambda e : e.salary > 30000,employees))
print("Listing " + str(len(sal_more_than_30k)) + " employees with sal > 30k")
for emp in sal_more_than_30k:
    print(emp)
print(repr(sal_more_than_30k))
print(sal_more_than_30k)
# NOTE: the original ended with eval(repr(sal_more_than_30k)); repr() of a list
# of plain objects is not valid Python source, so that eval always raised.
|
# generator.py
#
# This file is used to generate unique imagery from a trained vae mode
# It is required to run vae.py before this program can be used
#
# Dependancies
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras import backend as K
import sys
import pickle
import os
import random
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
#Data Builder File: ./data_builder.py
import data_builder as datab
#Data Pipeline for FMOW dataset: ./pipeline.py
from pipeline import load_im, load_manifest, load_manifest_count, load_manifest_rand
#MNIST
from keras.datasets import mnist
#Image Processing
from PIL import Image
# Random Data Generator
def genRandData(size):
    """Draw one sample of width `size` from the global TF normal generator; shape (1, size)."""
    generator = tf.random.get_global_generator()
    return generator.normal(shape=[1, size])
# Loads a locally specified file
# Not in use/depreciated
def loadLocal():
    """Load ./inf.jpg, scale pixels to [0, 1], and return it as a 1-element batch array."""
    l = []
    pim = Image.open("inf.jpg")
    pim_np = np.asarray(pim)
    pim_np = pim_np/255.
    l.append(pim_np)
    return np.asarray(l)
# Latent-space perturbation: encode an image, add random normal noise, decode.
# encoder/decoder : trained model halves
# base_im : raw image in numpy (x, y, RGB) format
# dim : size of the latent space
def perturbGen(encoder, decoder, base_im, dim):
    """Return the reconstruction of base_im after adding random noise in latent space."""
    latent = encoder.predict(base_im)
    noisy_latent = latent + genRandData(dim)
    return decoder.predict(noisy_latent)
# Perturb exactly one latent dimension to a random value within a threshold.
# encoder/decoder : trained model halves
# base_im : raw image in numpy (x, y, RGB) format
# target_dimension : index of the latent dimension to overwrite
# threshold : (min, max) tuple for the random integer
def perturbGenSingleThreshold(encoder, decoder, base_im, target_dimension, threshold):
    """Encode base_im, overwrite one latent dimension with a random integer, decode."""
    latent = encoder.predict(base_im)
    latent[0][target_dimension] = random.randint(threshold[0], threshold[1])
    return decoder.predict(latent)
# Pure sampling: decode a freshly drawn random normal latent vector.
# decoder : decoder model
# dim : size of the latent space
def randomGen(decoder, dim):
    """Generate one image from a random latent vector of width dim."""
    return decoder.predict(genRandData(dim))
# - Data Needed for Loading the VAE
# CIFAR10 Filename List for importer
CIFAR10_Filenames = ['data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5']
LATENT_DIM = 512
HIDDEN_LAYER_DIM = 2048
IMAGE_DIMENSIONS = (512,512)
input_shape = IMAGE_DIMENSIONS + (3,)
#CIFAR 10 Data
#pic_data = datab.load_data_sets(CIFAR10_Filenames)
# Load the model parts (saved previously by vae.py).
decoder = tf.keras.models.load_model('model/VAE_decoder')
encoder = tf.keras.models.load_model('model/VAE_encoder')
#VAE = tf.keras.models.load_model('model/VAE_full')
##########################################
## CONFIGURATION VALUES ##
# Change these to edit the number of images created
# Number of rows
rows = 5
# Number of images per row
ims_per_row = 5
#Image Plotting Here
total_plot = rows*ims_per_row
# Setup a grid
fig = plt.figure(figsize=(ims_per_row, rows))
fig.set_size_inches(40,40)
grid = ImageGrid(fig, 111, nrows_ncols=(ims_per_row, rows), axes_pad=0.1)
##########################################
#Load Manifest: whitespace-separated list of training image paths.
mf_file = open("train.manifest", "r")
data = mf_file.read()
training_manifest = data.split(" ")
mf_file.close()
# Generate new images here
# NOTE(review): the loop steps by 3 yet always draws into grid[0], so each
# iteration overwrites the same grid cell — grid[i] was probably intended; confirm.
for i in range(0, total_plot, 3):
    base_im = load_manifest_rand(training_manifest, IMAGE_DIMENSIONS, 1)
    #Generation happens here. Three predifined functions are availible. Any configuration can be created, however.
    gen_im = perturbGen(encoder, decoder, base_im, 512)
    # gen_im = perturbGenSingleThreshold(encoder, decoder, base_im, 0, (-10,10))
    # gen_im = randomGen(decoder, 512)
    #Plot images here
    grid[0].set_aspect('equal')
    grid[0].imshow(gen_im[0], cmap = plt.cm.binary)
#Output image data here
#plt.show()
fig.savefig("GenImages.png")
|
import pandas as pd
import numpy as np
from autumn.core.project import (
Project,
ParameterSet,
load_timeseries,
build_rel_path,
use_tuned_proposal_sds,
)
from autumn.calibration import Calibration
from autumn.calibration.priors import UniformPrior, TruncNormalPrior
from autumn.calibration.targets import NormalTarget
from autumn.models.sm_sir import base_params, build_model, set_up_random_process
from autumn.settings import Region, Models
# Load and configure model parameters.
mle_path = build_rel_path("params/mle-params.yml")
# Baseline = region baseline overlaid with the previously-found MLE parameters.
baseline_params = base_params.update(build_rel_path("params/baseline.yml")).update(
    mle_path, calibration_format=True
)
param_set = ParameterSet(baseline=baseline_params, scenarios=[])
# Load and configure calibration settings.
ts_set = load_timeseries(build_rel_path("timeseries.json"))
# Fit notifications over two disjoint time windows (days 199-415 and 560-730).
notifications = pd.concat(
    [ts_set["notifications"].loc[s] for s in (slice(199, 415), slice(560, 730))]
)
infection_deaths = ts_set["infection_deaths"].loc[199:]
# Only the CDR parameter is currently calibrated; earlier priors are kept
# commented out for reference.
priors = [
    # TruncNormalPrior(
    #     "sojourn.compartment_periods_calculated.exposed.total_period",
    #     mean=5.5,
    #     stdev=0.7,
    #     trunc_range=(1.0, np.inf),
    # ),
    # TruncNormalPrior(
    #     "sojourn.compartment_periods_calculated.active.total_period",
    #     mean=6.5,
    #     stdev=0.77,
    #     trunc_range=(4.0, np.inf),
    # ),
    # TruncNormalPrior(
    #     "history.natural_immunity_duration", mean=365.0, stdev=120.0, trunc_range=(180.0, np.inf)
    # ),
    # TruncNormalPrior(
    #     "vaccination.vacc_part_effect_duration",
    #     mean=365.0,
    #     stdev=120.0,
    #     trunc_range=(180.0, np.inf),
    # ),
    # UniformPrior(
    #     "contact_rate", (0.05, 0.08), jumping_stdev=0.01
    # ),  # Tighten up the lower limit to avoid wild runss
    # UniformPrior("infectious_seed", (50.0, 500.0), jumping_stdev=40.0),
    UniformPrior("testing_to_detection.assumed_cdr_parameter", (0.004, 0.012), jumping_stdev=0.002),
    # UniformPrior(
    #     "mobility.microdistancing.behaviour.parameters.end_asymptote",
    #     (0.1, 0.3),
    #     jumping_stdev=0.05,
    # ),
    # UniformPrior("voc_emergence.delta.contact_rate_multiplier", (1.8, 2.4), jumping_stdev=0.1),
    # UniformPrior("voc_emergence.delta.start_time", (330.0, 390.0), jumping_stdev=30.0),
]
targets = [
    NormalTarget(notifications),
    # NormalTarget(infection_deaths)
]
# Optional random-process component, configured from the baseline parameters.
if baseline_params.to_dict()["activate_random_process"]:
    rp_params = baseline_params.to_dict()["random_process"]
    rp = set_up_random_process(rp_params["time"]["start"], rp_params["time"]["end"], rp_params["order"], rp_params["time"]["step"])
    # rp = None  # use this when tuning proposal jumping steps
else:
    rp = None
# Load proposal sds from yml file
# use_tuned_proposal_sds(priors, build_rel_path("proposal_sds.yml"))
calibration = Calibration(
    priors=priors, targets=targets, random_process=rp, metropolis_init="current_params"
)
# FIXME: Replace with flexible Python plot request API.
import json
plot_spec_filepath = build_rel_path("timeseries.json")
with open(plot_spec_filepath) as f:
    plot_spec = json.load(f)
# Create and register the project.
project = Project(
    Region.MYANMAR, Models.SM_SIR, build_model, param_set, calibration, plots=plot_spec
)
# from autumn.calibration.proposal_tuning import perform_all_params_proposal_tuning
# perform_all_params_proposal_tuning(project, calibration, priors, n_points=50, relative_likelihood_reduction=0.2)
|
#!/usr/bin/python
# a is list, b is list
# l is list of combined with a before b
# then turn l to make b before a
l = []
a1 = 1
a2 = 6
b1 = 3 # a1 < b1 < a2
b2 = 10 # b2 > a2
aLen = a2 - a1
bLen = b2 - b1
lLen = aLen + bLen
# Build l = segment a (values a1..a2-1) followed by segment b (values b1..b2-1).
for i in range(a1,a2):
    l.append(i)
for i in range(b1,b2):
    l.append(i)

def show(l=l, aLen=aLen):
    """Print l with a visual gap after the first aLen elements.

    NOTE(review): the defaults bind the module-level l/aLen at definition time.
    """
    print('[', end='')
    for i in range(0, aLen):
        print(str(l[i])+',', end='')
    print(' ', end='')
    for i in range(aLen, len(l)):
        print(str(l[i])+',', end='')
    print(']')

show()
# Classic three-reversal rotation: reverse a, reverse b, then reverse the whole
# list; this swaps the two segments in place.
# Reverse segment a
for i in range(0, aLen//2):
    t = l[i]
    l[i] = l[aLen - 1 - i]
    l[aLen - 1 - i] = t
# Reverse segment b
for i in range(0, bLen//2):
    t = l[aLen + i]
    l[aLen + i] = l[lLen - 1 - i]
    l[lLen - 1 - i] = t
# Reverse the whole list
for i in range(0, lLen//2):
    t = l[i]
    l[i] = l[lLen - 1 - i]
    l[lLen - 1 - i] = t
show(l, bLen)
|
from scipy.sparse import csr_matrix
from scipy.sparse import vstack
import networkx as nx
import cPickle
import numpy as np
import sys
import tensorflow as tf
import scipy.io as sio
class data(object):
    """Loads node attributes, labels, and graph structure for a dataset."""

    def __init__(self, args):
        self.dataset = args.dataset
        self.all_x = self.read_attributes(args.attr_filename)
        self.all_y = self.read_label(args.label_filename)
        self.graph = self.read_network(args.edge_filename)
        self.adj_matrix = self.gen_network_adjmatrix(args.edge_filename)

    def read_attributes(self, filename):
        """Read space-separated attribute rows (skipping the header line), binarized to {0, 1}."""
        with open(filename, "r") as f:
            lines = f.readlines()
        features = []
        for line in lines[1:]:
            features.append(line.strip("\n\r").split(" "))
        features = np.array(features, dtype=np.float32)
        features[features > 0] = 1.0  # feature binarization
        return features

    def read_attributes_mat(self, filename):
        """Read a feature matrix (key 'feature') from a MATLAB .mat file, binarized to {0, 1}."""
        mat = sio.loadmat(filename)
        features = mat['feature']
        features[features > 0] = 1.0
        return features

    def read_label(self, labelFile):
        """Read "nodeID label" lines; return the label array and fill self.labelDict (label -> node list)."""
        with open(labelFile, "r") as f:
            lines = f.readlines()
        labels = []
        self.labelDict = dict()
        for line in lines:
            l = line.strip("\n\r").split(" ")
            nodeID = int(l[0])
            label = int(l[1])
            labels.append(label)
            # dict.has_key() was removed in Python 3; setdefault covers both
            # the "new label" and "existing label" branches.
            self.labelDict.setdefault(label, []).append(nodeID)
        return np.array(labels, dtype=np.int32)

    def read_network(self, filename):
        """Read an undirected edge list into an adjacency dict (node -> neighbor list)."""
        with open(filename, "r") as f:
            lines = f.readlines()
        graph = dict()
        for line in lines:
            l = line.strip("\n\r").split(" ")
            node1 = int(l[0])
            node2 = int(l[1])
            # Record the edge in both directions (undirected graph).
            graph.setdefault(node1, []).append(node2)
            graph.setdefault(node2, []).append(node1)
        return graph

    def gen_network_adjmatrix(self, filename):
        """Build a dense undirected adjacency matrix from the edge list via networkx."""
        G = nx.read_edgelist(filename, nodetype=int, create_using=nx.DiGraph())
        G = G.to_undirected()
        return nx.to_numpy_matrix(G)
|
from django.contrib import admin
from app01 import models
# Register your models here.
# shi123456  -- NOTE(review): looks like a leaked credential/note; remove it.
# Expose the app01 content models in the Django admin site.
admin.site.register(models.Direction)
admin.site.register(models.Classification)
admin.site.register(models.Level)
admin.site.register(models.Video)
admin.site.register(models.Img)
# -*- coding: utf-8 -*-
import re
import scrapy
from ..items import NewsLink
from scrapy_redis.spiders import RedisSpider
class VietnamNewsPulsSpider(RedisSpider):
    """Crawls vietnamplus.vn listing pages, yielding article links and following pagination."""
    name = 'vietnam_news_puls'
    allowed_domains = ['www.vietnamplus.vn']
    start_urls = [
        # 'https://www.vietnamplus.vn/chude/bien-dong/205.vnp',
        # 'https://www.vietnamplus.vn/chude/xay-dung-dang/820.vnp',
        # 'https://www.vietnamplus.vn/chude/hop-quoc-hoi/186.vnp',
        # 'https://www.vietnamplus.vn/chude/gia-dien/1030.vnp',
        # 'https://www.vietnamplus.vn/chude/duong-day-danh-bac-hang-nghin-ty/1035.vnp',
        # 'https://www.vietnamplus.vn/chude/dich-ta-lon-chau-phi/1012.vnp',
        # 'https://www.vietnamplus.vn/chude/brexit/770.vnp',
        # 'https://www.vietnamplus.vn/chude/tinh-hinh-venezuela/768.vnp',
        # 'https://www.vietnamplus.vn/chinhtri.vnp',
        # 'https://www.vietnamplus.vn/kinhte.vnp',
        # 'https://www.vietnamplus.vn/xahoi.vnp',
        # 'https://www.vietnamplus.vn/doisong.vnp',
        # 'https://www.vietnamplus.vn/vanhoa.vnp',
        # 'https://www.vietnamplus.vn/thethao.vnp',
        # 'https://www.vietnamplus.vn/khoahoc.vnp',
        # 'https://www.vietnamplus.vn/congnghe.vnp',
        # 'https://www.vietnamplus.vn/otoxemay.vnp',
        # 'https://www.vietnamplus.vn/moitruong.vnp',
        # 'https://www.vietnamplus.vn/dulich.vnp',
        # 'https://www.vietnamplus.vn/tinthitruong.vnp',
        # 'https://www.vietnamplus.vn/chuyenla.vnp',
        # 'https://www.vietnamplus.vn/rapnewsplus.vnp',
        # 'https://www.vietnamplus.vn/newsgame.vnp',
        # 'https://www.vietnamplus.vn/infographics.vnp',
        # 'https://www.vietnamplus.vn/timeline.vnp',
        # 'https://www.vietnamplus.vn/topicnews.vnp',
        # 'https://www.vietnamplus.vn/photo360.vnp',
        # 'https://www.vietnamplus.vn/megastory.vnp',
        # 'https://www.vietnamplus.vn/chude/bao-so-12-gay-nhieu-thiet-hai/885.vnp',
        "https://www.vietnamplus.vn/chude/nissanrenaultmitsubishi-va-be-boi-cua-ong-ghosn/987.vnp"
    ]
    redis_key = 'vietnam_news_plus_content'
    custom_settings = {
        'REDIS_HOST': '47.105.132.57',
        'REDIS_PORT': 6379,
        'REDIS_PARAMS': {
            # NOTE(review): empty Redis password on a public IP — secure this.
            'password': '',
            'db': 0
        },
    }

    def parse(self, response):
        """Yield a NewsLink item per article on the page, then request the next listing page."""
        links = response.xpath('//article/h2/a/@href').extract()
        urls = ["https://www.vietnamplus.vn" + link for link in links]
        for url in urls:
            item = NewsLink()
            item['url'] = url
            yield item
        # Listing pages are addressed as .../trang<N>.vnp; page 1 has no "trang" part.
        current_page = re.findall(r"trang(.*?)\.vnp", response.url)
        if current_page:
            current_page = int(current_page[0])
        else:
            current_page = 1
        last_page = response.xpath('//*[@id="mainContent_ContentList1_pager"]/ul/li[last()]/a/text()').extract()
        # Guard against a missing pager: the original indexed last_page[0]
        # unconditionally and raised IndexError on pages without pagination.
        if last_page and current_page < int(last_page[0]):
            if "trang" in response.url:
                next_part = response.url.split('/')[-1]
                next_page = response.url.replace(next_part, '') + "trang{}.vnp".format(current_page + 1)
            else:
                next_part = response.url.split('.')[-1]
                next_page = response.url.replace("."+next_part, '/') + "trang{}.vnp".format(current_page + 1)
            yield scrapy.Request(url=next_page, callback=self.parse, dont_filter=True)
|
from upm import pyupm_i2clcd as lcd
from upm import pyupm_led as Led
from upm import pyupm_temperature as T
from upm import pyupm_light as light_as_hum
from upm import pyupm_tsl2561 as upmTsl2561
import serial
import requests
import time
# Runtime state and hardware handles for the greenhouse control loop.
light_status=0
ir_end = time.time()
ir_finished = True
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62) #lcd part
led = Led.Led(6) #light comp
ir = Led.Led(8) #irrigation relay
temp = T.Temperature(0) #temperature sensor
hum = light_as_hum.Light(3) #humidity sensor
light = upmTsl2561.TSL2561() #light sensor
led.off()
ir.off()
# Timestamps carry a +8h offset (presumably UTC+8 local time — confirm).
timeNow = time.time()+8*60*60
timeOld = timeNow-61   # forces the first LCD clock refresh immediately
myLcd.cursorOff() #lcd part
myLcd.setCursor(1,0) #lcd part
myLcd.write('Hello World') #lcd part
i=0
led_on = True
O_counter = 0
last_post=timeOld
def auto():
    """Autonomous mode: toggle the grow light by lux level and irrigate while the soil reads dry."""
    humidity = hum.value()
    ligVal = light.getLux()
    if ligVal < 30:
        led.on()
        print("led on")
    else:
        led.off()
    start = time.time()
    # Run the irrigation relay while the soil is dry, for at most 6 seconds.
    # NOTE(review): ir.off() is never called here — the relay appears to stay
    # on until the manual-mode branch of the main loop turns it off; confirm.
    while(hum.value()>2 and time.time()-start<6):
        ir.on()
# Main control loop: refresh the LCD clock once a minute, post sensor readings
# every 2.5 s, then apply the remote control-center instructions.
while (1):
    timeNow = time.time()+8*60*60
    if timeNow-timeOld > 60 :
        # Once a minute: show the wall-clock time and cycle the backlight tint.
        timeOld = timeNow
        disp=time.ctime(timeNow)
        i = i%100 +10
        myLcd.setColor(50+i, 50+i, 50+i) #lcd part
        myLcd.home() #lcd part
        myLcd.write(disp) #lcd part
    if timeNow-last_post>2.5:
        # Read sensors, show them on the LCD, and POST them to the server.
        celsius = temp.value()
        humidity = hum.value()
        ligVal = light.getLux()
        out = "Tem:%d Hum:%d Lig:%d" % (celsius,humidity,ligVal)
        myLcd.setCursor(1,0) #lcd part
        myLcd.write(out) #lcd part
        print(out)
        payload = {'humidity':(2-humidity)*20+50,'temperature':celsius,'light':ligVal}
        r = requests.post("http://www.sjtume.cn/api/v1.0/prp/update",data=payload)
        response = r.json()
        print(response['#status'])
        if response['#status']=="200 Successfully Updated":
            O_counter = O_counter+1
            last_post = timeNow
            if O_counter==4: #lcd part
                myLcd.setCursor(1,0) #lcd part
                myLcd.write("Update Succeeded") #lcd part
                O_counter = O_counter%4 #lcd part
        else:
            myLcd.setCursor(1,0)
            myLcd.write("Update Failed")
        # Fetch the remote mode/light/irrigation instruction.
        r = requests.get("http://www.sjtume.cn/api/v1.0/prp/control_center/all_instruction")
        response = r.json()
        #print(response)
        if response['mode']==0:
            auto()
        elif response['mode']==1:
            # Manual mode: apply the light switch state only when it changed.
            if light_status!=response['light']:
                light_status = response['light']
                if light_status==1:
                    led.on()
                else:
                    led.off()
            ir_time=response['ir']
            if ir_time>0:
                # Positive value: start irrigation for ir_time seconds, then
                # acknowledge by resetting the server-side value to -1.
                ir_end = time.time()+ir_time
                print("new ir_time")
                ir.on()
                requests.get("http://www.sjtume.cn/api/v1.0/prp/control_center/start_ir/-1")
                print("set ir_time to -1")
            elif ir_time == -1:
                # -1 means "running": stop once the deadline passes.
                if time.time()>ir_end:
                    ir.off()
                    #print("ir turned off")
            else:
                print("ir_time error")
        else:
            print("failed to read mode")
    print(time.time())
|
import hashlib
import datetime
class Block:
    """A single blockchain block linking to its predecessor by hash."""

    def __init__(self, previous_hash, data):
        self.previous_hash = previous_hash
        self.data = data
        self.timestamp = datetime.datetime.now()
        self.hash = self.compute_hash()

    def compute_hash(self):
        """Return the SHA-256 hex digest of previous_hash + data + timestamp."""
        payload = str(self.previous_hash) + str(self.data) + str(self.timestamp)
        return hashlib.sha256(payload.encode('utf-8')).hexdigest()

    @staticmethod
    def next(data, last_block):
        """Create the successor of last_block carrying the given data."""
        return Block(last_block.hash, data)
# Interactively build a chain: a genesis block plus `blocks` user-entered blocks.
blocks = int(input("Enter no of blocks:"))
b = Block("0", "Genesis block, Author: Vikram Kumar")
blockchain = []
blockchain.append(b)
for i in range(blocks):
    print("Enter data of block %d:"% i)
    data = input()
    previous = b
    b = Block.next(data, previous)
    blockchain.append(b)
# Dump the chain. NOTE(review): `block` is the display index while the loop
# variable `blocks` shadows the block-count int read above — confusing naming.
block = 0
for blocks in blockchain:
    print("block :: %d {\n\tdata:\t\t%s\n\tprevious_hash:\t%s\n\tcurrent_hash:\t%s\n}\n"
        %(block,blocks.data, blocks.previous_hash, blocks.hash))
    block += 1
|
'''
Python program that reads three numbers and computes their average.
'''
# Read the numbers from the keyboard (prompts kept in Portuguese):
n1 = input ("Informe o primeiro numero: ")
n2 = input ("Informe o segundo numero: ")
n3 = input ("Informe o terceiro numero: ")
# Convert to numeric:
n1 = float (n1)
n2 = float (n2)
n3 = float (n3)
# Print the average:
media = (n1 + n2 + n3) / 3.0
print (media)
|
import csv, argparse, sys
from collections import defaultdict
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Counts the number of valid/invalid samples in a Tobii data file")
# The Tobii developer's guide suggests that samples
# with a validity code of 2 or higher should be
# considered "invalid"
parser.add_argument("-v", "--validmax", type=int, default=1,
                    help="Maximum valid code (Tobii recommends 1)")
parser.add_argument("--tsv", action="store_true")
parser.add_argument("csvfile", type=str)
args = parser.parse_args()
# Count up left/right validity codes
codes = defaultdict(int)
with open(args.csvfile, "r") as csv_file:
    dialect = "excel-tab" if args.tsv else "excel"
    reader = csv.DictReader(csv_file, dialect=dialect)
    for row in reader:
        try:
            v_left = int(row["ValidityLeft"])
            v_right = int(row["ValidityRight"])
        except (KeyError, TypeError, ValueError):
            # Skip only rows with missing/non-numeric validity columns; the
            # original bare `except` also swallowed KeyboardInterrupt etc.
            continue
        codes[(v_left, v_right)] += 1
# Print results
total = sum(codes.values())
print("{0:,} samples".format(total))
if total == 0:
    sys.exit(0)  # No samples!
valid = 0
invalid = 0
# dict.iteritems() does not exist in Python 3; items() works on both versions.
for (v_both, count) in codes.items():
    if sum(v_both) > args.validmax:
        invalid += count
    else:
        valid += count
print("Valid: {0:,} ({1:.0f}%)".format(valid,
    valid / float(total) * 100))
print("Invalid: {0:,} ({1:.0f}%)".format(invalid,
    invalid / float(total) * 100))
|
import numpy as np
import os
import sys
import io
import random
import time
import tqdm
import hnswlib
import re
# Descriptor dimensionality, k-means iteration cap, number of clusters, and the
# expected total number of SIFT rows across the dataset.
DIM=128
MAX_ITER = 30
CLUSTER_NUM = 150000
SIFT_FEATURES = 44039833
def count_sift_features(inputpath):
    """Count the SIFT feature rows across every file under inputpath, printing progress.

    Each file: one header line, a count line `m`, then `m` feature rows.
    """
    print('Read sift Features')
    start_time = time.time()
    filelist = sorted(os.listdir(inputpath + '.'))
    random.shuffle(filelist)
    index = 0
    fileIndex = 0
    for entry in filelist:
        # `with` closes each handle; the original leaked one per file.
        with open(inputpath + entry, 'r') as fileObj:
            fileObj.readline()
            m = int(fileObj.readline().rstrip())
            for _ in range(m):
                fileObj.readline()
                index += 1
        print(index)
        fileIndex += 1
        print(fileIndex)
    print('Read delf fetures Total time: %.3f s' % (time.time() - start_time))
def read_sift_features(inputpath):
    """Read all SIFT descriptor files under inputpath into one (SIFT_FEATURES, DIM) array.

    Each file: a header line, a count line `m`, then `m` rows whose fields from
    index 5 onward are the descriptor components.
    """
    print('Read sift Features')
    start_time = time.time()
    filelist = sorted(os.listdir(inputpath + '.'))
    random.shuffle(filelist)
    allDesc = np.empty((SIFT_FEATURES, DIM))
    index = 0
    fileIndex = 0
    for entry in filelist:
        fileIndex += 1
        # `with` closes each handle; the original leaked one per file.
        with open(inputpath + entry, 'r') as fileObj:
            fileObj.readline()
            m = int(fileObj.readline().rstrip())
            for _ in range(m):
                data = fileObj.readline().rstrip()
                dataSplit = data.split(' ')[5:]
                # Materialize the ints: under Python 3, map() is a lazy
                # iterator and the original slice assignment would fail.
                allDesc[index, :] = [int(v) for v in dataSplit]
                index += 1
    print('Read delf fetures Total time: %.3f s' % (time.time() - start_time))
    return allDesc
def get_random_clusters(sift_features, cluster_num=None):
    """Pick cluster_num distinct random rows of sift_features as initial cluster centers.

    cluster_num defaults to the module-level CLUSTER_NUM for backward compatibility.
    """
    print('Get random clusters')
    if cluster_num is None:
        cluster_num = CLUSTER_NUM
    # Sample from the rows the array actually has instead of the hard-coded
    # SIFT_FEATURES constant, so any input size works without IndexError.
    idx = random.sample(range(sift_features.shape[0]), cluster_num)
    return sift_features[idx, :]
def kMeans(delf_features,clusters):
    """Approximate k-means: assign each feature to its nearest center via an HNSW index.

    Runs MAX_ITER rounds; empty clusters are re-seeded with a random feature.
    The final HNSW index is saved to disk on the last iteration.
    """
    start_time = time.time()
    for i in tqdm.trange(MAX_ITER):
        # Rebuild the approximate-NN index over the current centers each round.
        print('Build Tree')
        p = hnswlib.Index(space='l2', dim=DIM)
        p.init_index(max_elements=CLUSTER_NUM, ef_construction=100, M=16)
        p.add_items(clusters)
        clus_size = np.zeros(CLUSTER_NUM)
        new_centers = np.zeros((CLUSTER_NUM,DIM))
        print('Search KNN')
        index = 0
        for feature in delf_features:
            # Accumulate each feature into its nearest center's running sum.
            labels, distances = p.knn_query(feature, k=1)
            new_centers[labels[0,0]] += feature
            clus_size[labels[0,0]]+=1
            index+=1
            sys.stdout.write("\r Percent : %.3f" % (index/float(SIFT_FEATURES)))
            sys.stdout.flush()
        print('\n')
        print('Re-assing cluster')
        for j in range(CLUSTER_NUM):
            if clus_size[j] > 0:
                # New center = mean of the assigned features.
                clusters[j] = new_centers[j] / clus_size[j]
            else:
                # Re-seed an empty cluster with a random feature row.
                rval = random.randint(0, SIFT_FEATURES-1)
                clusters[j] = delf_features[rval]
                print('Empty cluster replaced')
        if i==MAX_ITER-1:
            p.save_index("hsm_150000_sift_30iter_20lan_500_balanc.bin")
    print('Total time: %.3f s' % (time.time() - start_time))
def main(inputpath):
    """Entry point: read features from a directory, seed random centers, run k-means."""
    if os.path.isdir(inputpath):
        #count_sift_features(inputpath)
        delf_features = read_sift_features(inputpath)
        clusters = get_random_clusters(delf_features)
        kMeans(delf_features,clusters)
    else:
        # Python 2 print statement (this module uses py2-era idioms throughout).
        print "File doesn't exist"

if __name__ == "__main__":
    if len(sys.argv) > 1:
        main(sys.argv[1])
    else:
        print "Uso: python kmeans_ann.py features/"
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
"""
import os
import subprocess
import unittest
from mechanical_markdown import MechanicalMarkdown, MarkdownAnnotationError
from unittest.mock import patch, MagicMock, call
DEFAULT_TIMEOUT = 300
class MechanicalMarkdownTests(unittest.TestCase):
def setUp(self):
    """Install a Popen mock whose communicate() pops queued (stdout, stderr, returncode) triples."""
    self.command_ouputs = []  # NOTE: 'ouputs' typo kept; other methods reference this name
    self.process_mock = MagicMock()

    def pop_command(timeout=None):
        # Serve the next queued fake result and set the mock's return code.
        stdout, stderr, return_code = self.command_ouputs.pop(0)
        self.process_mock.returncode = return_code
        return (stdout, stderr)
    self.process_mock.communicate.side_effect = pop_command
    self.popen_mock = MagicMock()
    self.popen_mock.return_value = self.process_mock
    self.patcher = patch('mechanical_markdown.command.Popen', self.popen_mock)
    self.patcher.start()

def prep_command_ouput(self, stdout, stderr, return_code):
    """Queue one fake command result for the next mocked communicate() call."""
    self.command_ouputs.append((stdout, stderr, return_code))

def tearDown(self):
    """Stop the Popen patch installed in setUp."""
    self.patcher.stop()
def test_basic_success(self):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
@patch("mechanical_markdown.command.os.chdir")
def test_working_dir_success(self, chdir_mock):
test_data = """
<!-- STEP
name: basic test
working_dir: "./foo"
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
chdir_mock.assert_has_calls([call("./foo"), call(os.getcwd())])
@patch("mechanical_markdown.step.time.sleep")
def test_sleep_is_honored(self, sleep_mock):
test_data = """
<!-- STEP
name: basic test
sleep: 10
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
sleep_mock.assert_called_with(10)
def test_env(self):
test_data = """
<!-- STEP
name: env test
env:
ENVA: foo
ENVB: bar
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
expected_env = {"ENVA": "foo", "ENVB": "bar"}
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=dict(os.environ, **expected_env))
def test_background_success(self):
test_data = """
<!-- STEP
name: basic test
background: true
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success = mm.all_steps[0].run_all_commands(False)
self.assertTrue(success)
success = mm.all_steps[0].wait_for_all_background_commands()
self.assertTrue(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
self.process_mock.communicate.assert_called_with(timeout=DEFAULT_TIMEOUT)
def test_background_failure(self):
test_data = """
<!-- STEP
name: basic test
background: true
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 1)
mm = MechanicalMarkdown(test_data)
success = mm.all_steps[0].run_all_commands(False)
self.assertTrue(success)
success = mm.all_steps[0].wait_for_all_background_commands()
self.assertFalse(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
self.process_mock.communicate.assert_called_with(timeout=DEFAULT_TIMEOUT)
def test_failure_halts_further_executions(self):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
```bash
echo "test2"
```
<!-- END_STEP -->
<!-- STEP
name: should not be executed
background: true
-->
We had a bug where we were calling join() on threads that never actually got executed. This test tickles that bug.
```bash
echo "This should not be executed"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 1)
self.prep_command_ouput("test2", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertFalse(success)
self.popen_mock.assert_called_once_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
def test_missing_expected_line_causes_failure(self):
test_data = """
<!-- STEP
name: basic test
expected_stdout_lines:
- green
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertFalse(success)
self.popen_mock.assert_called_once_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
def test_expected_lines_succeed_when_matched(self):
test_data = """
<!-- STEP
name: basic test
expected_stdout_lines:
- test
- test2
expected_stderr_lines:
- error
-->
```bash
echo "test"
echo "test2"
echo "error" 1>&2
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test\ntest2", "error", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
calls = [call(['bash', '-c', 'echo "test"\necho "test2"\necho "error" 1>&2'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ),
call().communicate(timeout=DEFAULT_TIMEOUT)]
self.popen_mock.assert_has_calls(calls)
def test_expected_lines_succeed_when_matched_substr(self):
test_data = """
<!-- STEP
name: basic test
output_match_mode: substring
expected_stdout_lines:
- substring
expected_stderr_lines:
-->
```bash
echo "Match a substring"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("Match a substring", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
calls = [call(['bash', '-c', 'echo "Match a substring"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ),
call().communicate(timeout=DEFAULT_TIMEOUT)]
self.popen_mock.assert_has_calls(calls)
def test_exception_raised_for_invalid_match_mode(self):
test_data = """
<!-- STEP
name: basic test
output_match_mode: foo
-->
<!-- END_STEP -->
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
def test_timeout_is_respected(self):
test_data = """
<!-- STEP
name: basic test
expected_stdout_lines:
- test
timeout_seconds: 5
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
calls = [call(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ),
call().communicate(timeout=5)]
self.popen_mock.assert_has_calls(calls)
def test_dryrun(self):
test_data = """
<!-- STEP
name: basic test
expected_stdout_lines:
- test
- test2
-->
```bash
echo "test"
echo "test2"
```
<!-- END_STEP -->
<!-- STEP
name: step 2
expected_stdout_lines:
- foo
expected_stderr_lines:
- bar
-->
```bash
echo "foo"
echo "bar" >2
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
output = mm.dryrun()
expected_output = """Step: basic test
\tcommands to run with 'bash -c':
\t\t`echo "test"
echo "test2"`
\tExpected stdout:
\t\ttest
\t\ttest2
\tExpected stderr:
\tExpected return code: 0
Step: step 2
\tcommands to run with 'bash -c':
\t\t`echo "foo"
echo "bar" >2`
\tExpected stdout:
\t\tfoo
\tExpected stderr:
\t\tbar
\tExpected return code: 0
"""
self.assertEqual(expected_output, output)
self.popen_mock.assert_not_called()
@patch("mechanical_markdown.command.time.sleep")
def test_timed_out_processes_are_killed(self, sleep_mock):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
def raise_timeout(timeout=DEFAULT_TIMEOUT):
raise subprocess.TimeoutExpired("foo", 60.0)
self.process_mock.communicate.side_effect = raise_timeout
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertFalse(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
self.process_mock.terminate.assert_called()
self.process_mock.kill.assert_called()
self.process_mock.communicate.assert_has_calls([call(timeout=DEFAULT_TIMEOUT), call(timeout=DEFAULT_TIMEOUT)])
@patch("builtins.input")
def test_pause_waits_for_user_input(self, input_mock):
test_data = """
<!-- STEP
name: basic test
manual_pause_message: "Stop Here"
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
input_mock.return_value = 'x'
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(True)
input_mock.assert_called_with("Stop Here\nType 'x' to exit\n")
self.assertTrue(success)
self.popen_mock.assert_called_with(['bash', '-c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
def test_different_shell(self):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 0)
mm = MechanicalMarkdown(test_data, shell='cmd /c')
success, report = mm.exectute_steps(False)
self.assertTrue(success)
self.popen_mock.assert_called_with(['cmd', '/c', 'echo "test"'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)
def test_missing_end_tag_throws_exception(self):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
test_data = """
<!-- IGNORE_LINKS -->
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
def test_missmatched_start_and_end_tags_throws_exception(self):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
<!-- STEP
name: another basic test
-->
```bash
echo "another test"
```
<!-- END_STEP -->
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
test_data = """
<!-- IGNORE_LINKS -->
<!-- IGNORE_LINKS -->
<!-- END_IGNORE -->
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
def test_missing_extra_tag_throws_exception(self):
test_data = """
<!-- STEP
name: basic test
-->
```bash
echo "test"
```
<!-- END_STEP -->
<!-- END_STEP -->
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
test_data = """
<!-- IGNORE_LINKS -->
<!-- END_IGNORE -->
<!-- END_IGNORE -->
"""
with self.assertRaises(MarkdownAnnotationError):
MechanicalMarkdown(test_data)
def test_expect_status_code_success(self):
test_data = """
<!-- STEP
name: expect returns 1
expected_return_code: 1
-->
```bash
exit 1
```
<!-- END_STEP -->
<!-- STEP
name: ignore return code
expected_return_code:
-->
```bash
exit 15
```
<!-- END_STEP -->
"""
self.prep_command_ouput("test", "", 1)
self.prep_command_ouput("test", "", 15)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
calls = [call(['bash', '-c', 'exit 1'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ),
call().communicate(timeout=DEFAULT_TIMEOUT),
call(['bash', '-c', 'exit 15'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)]
self.popen_mock.assert_has_calls(calls)
def test_steps_with_no_matching_tags_are_skipped(self):
test_data = """
<!-- STEP
name: foo bar
tags:
- foo
- bar
-->
```bash
echo tag foo
echo tag bar
```
<!-- END_STEP -->
<!-- STEP
name: tag match
tags:
- blag
-->
```bash
echo blag
```
<!-- END_STEP -->
"""
self.prep_command_ouput("blag", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False, tags=("blag",))
self.assertTrue(success, report)
calls = [call(['bash', '-c', 'echo blag'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ),
call().communicate(timeout=DEFAULT_TIMEOUT)]
self.popen_mock.assert_has_calls(calls)
def test_all_steps_with_matching_tags_are_executed(self):
test_data = """
<!-- STEP
name: foo bar
tags:
- foo
- bar
-->
```bash
echo tag foo
echo tag bar
```
<!-- END_STEP -->
<!-- STEP
name: foo2
tags:
- foo
-->
```bash
echo foo2
```
<!-- END_STEP -->
"""
self.prep_command_ouput("tag foo\ntag bar", "", 0)
self.prep_command_ouput("foo2", "", 0)
mm = MechanicalMarkdown(test_data)
success, report = mm.exectute_steps(False)
self.assertTrue(success)
calls = [call(['bash', '-c', 'echo tag foo\necho tag bar'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ),
call().communicate(timeout=DEFAULT_TIMEOUT),
call(['bash', '-c', 'echo foo2'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=os.environ)]
self.popen_mock.assert_has_calls(calls)
|
# Run the cutout package's cleanup routine when this script executes.
from cutout import cleanup
cleanup()
|
# Generated by Django 2.2 on 2019-04-24 07:58
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-04-24).

    Adds payment tracking fields to ReserveLesson and tightens two existing
    field definitions.  Do not edit the operations by hand once applied.
    """

    dependencies = [
        ('onlclass', '0034_auto_20190421_0925'),
    ]

    operations = [
        # New flag: whether the reserved lesson has been paid for.
        migrations.AddField(
            model_name='reservelesson',
            name='is_paid',
            field=models.BooleanField(default=False, verbose_name='支払いフラグ'),
        ),
        # New nullable timestamp recording when payment happened.
        migrations.AddField(
            model_name='reservelesson',
            name='pay_datetime',
            field=models.DateTimeField(blank=True, null=True, verbose_name='支払い日時'),
        ),
        # NOTE: naive-datetime sentinel default (2000-01-01) for "not called off".
        migrations.AlterField(
            model_name='lesson',
            name='called_off_date',
            field=models.DateTimeField(default=datetime.datetime(2000, 1, 1, 0, 0), verbose_name='中止登録日'),
        ),
        migrations.AlterField(
            model_name='subject',
            name='subject_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='onlclass.SubjectType', verbose_name='教科カテゴリ'),
        ),
    ]
|
# Python 2 one-off script: fetch current temperature and coordinates for a
# fixed list of US cities and store lat/lon on the matching Question rows.
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
import django
django.setup()
from datetime import datetime
from polls.models import Question, Temperature
#from django.utils import timezone
import pyowm
import geopy
from geopy.geocoders import Nominatim

cities = ['New York','San Francisco','Boston','Chicago']
# SECURITY NOTE(review): OpenWeatherMap API key is hard-coded; move to settings/env.
owm = pyowm.OWM('29d7de7caa102eec0f97034cd9f208cb')
# Python 2 print statement — this whole script is Python 2.
print Question.objects.all()
#print Temperature.objects.all()
#for i in range(0,4):
#    q = Question(city_text=cities[i])
#    q.save()
#print Question.objects.all()
for i in range(0,4):
    # QuerySet (not a single object) filtered by city name.
    q = Question.objects.filter(city_text=cities[i])
    #print q.temperature_set.all()
    observation = owm.weather_at_place(str(cities[i])+',US')
    w = observation.get_weather()
    x = w.get_temperature('celsius')
    # NOTE(review): geolocator could be created once outside the loop;
    # newer geopy also requires a user_agent argument — confirm version.
    geolocator = Nominatim()
    y = geolocator.geocode(cities[i])
    t = []
    fin = y.raw
    lo = fin['lon']
    la = fin['lat']
    # Bulk-update the queryset with the geocoded coordinates.
    q.update(lat = la,lon = lo)
    #print q.lon
    #print q.lat
    #q.temperature_set.create(temperature_data= x['temp'])
    #t.temperature_set.create(location = c)
    #print q.temperature_set.all()
    #print t.temperature_set.all()
|
#!/usr/bin/env python
# Fakelag.py - Auto Fakelag exempt for authenticated clients.
# Licensed under GPL3, Free Software etc etc
import weechat
import re

NAME = "Fakelag.py"
AUTHOR = "Ferus - irc.datnode.net #hacking"
VERSION = "1.0"
LICENSE = "GPL3"
DISC = "Auto Fakelag exempt script for clients who authenticate to nickserv."

# Default plugin settings; only written if not already configured.
settings = {
    'server': 'DatNode'
    ,'channel': '#services'
    ,'command': '/quote privmsg operserv flood {0}'
    ,'allowed': 'stal,WhergBot'
}

# Register with WeeChat, then seed any missing config options with defaults.
if weechat.register(NAME, AUTHOR, VERSION, LICENSE, DISC, "", ""):
    for option, value in settings.items():
        if not weechat.config_is_set_plugin(option):
            weechat.config_set_plugin(option, value)
def GetSetting(Setting):
    """Return this plugin's configuration value for *Setting* as a string."""
    option = weechat.config_get('plugins.var.python.fakelag.py.' + Setting)
    return weechat.config_string(option)
def Main(data, buffer, date, tags, displayed, highlight, prefix, message):
    """Print-hook callback: when NickServ reports that an allowed nick has
    identified, send the configured exempt command for that nick."""
    for nick in GetSetting('allowed').split(','):
        pattern = "NickServ: {0}!.*?@.*? identified for nick {0}".format(nick)
        if re.match(pattern, message):
            weechat.command(buffer, GetSetting('command').format(nick))
            break
    return weechat.WEECHAT_RC_OK
# Watch server notices/privmsgs on the configured server and dispatch to Main.
hook = weechat.hook_print(
    GetSetting("server")
    ,"irc_privmsg,notify_message,nick_Global,log1"
    ,""
    ,1
    ,"Main"
    ,""
)
|
from unittest import TestCase
from tutnese import encode_duplicates, encode, decode
class TestTutnese(TestCase):
    """Unit tests for the tutnese encoder/decoder.

    Fix: renamed test_encode_dupliactes -> test_encode_duplicates
    (typo in the method name; discovery by 'test_' prefix is unaffected).
    """

    # Tests for encode_duplicates()
    def test_encode_duplicates_empty_string(self):
        """Empty input round-trips to empty output."""
        self.assertEqual(encode_duplicates(""), "")

    def test_encode_duplicates_pdf_string(self):
        """Doubled consonants collapse to the '|' marker."""
        pdf_string = "Over hill, over dale, Thorough bush, thorough brier, Over park, over pale, \nThorough flood, thorough fire!"
        solution_string = "Over hi|l, over dale, Thorough bush, thorough brier, Over park, over pale, \nThorough flood, thorough fire!"
        self.assertEqual(encode_duplicates(pdf_string), solution_string)

    def test_encode_duplicates_double_vowels(self):
        """Doubled consonants are marked; doubled vowels are left alone."""
        test_string = "This sstringg has dupliccaatee consonaants and voowells."
        solution_string = "This |strin|g has dupli|caatee consonaants and voowe|ls."
        self.assertEqual(encode_duplicates(test_string), solution_string)

    # ------------------------------
    # Tests for encode()
    def test_encode_invalid(self):
        """A literal '|' in the input is rejected."""
        with self.assertRaises(ValueError):
            encode("Invalid String |")

    def test_encode_empty(self):
        self.assertEqual(encode(""), "")

    def test_encode_spaces(self):
        """Whitespace-only input passes through unchanged."""
        self.assertEqual("\t ", encode("\t "))

    def test_encode_pdf_string(self):
        input_string = "Over hill, over dale, Thorough bush, thorough brier, Over park, over pale, Thorough flood, thorough fire!"
        solution_string = "ovuverur hashisqual, ovuverur dudalule, tuthashorurougughash bubusushash, tuthashorurougughash bubrurierur, ovuverur puparurkuck, ovuverur pupalule, tuthashorurougughash fufluloodud, tuthashorurougughash fufirure!"
        self.assertEqual(solution_string, encode(input_string))

    # ------------------------------
    # Tests for decode()
    def test_decode_invalid(self):
        with self.assertRaises(ValueError):
            decode("Invalid String |")

    def test_decode_empty(self):
        self.assertEqual(decode(""), "")

    def test_decode_spaces(self):
        self.assertEqual("\t ", decode("\t "))

    def test_decode_pdf_string(self):
        """decode() lower-cases, so compare against a lower-cased original."""
        input_string = "ovuverur hashisqual, ovuverur dudalule, tuthashorurougughash bubusushash, tuthashorurougughash bubrurierur, ovuverur puparurkuck, ovuverur pupalule, tuthashorurougughash fufluloodud, tuthashorurougughash fufirure!"
        solution_string = "Over hill, over dale, Thorough bush, thorough brier, Over park, over pale, Thorough flood, thorough fire!".lower()
        self.assertEqual(solution_string, decode(input_string))
from data_structures.array import find_max
""" COUNT SORT
Time Complexity: O(n + maxval) - (can be maxval - minval)
Space Complexity: O(n + k) - len(out) and len(cum)
Notes:
1. Efficient when k < n.
2. Uses partial hashing to count occurence in O(1).
To determine:
1. Parallelizing?
2. Stable, online? <- what do these mean?
3. Does it only work with integers?
Tasks, things to experiment:
1. Modify to work on negative integers.
2. Reduce time complexity to maxval - minval.
Other:
Below looks a bit weird so it works with radix,
when implementing generally don't include sort_on.
"""
def count_sort(arr, sort_on=lambda x: x):
    """Stable counting sort of *arr* by the non-negative integer key sort_on(x).

    Args:
        arr: sequence of items whose keys are non-negative ints.
        sort_on: key extractor (identity by default); kept so this can serve
            as the stable inner pass of radix sort.

    Returns:
        A new list with the items of *arr* in ascending key order.

    Improvements over the original:
    - returns [] for empty input instead of crashing on max of empty sequence;
    - keys are extracted once instead of calling sort_on three times per item;
    - built-in max() replaces the project-local find_max helper (same result).
    """
    if not arr:
        return []
    keys = [sort_on(item) for item in arr]
    maxval = max(keys)
    counts = [0] * (maxval + 1)
    out = [None] * len(arr)
    for key in keys:
        counts[key] += 1
    # Prefix sums: counts[k] is now the number of items with key <= k.
    for i in range(1, len(counts)):
        counts[i] += counts[i - 1]
    # Walk backwards so equal keys keep their relative order (stability),
    # which radix sort requires.
    for item, key in zip(reversed(arr), reversed(keys)):
        counts[key] -= 1
        out[counts[key]] = item
    return out
if __name__ == "__main__":
arr = [10, 21, 42, 18, 93]
print(arr)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: yuanzi
import turtle
import time
def main():
    """Draw a dotted path with the turtle module, stamp, then pause and exit.

    Fix: removed pasted interactive-session residue — the bare literals
    (100.00,-0.00), 0.0 and 11 and the unused turtle.position()/heading()
    queries were no-op expression statements copied from a REPL transcript.
    """
    # Create the drawing window with a pink background.
    windows = turtle.Screen()
    windows.bgcolor('pink')

    # A decorative red turtle cursor (created but not used for drawing).
    bran = turtle.Turtle()
    bran.shape('turtle')
    bran.color('red')

    # Draw with the anonymous module-level turtle: a dotted line of blue dots.
    turtle.home()
    turtle.dot()
    for i in range(1, 10):
        turtle.fd(50)
        turtle.dot(30, "blue")
        turtle.fd(50)
    # Continue forward, turn right, stamp the turtle shape, and finish.
    turtle.fd(50)
    turtle.right(90)
    turtle.fd(50)
    turtle.stamp()
    turtle.fd(50)
    # Keep the window visible for 10 seconds before the script ends.
    time.sleep(10)
# Script entry point.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Python 2 script: read a word and print it entirely in the case (lower or
# upper) that already dominates; ties favour lowercase.
s=raw_input()
l=0          # count of lowercase characters
ans1=''      # fully lower-cased copy
ans2=''      # fully upper-cased copy
for i in s:
    if i.islower():
        l+=1
    ans1+=i.lower()
    ans2+=i.upper()
# At least half lowercase -> print the lowercase version.
if l*2>=len(s):
    print ans1
else:
    print ans2
|
"""
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright Wechaty
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from enum import Enum
import os
from typing import Any, Optional, List, Dict, Union
from dataclasses import dataclass
from quart import jsonify, Response
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from wechaty.config import config
@dataclass
class NavMetadata:
    """Display metadata describing a plugin's author and its UI view."""
    view_url: Optional[str] = None  # URL of the plugin's view page
    author: Optional[str] = None  # name of author
    avatar: Optional[str] = None  # avatar of author
    author_link: Optional[str] = None  # introduction link of author
    icon: Optional[str] = None  # plugin icon (original comment said "avatar of author" — likely copy-paste; confirm)
@dataclass
class NavDTO:
    """The data transfer object of the plugin list."""
    name: str  # name of plugin
    status: int  # status of plugin: 0 / 1 (see PluginStatus)
    view_url: Optional[str] = None  # URL of the plugin's view page
    author: Optional[str] = None  # name of author
    avatar: Optional[str] = None  # avatar of author
    author_link: Optional[str] = None  # introduction link of author
    icon: Optional[str] = None  # plugin icon (original comment said "avatar of author" — likely copy-paste; confirm)

    def update_metadata(self, nav_metadata: NavMetadata) -> None:
        """Copy the display fields from *nav_metadata* onto this DTO.

        Overwrites author, author_link, avatar, icon and view_url; the
        name/status fields are left untouched.
        """
        self.author = nav_metadata.author
        self.author_link = nav_metadata.author_link
        self.avatar = nav_metadata.avatar
        self.icon = nav_metadata.icon
        self.view_url = nav_metadata.view_url
def success(data: Any) -> Response:
    """Build the standard JSON success envelope around *data*.

    Args:
        data: payload placed under the "data" key.

    Returns:
        A JSON response with code 200.
    """
    payload = {'code': 200, 'data': data}
    return jsonify(payload)
def error(msg: str) -> Response:
    """Build the standard JSON error envelope around *msg*.

    Args:
        msg: human-readable error message placed under the "msg" key.

    Returns:
        A JSON response with code 500.
    """
    payload = {'code': 500, 'msg': msg}
    return jsonify(payload)
@dataclass
class WechatyPluginOptions:
    """Options for a wechaty plugin."""
    name: Optional[str] = None  # plugin name; None lets the plugin pick its own
    metadata: Optional[dict] = None  # free-form plugin metadata
@dataclass
class WechatySchedulerOptions:
    """Options for the wechaty scheduler."""
    # Either a SQLAlchemy job-store instance or a DB URL; defaults to a
    # SQLite file inside the configured cache directory.
    job_store: Union[str, SQLAlchemyJobStore] = f'sqlite:///{config.cache_dir}/job.db'
    job_store_alias: str = 'wechaty-scheduler'  # APScheduler job-store alias
class PluginStatus(Enum):
    """Plugin running status (also used as the int 'status' in NavDTO)."""
    Running = 0
    Stopped = 1
class StaticFileCacher:
    """Cache static-file lookups to avoid repeated recursive directory scans.

    Fix: find_file_path() checked self.file_maps but never populated it, so
    the cache was dead code and every lookup re-walked the directories; hits
    are now memoized.
    """

    def __init__(self, cache_dirs: Optional[List[str]] = None) -> None:
        # file name -> resolved path, filled lazily by find_file_path()
        self.file_maps: Dict[str, str] = {}
        self.cache_dirs = cache_dirs or []

    def add_dir(self, static_file_dir: Optional[str]) -> None:
        """Register an extra directory to search; None/empty is ignored.

        Args:
            static_file_dir (str): the path of the static file directory
        """
        if not static_file_dir:
            return
        self.cache_dirs.append(static_file_dir)

    def _find_file_path_recursive(self, base_dir: str, name: str) -> Optional[str]:
        """Depth-first search under *base_dir* for an entry named *name*.

        Args:
            base_dir (str): the root dir of static files for the plugin
            name (str): the unique name of the static file

        Returns:
            Optional[str]: the target static file path, or None if absent
        """
        if not os.path.exists(base_dir) or os.path.isfile(base_dir):
            return None
        for file_name in os.listdir(base_dir):
            if file_name == name:
                return os.path.join(base_dir, file_name)
            file_path = os.path.join(base_dir, file_name)
            target_path = self._find_file_path_recursive(file_path, name)
            if target_path:
                return target_path
        return None

    def find_file_path(self, name: str) -> Optional[str]:
        """Resolve *name* to a path, scanning all cache dirs on first use.

        Args:
            name (str): the unique name of the static file

        Returns:
            Optional[str]: the path of the static file, or None if not found
        """
        if name in self.file_maps:
            return self.file_maps[name]
        for cache_dir in self.cache_dirs:
            file_path = self._find_file_path_recursive(cache_dir, name)
            if file_path:
                # Bug fix: memoize the hit so later lookups skip the scan.
                self.file_maps[name] = file_path
                return file_path
        return None
|
# Load the Intcode program as a flat list of comma-separated value strings.
# NOTE(review): the file handle is never closed; relies on GC / interpreter exit.
mem = open('input.txt', 'r').read().rstrip().split(',')
def mint(nr):
    """Normalize a memory cell value to int ("maybe-int").

    Cells start life as strings from the input file, and some opcodes write
    ints back, so every read must coerce before arithmetic.

    Fix: isinstance() instead of `type(nr) is str` (idiomatic type check).
    """
    return int(nr) if isinstance(nr, str) else nr
# len of mem: 678
# Intcode interpreter loop (Advent of Code day 5 style).  Instruction layout:
# the last two digits are the opcode, the preceding digits are parameter
# modes (1 = immediate, 0 = position), stored here as modes[2] for the first
# parameter, modes[1] for the second — note the reversed indexing.
addr = 0
while(True):
    instruction = mem[addr]
    # Single-digit instructions get zero-padded so slicing works uniformly.
    opcode = instruction[-2:] if len(instruction) >= 2 else '0' + instruction
    modes = [0, 0, 0]
    i = 2
    for m in reversed(instruction[:-2]):
        modes[i] = mint(m)
        i -= 1
    # print(opcode)
    if(opcode == '99'):
        # Halt.
        break
    elif(opcode == '08'): #equal
        a = mint(mem[addr+1]) if modes[2] == 1 else mint(mem[mint(mem[addr+1])])
        b = mint(mem[addr+2]) if modes[1] == 1 else mint(mem[mint(mem[addr+2])])
        put = 0
        if(a == b):
            put = 1
        mem[mint(mem[addr+3])] = put
        addr += 4
    elif(opcode == '07'): #less-than
        a = mint(mem[addr+1]) if modes[2] == 1 else mint(mem[mint(mem[addr+1])])
        b = mint(mem[addr+2]) if modes[1] == 1 else mint(mem[mint(mem[addr+2])])
        put = 0
        if(a < b):
            put = 1
        mem[mint(mem[addr+3])] = put
        addr += 4
    elif(opcode == '06'): #jump-if-false
        which = mint(mem[addr+1]) if modes[2] == 1 else mint(mem[mint(mem[addr+1])])
        if(which == 0):
            addr = mint(mem[addr+2]) if modes[1] == 1 else mint(mem[mint(mem[addr+2])])
        else:
            addr += 3
    elif(opcode == '05'): #jump-if-true
        which = mint(mem[addr+1]) if modes[2] == 1 else mint(mem[mint(mem[addr+1])])
        if(which != 0):
            addr = mint(mem[addr+2]) if modes[1] == 1 else mint(mem[mint(mem[addr+2])])
        else:
            addr += 3
    elif(opcode == '04'):
        # Output: print the parameter (mode read from the raw first digit).
        where = mint(mem[addr + 1])
        mode = 1 if instruction[0] == '1' else 0
        print('dc:', mem[where] if mode == 0 else where)
        addr += 2
    elif(opcode == '03'):
        # Input: hard-wired to '5' (the puzzle's system ID).
        where = mint(mem[addr + 1])
        mem[where] = '5'
        addr += 2
    elif(opcode == '02'): #multiply
        a = mint(mem[addr+1]) if modes[2] == 1 else mint(mem[mint(mem[addr+1])])
        b = mint(mem[addr+2]) if modes[1] == 1 else mint(mem[mint(mem[addr+2])])
        # print(a, b)
        mem[mint(mem[addr+3])] = str(a * b)
        addr += 4
    elif(opcode == '01'): #add
        a = mint(mem[addr+1]) if modes[2] == 1 else mint(mem[mint(mem[addr+1])])
        b = mint(mem[addr+2]) if modes[1] == 1 else mint(mem[mint(mem[addr+2])])
        # print(a, b)
        mem[mint(mem[addr+3])] = str(a + b)
        addr += 4
    else:
        print('unkown opcode')
        break
    # Debug trace of every executed instruction.
    print(instruction, modes)
    # break
|
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
def send_subscribe_mail(email, **kwargs):
    """Send the HTML subscription confirmation e-mail to *email*.

    The body is rendered from the 'subscribe.html' template; the plain-text
    message is intentionally empty.  Extra keyword arguments are accepted
    for call-site compatibility but ignored.
    """
    html_body = render_to_string('subscribe.html')
    send_mail(
        subject='AMD Telecom',
        message='',
        from_email=settings.EMAIL_HOST_USER,
        recipient_list=[email],
        html_message=html_body,
    )
"""
Verify a SciToken from command-line inputs.
"""
import argparse
import scitokens
from scitokens.utils.errors import InvalidTokenFormat
from scitokens.utils.errors import MissingIssuerException
from scitokens.utils.errors import MissingKeyException
from scitokens.utils.errors import NonHTTPSIssuer
from scitokens.utils.errors import SciTokensException
from scitokens.utils.errors import UnableToCreateCache
from scitokens.utils.errors import UnsupportedKeyException
def add_args():
    """Parse the command-line arguments for the token-verification CLI.

    Returns:
        argparse.Namespace with 'token' (a one-element list holding the
        serialized SciToken) and the 'verbose' flag.
    """
    parser = argparse.ArgumentParser(description='Verify a new SciToken')
    parser.add_argument('token', type=str, nargs=1, help='The serialized string of SciToken')
    parser.add_argument('-v', '--verbose', action='store_true')
    return parser.parse_args()
def main():
    """
    Given a serialized SciToken, verify it and print an error message in case
    of failure.

    Exception handlers are ordered most-specific first; SciTokensException is
    the library's catch-all, and the final bare Exception guards against
    anything unexpected.  Do not reorder the except clauses.
    """
    args = add_args()
    stoken = None
    try:
        stoken = scitokens.SciToken.deserialize(args.token[0])
        if args.verbose:
            # Dump every claim carried by the verified token.
            print("Claims:")
            for claim in stoken.claims():
                print("{}".format(claim))
    except MissingKeyException:
        print("No private key is present.")
    except UnsupportedKeyException:
        print("The provided algorithm in the token is not the one supported by SciToken library (RS256, ES256).")
    except MissingIssuerException:
        print("Issuer not specific in claims or as argument.")
    except NonHTTPSIssuer:
        print("Issuer is not over HTTPS. RFC requires it to be over HTTPS.")
    except InvalidTokenFormat:
        print("Serialized token is not a readable format.")
    except UnableToCreateCache as utcce:
        print("Unable to create cache: {}".format(str(utcce)))
    except SciTokensException as scite:
        print("An error raised from SciTokens library while verifying the token: {}".format(str(scite)))
    except Exception as exc:
        print("An error occurred while verifying the token: {}".format(str(exc)))
if __name__ == "__main__":
main()
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2
class DataGenerator:
    """Simulate a two-arm A/B test and track the chi-squared p-value over time."""

    @staticmethod
    def next(nr1, nr2):  # noqa-style note: shadows builtin name 'next' as a method
        """One Bernoulli draw per arm: 1 = click, 0 = no click."""
        click1 = 1 if (np.random.random() < nr1) else 0
        click2 = 1 if (np.random.random() < nr2) else 0
        return click1, click2

    @staticmethod
    def get_p_value(T):
        """Chi-squared (df=1) p-value for the 2x2 contingency table T.

        Statistic computed as det(T)^2 * N / (row0 * row1 * col0 * col1),
        written as a chain of divisions to avoid overflow of the product.
        """
        determinant = T[0,0] * T[1,1] - T[0,1] * T[1,0]
        c2 = float(determinant) / T[0].sum() * determinant / T[1].sum() * T.sum() / T[:,0].sum() / T[:,1].sum()
        p = 1 - chi2.cdf(x = c2, df = 1)
        return p

    @staticmethod
    def run_experiment(N, p1, p2):
        """Run N paired draws with true rates p1/p2 and plot the running
        p-value against the 0.05 significance line."""
        p_values = np.empty(N)
        # Rows: arm (0/1); columns: outcome (no-click/click).
        T = np.zeros((2,2)).astype(np.float32)
        for i in range(N):
            c1, c2 = DataGenerator.next(p1, p2)
            T[0,c1] += 1
            T[1,c2] += 1
            if i <= 10:
                # Too few samples for a meaningful test; assigning None to a
                # float array stores NaN, which matplotlib skips.
                p_values[i] = None
            else:
                p_values[i] = DataGenerator.get_p_value(T)
        plt.plot(p_values)
        plt.plot(np.ones(N) * 0.05)
        plt.show()
# Simulate 20k impressions with true click rates 0.10 vs 0.11 and plot
# how the chi-squared p-value evolves.
DataGenerator.run_experiment(20000, 0.10, 0.11)
import ast

# Read a number from the user and report whether it lies in [1, 20].
# Fix: ast.literal_eval replaces eval() — eval executes arbitrary code and is
# a code-injection hole on user input; literal_eval only accepts literals.
number = ast.literal_eval(input('Enter a number: '))
if 1 <= number <= 20:
    print(f'{number} is between 1 and 20')
else:
    # (dropped the pointless f-prefix: the string has no placeholders)
    print('The number is not between 1 and 20')
import json
import attr
import numpy as np
from scipy.ndimage import convolve
# from simulation.coordinates import Voxel
# from simulation.grid import RectangularGrid
from simulation.module import Module, ModuleState
from simulation.modules.geometry import GeometryState, TissueTypes
from simulation.molecule import MoleculeGrid, MoleculeTypes
from simulation.state import State
def molecule_grid_factory(self: 'MoleculesState'):
    """attr.Factory hook: build a MoleculeGrid bound to the state's global grid."""
    global_grid = self.global_state.grid
    return MoleculeGrid(grid=global_grid)
@attr.s(kw_only=True, repr=False)
class MoleculesState(ModuleState):
    """State for the molecules module: concentration grid plus rate constants."""
    # Per-molecule concentration/source grid, built lazily from the global grid.
    grid: MoleculeGrid = attr.ib(default=attr.Factory(molecule_grid_factory, takes_self=True))
    diffusion_rate: float  # read from config in Molecules.initialize()
    cyto_evap_m: float  # evaporation (decay) rate for m_cyto
    cyto_evap_n: float  # evaporation (decay) rate for n_cyto
    iron_max: float  # cap applied to iron concentration after diffusion
class Molecules(Module):
    """Simulation module managing molecule concentration grids.

    Seeds molecule concentrations by tissue type from the JSON config and, each
    time step, applies evaporation ("degrade") and 3x3x3 convolution diffusion.
    """

    name = 'molecules'
    StateClass = MoleculesState

    def initialize(self, state: State):
        """Read molecule definitions from config and seed the concentration grids.

        Raises:
            RuntimeError: if the geometry module has not been initialized yet.
            TypeError: if the config names an unknown molecule or tissue type.
        """
        molecules: MoleculesState = state.molecules
        geometry: GeometryState = state.geometry

        # geometry must be initialized first: molecules are seeded by tissue type
        if not np.any(geometry.lung_tissue):
            raise RuntimeError('geometry molecule has to be initialized first')

        molecules.diffusion_rate = self.config.getfloat('diffusion_rate')
        molecules.cyto_evap_m = self.config.getfloat('cyto_evap_m')
        molecules.cyto_evap_n = self.config.getfloat('cyto_evap_n')
        molecules.iron_max = self.config.getfloat('iron_max')

        # the 'molecules' config entry is a JSON list of molecule descriptors:
        # {"name": ..., "init_val": ..., "init_loc": [...], optional "source"/"incr"}
        molecules_config = self.config.get('molecules')
        json_config = json.loads(molecules_config)

        # hoist the valid-name sets out of the loop
        valid_molecules = {e.name for e in MoleculeTypes}
        valid_tissues = {e.name for e in TissueTypes}

        for molecule in json_config:
            name = molecule['name']
            init_val = molecule['init_val']
            init_loc = molecule['init_loc']

            if name not in valid_molecules:
                raise TypeError(f'Molecule {name} is not implemented yet')

            for loc in init_loc:
                if loc not in valid_tissues:
                    raise TypeError(f'Cannot find lung tissue type {loc}')

            molecules.grid.append_molecule_type(name)
            for loc in init_loc:
                molecules.grid.concentrations[name][
                    np.where(geometry.lung_tissue == TissueTypes[loc].value)
                ] = init_val

            if 'source' in molecule:
                source = molecule['source']
                incr = molecule['incr']
                if source not in valid_tissues:
                    raise TypeError(f'Cannot find lung tissue type {source}')
                # BUG FIX: the source mask previously used TissueTypes[init_loc[0]]
                # instead of the validated 'source' tissue type.
                molecules.grid.sources[name][
                    np.where(geometry.lung_tissue == TissueTypes[source].value)
                ] = incr

        return state

    def advance(self, state: State, previous_time: float):
        """Advance the state by a single time step.

        Iron diffuses with an upper cap; the m/n cytokines evaporate and then
        diffuse.
        """
        molecules: MoleculesState = state.molecules

        # NOTE(review): 3 diffusion sub-steps per simulation step — confirm this
        # matches the intended diffusion-rate calibration.
        for _ in range(3):
            molecules.grid.incr()

            self.convolution_diffusion(
                molecules.grid['iron'], state.geometry.lung_tissue, molecules.iron_max
            )

            self.degrade(molecules.grid['m_cyto'], molecules.cyto_evap_m)
            self.convolution_diffusion(molecules.grid['m_cyto'], state.geometry.lung_tissue)

            self.degrade(molecules.grid['n_cyto'], molecules.cyto_evap_n)
            self.convolution_diffusion(molecules.grid['n_cyto'], state.geometry.lung_tissue)

        return state

    @classmethod
    def convolution_diffusion(cls, molecule: np.ndarray, tissue: np.ndarray, threshold=None):
        """Diffuse a molecule grid by averaging each voxel's 3x3x3 neighborhood, in place.

        Args:
            molecule: 3d concentration grid, modified in place.
            tissue: tissue-type grid of the same shape; AIR voxels are zeroed.
            threshold: optional upper cap applied after diffusion.

        Raises:
            ValueError: if ``molecule`` is not 3-dimensional.
        """
        if len(molecule.shape) != 3:
            raise ValueError(f'Expecting a 3d array. Got dim = {len(molecule.shape)}')

        # uniform 27-point kernel (includes the center voxel)
        weights = np.full((3, 3, 3), 1 / 27)
        molecule[:] = convolve(molecule, weights, mode='constant')

        # molecules cannot reside in air voxels
        molecule[(tissue == TissueTypes.AIR.value)] = 0

        # BUG FIX: was `if threshold:`, which silently skipped capping when the
        # threshold is 0.0
        if threshold is not None:
            molecule[molecule > threshold] = threshold

    @classmethod
    def degrade(cls, molecule: np.ndarray, evap: float):
        """Evaporate the fraction ``evap`` of the molecule everywhere, in place."""
        molecule *= 1 - evap
|
from translationstring import TranslationStringFactory
# Translation marker for the 'onegov.town' i18n domain: wrap user-facing
# strings with _(...) so they can be extracted and translated.
_ = TranslationStringFactory('onegov.town')
|
from django.db import models
from user.models import User
from project_management.models import Project
# Drawings
class Plans(models.Model):
    """A named set of plans (drawings) belonging to a project."""
    title = models.CharField(max_length=255)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
class PlanOption(models.Model):
    """A named option/variant that drawings can be attached to."""
    title = models.CharField(max_length=255)
    def __str__(self):
        return self.title
class Drawing(models.Model):
    """An uploaded drawing file attached to a plan option."""
    uploaded_by = models.ForeignKey(User, on_delete=models.CASCADE)
    parent_option = models.ForeignKey(PlanOption, on_delete=models.CASCADE)
    details = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    drawing = models.FileField()
    def __str__(self):
        # display the title of the option this drawing belongs to
        return self.parent_option.title
class DrawingRemark(models.Model):
    """A user's remark attached to a drawing."""

    remarked_by = models.ForeignKey(User, on_delete=models.CASCADE)
    drawing = models.ForeignKey(Drawing, on_delete=models.CASCADE)
    remark = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save

    def __str__(self):
        # BUG FIX: __str__ must return a str; self.drawing.id is an int
        # (Django raises TypeError when a non-str is returned).
        return str(self.drawing.id)
|
import math
import pygame
class Player:
    """The player's ship sprite: position, velocity, size and hit points."""

    def __init__(self, x=0, y=0, dx=0, dy=0, hp=0):
        self.x = x
        self.y = y
        self.dx = dx
        self.dy = dy
        self.size = 30
        self.hp = hp
        self.playership = pygame.image.load('images/ship.png').convert_alpha()
        # record the horizontal midpoint, then shift x so the sprite is
        # centered on the requested coordinate
        half_width = self.playership.get_width() / 2
        self.imgmid = self.x + half_width
        self.x = self.x - half_width

    def draw(self, image):
        """Blit the ship onto *image* at its current position and return *image*."""
        image.blit(self.playership, (self.x, self.y))
        return image

    def move(self):
        """Advance the ship one step along its velocity and refresh imgmid."""
        self.move_horz()
        self.move_vert()
        self.imgmid = self.x + self.playership.get_width() / 2

    def right(self):
        """Return the x coordinate of the ship's right edge."""
        return self.x + self.playership.get_width()

    def left(self):
        """Return the x coordinate of the ship's left edge."""
        return self.x

    def top(self):
        """Return the y coordinate of the ship's top edge."""
        return self.y

    def bottom(self):
        """Return the y coordinate of the ship's bottom edge."""
        return self.y + self.playership.get_height()

    def move_horz(self):
        """Shift the ship horizontally by dx and return the new x."""
        self.x += self.dx
        return self.x

    def move_vert(self):
        """Shift the ship vertically by dy and return the new y."""
        self.y += self.dy
        return self.y

    def bounce_horiz(self):
        """Reverse the horizontal velocity."""
        self.dx = -self.dx

    def bounce_vert(self):
        """Reverse the vertical velocity."""
        self.dy = -self.dy
|
def greet(name):
    """Print a banner and return a closure that announces *name* when called."""
    print("This is out")

    def say_hello():
        # closes over *name* from the enclosing call to greet()
        print("My name is %s" % name)

    return say_hello
# Closure demo: greet() prints its banner and returns say_hello, which still
# sees the `name` argument when invoked later.
ret = greet("Lechrond")
ret()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.