blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
692da999c2b06adfee50cf10ed6fcc7a977ca421 | 70aa3a1cefde69909fd9a2ede9a4bdfbb7e434ad | /sobel.py | 36ea7cd7ce655d9c20410d4d2e870acffa768b1a | [] | no_license | cflin-cjcu/opencvttt | 512cf2ba70cc9d30cb97e733172d94396cfff56f | 07add34638fc043d02f09459246aa47975c5a9c7 | refs/heads/master | 2023-06-27T16:00:20.070365 | 2021-07-30T08:54:14 | 2021-07-30T08:54:14 | 389,538,395 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Sobel edge-detection demo: compare output depths (8-bit vs float32)
# and kernel sizes (3 vs 5) on a grayscale test image.
img = cv.imread('./images/test1.jpg', 0)  # flag 0 -> load as grayscale
if img is None:
    # imread() returns None (no exception) when the file is missing or
    # unreadable; fail loudly instead of crashing inside cv.Sobel().
    raise IOError("could not read image './images/test1.jpg'")
# Combined x+y derivative in unsigned 8-bit: negative gradients clip to 0.
sobel = cv.Sobel(img, cv.CV_8U, 1, 1, ksize=3)
# Single-axis gradients in float32 so negative slopes are preserved.
sobelx = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=3)
sobely = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=3)
# Same as `sobel` but with a larger 5x5 kernel (smoother, thicker edges).
sobel2 = cv.Sobel(img, cv.CV_8U, 1, 1, ksize=5)
cv.imshow('img', img)
cv.imshow('sobel3', sobel)
cv.imshow('sobelx', sobelx)
cv.imshow('sobely', sobely)
cv.imshow('sobel5', sobel2)
cv.waitKey(0)
cv.destroyAllWindows()
"cflin@mail.cjcu.edu.tw"
] | cflin@mail.cjcu.edu.tw |
75a918045d1c47fb55559cf79ac0ec0d1def950c | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003/Gather2_W_fixGood_C_change/train/pyr_1s/L4/step10_a.py | bc3f9d3b9536723a08c7bd94f59d700f75d14a81 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,830 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_1side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003.W_w_M_to_C_pyr.pyr_1s.L4.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
    # NOTE(review): `time` and `start_time` are not defined anywhere in this
    # file; they presumably arrive via one of the wildcard imports above
    # (step06/step09/step10 modules) -- confirm before refactoring imports.
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Direct run (press F5 or `python step10_b1_exp_obj_load_and_train_and_test.py`
        ### with no extra argument): build and run the default experiment instead of
        ### falling through to the subprocess code path below.
        ch032_1side_1.build().run()
        # print('no argument')
        sys.exit()
    ### The line below is for step10_b_subprocess.py: equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` from cmd.
    ### NOTE(review): eval() of a command-line argument executes arbitrary code --
    ### acceptable only because this script is driven by the trusted step10_b runner.
    eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
7dd64a3c50ed789c5881b6291252c71d5b8ced14 | 91d7987874dcfa0d8dbbd9a3a3831ed9b67691f8 | /yamaxun/yamaxun/pipelines.py | 8013054e6fa3e4bd1a8179243b752ee656e01dd5 | [] | no_license | lsz1995/amazon | c9388cc78f8465804b53e8759940ebc9625cbdd6 | e648ff21f642632e30925ffab1d3a4608eb201ca | refs/heads/master | 2020-03-19T05:55:23.726649 | 2018-06-04T05:31:22 | 2018-06-04T05:31:22 | 135,974,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
from yamaxun.items import YamaxunItem
class YamaxunPipeline(object):
    """Default no-op pipeline: passes every scraped item through unchanged."""

    def process_item(self, item, spider):
        """Return the item untouched so any later pipeline can process it."""
        return item
class mongoPipeline(object):
    """Persist YamaxunItem objects into a MongoDB collection.

    Connection parameters come from the crawler settings (MONGO_RUI /
    MONGO_DB); one client is opened for the lifetime of the crawl.
    """

    collection = 'huawei'  # target MongoDB collection name

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the project settings.

        NOTE(review): the settings key is spelled 'MONGO_RUI' (sic); it must
        match settings.py exactly -- confirm before renaming.
        """
        return cls(
            mongo_uri=crawler.settings.get('MONGO_RUI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    # Called automatically when the spider starts: open the connection.
    def open_spider(self, spider):
        self.client = MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    # Called automatically when the spider closes: release the connection.
    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Upsert the item keyed on its ID, then pass it along.

        Scrapy requires process_item() to return the item (or raise
        DropItem); the previous version returned the string "OK!", which
        would replace the item for any pipeline running after this one.
        """
        if isinstance(item, YamaxunItem):
            table = self.db[self.collection]
            data = dict(item)
            # Upsert (last argument True) keyed on 'ID' to avoid duplicates.
            table.update({'ID': item["ID"]}, {'$set': data}, True)
            # table.insert_one(data)
        return item
| [
"qqlsz87@126.com"
] | qqlsz87@126.com |
ed5ee8aa6d39cf4b84696afef021cd774a662000 | edfb435ee89eec4875d6405e2de7afac3b2bc648 | /branches/os_x_native_events/py/selenium/webdriver/firefox/webdriver.py | bcd92df9ba2c6fe70e1a5d3cb1a663660412ab26 | [
"Apache-2.0"
] | permissive | Escobita/selenium | 6c1c78fcf0fb71604e7b07a3259517048e584037 | f4173df37a79ab6dd6ae3f1489ae0cd6cc7db6f1 | refs/heads/master | 2021-01-23T21:01:17.948880 | 2012-12-06T22:47:50 | 2012-12-06T22:47:50 | 8,271,631 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httplib
from selenium.webdriver.common.exceptions import ErrorInResponseException
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.firefox.firefoxlauncher import FirefoxLauncher
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.extensionconnection import ExtensionConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import urllib2
import socket
class WebDriver(RemoteWebDriver):
"""The main interface to use for testing,
which represents an idealised web browser."""
def __init__(self, profile=None, timeout=30):
"""Creates a webdriver instance.
Args:
profile: a FirefoxProfile object (it can also be a profile name,
but the support for that may be removed in future, it is
recommended to pass in a FirefoxProfile object)
timeout: the amount of time to wait for extension socket
"""
port = self._free_port()
self.browser = FirefoxLauncher()
if type(profile) == str:
# This is to be Backward compatible because we used to take a
# profile name
profile = FirefoxProfile(name=profile, port=port)
if not profile:
profile = FirefoxProfile(port=port)
self.browser.launch_browser(profile)
RemoteWebDriver.__init__(self,
command_executor=ExtensionConnection(timeout),
desired_capabilities=DesiredCapabilities.FIREFOX)
def _free_port(self):
port = 0
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind((socket.gethostname(), 0))
port = free_socket.getsockname()[1]
free_socket.close()
return port
def _execute(self, command, params=None):
try:
return RemoteWebDriver.execute(self, command, params)
except ErrorInResponseException, e:
# Legacy behavior: calling close() multiple times should not raise
# an error
if command != Command.CLOSE and command != Command.QUIT:
raise e
except urllib2.URLError, e:
# Legacy behavior: calling quit() multiple times should not raise
# an error
if command != Command.QUIT:
raise e
def create_web_element(self, element_id):
"""Override from RemoteWebDriver to use firefox.WebElement."""
return WebElement(self, element_id)
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except httplib.BadStatusLine:
# Happens if Firefox shutsdown before we've read the response from
# the socket.
pass
self.browser.kill()
def save_screenshot(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
"""
png = self._execute(Command.SCREENSHOT)['value']
try:
f = open(filename, 'w')
f.write(base64.decodestring(png))
f.close()
except IOError:
return False
finally:
del png
return True
| [
"simon.m.stewart@07704840-8298-11de-bf8c-fd130f914ac9"
] | simon.m.stewart@07704840-8298-11de-bf8c-fd130f914ac9 |
9e4b4043b10db5279e165da2fd0224214758405e | 8b2e795c3040a2ef1d3f0c21752bec57a0614bd6 | /venv/Scripts/pilfont.py | bbb82dc78d2a2729dce7cbeafae8e6fb6cfc97ae | [] | no_license | harshit8858/NHDO | c75e244dfdc91817b3047d65c7be610f3e18aba3 | 6a5ea2de4ba607c20c0b9bd241e6b1c82090eba9 | refs/heads/master | 2023-01-06T20:18:33.795898 | 2018-01-03T07:39:04 | 2018-01-03T07:39:04 | 105,629,451 | 1 | 3 | null | 2022-12-20T22:32:34 | 2017-10-03T08:26:57 | Python | UTF-8 | Python | false | false | 1,057 | py | #!c:\users\harshi~1\nhdo\venv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"

if len(sys.argv) <= 1:
    print("PILFONT", VERSION, "-- PIL font compiler.")
    print()
    print("Usage: pilfont fontfiles...")
    print()
    print("Convert given font files to the PIL raster font format.")
    print("This version of pilfont supports X BDF and PCF fonts.")
    sys.exit(1)

# Expand each command-line argument as a glob pattern into a flat file list.
files = []
for f in sys.argv[1:]:
    files = files + glob.glob(f)

for f in files:
    print(f + "...", end=' ')
    try:
        # Try the PCF driver first; if its header does not parse, rewind
        # and retry as BDF. `with` closes the handle deterministically
        # (the previous version leaked one file handle per input).
        with open(f, "rb") as fp:
            try:
                p = PcfFontFile.PcfFontFile(fp)
            except SyntaxError:
                fp.seek(0)
                p = BdfFontFile.BdfFontFile(fp)
            p.save(f)
    except (SyntaxError, IOError):
        print("failed")
    else:
        print("OK")
| [
"harshit8858@gmail.com"
] | harshit8858@gmail.com |
df9a72c962406e7277fb7cc77a71ebf225417ea8 | 635e0b896b75b7d496f60368ae4e1a8b8b41546a | /api/v1/views/index.py | 703b0f26519138ec448b84b06203f7aa84340147 | [
"LicenseRef-scancode-public-domain"
] | permissive | SimonBr017/AirBnB_clone_v3 | 3add07db83b11b606084c27ce271ece9f5e255ff | 9427f89680c12fdc9f2b4b30f3a7a2ad2405cbb4 | refs/heads/main | 2023-08-28T10:29:58.841924 | 2021-09-20T17:57:33 | 2021-09-20T17:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/python3
""" Blueprint routes """
from api.v1.views import app_views
from flask import jsonify
from models import storage
@app_views.route('/status', strict_slashes=False)
def status():
    """Report API health; responds with {"status": "OK"} as JSON."""
    payload = {'status': 'OK'}
    return jsonify(payload)
@app_views.route('/stats', strict_slashes=False)
def stats():
    """Tally how many objects of each type are in storage."""
    class_by_key = {
        'amenities': 'Amenity',
        'cities': 'City',
        'places': 'Place',
        'reviews': 'Review',
        'states': 'State',
        'users': 'User',
    }
    return {key: storage.count(cls) for key, cls in class_by_key.items()}
"etiennebrxv@gmail.com"
] | etiennebrxv@gmail.com |
43363719bdcfc095aafb427a6bd230358c843e28 | 8b53a8b9803d92003f3a3a9e1b08def7642ba35d | /TALLERES/TAL3_while_for_20210217_cur/2_while_1hastan.py | 61c8e9c8dec335c9e77d6c5eca291edc005ba68a | [] | no_license | smarulan613/fundamentos_prog_20211_sebasmc | 637cdf9e1f61de0f876fe74530df4e6a5b40d6a6 | 0a87d81dae2bd5656a3e6a521585da661efe6cf6 | refs/heads/main | 2023-05-02T04:26:47.035698 | 2021-05-27T03:37:05 | 2021-05-27T03:37:05 | 356,059,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 19:38:09 2021
@author: R005
"""
# Ask the user for a positive upper bound and print the integers
# from 1 up to (and including) that value, one per line.
limit = int(input("Ingrese el valor final:"))
current = 1
while current <= limit:
    print(current)
    current += 1
| [
"noreply@github.com"
] | smarulan613.noreply@github.com |
b19873da3ddd42c1513de7fe80fb969ef2c415d5 | db6b79665d35eb6a44c81c68b01e947b534e308d | /code/setup.py | ca4507f4acf8084c73ad663dd5573ebaa1eae431 | [] | no_license | davidwhogg/crushinator | ffe4536b04a1239dcf04334ea6975d367c2ac0ca | 4f0f11503af6bab5ee4498cb6981ddf8b8307339 | refs/heads/master | 2018-12-29T00:38:20.145527 | 2014-01-11T19:47:42 | 2014-01-11T19:47:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # python setup.py build_ext --inplace --rpath=...
# Build the Cython extensions (run: python setup.py build_ext --inplace),
# then move the compiled shared objects into the package directory.
import os
from distutils.core import setup, Extension
from Cython.Distutils import build_ext

# GSL install locations for the interpolation extension.
GSL_LIB_DIR = '/home/rfadely/local/lib/'
GSL_INCLUDE_DIR = '/home/rfadely/local/include/'

extensions = [
    Extension(
        'interpolation',
        ['./crushinator/interpolation.pyx'],
        libraries=['gsl', 'gslcblas'],
        library_dirs=[GSL_LIB_DIR],
        include_dirs=[GSL_INCLUDE_DIR, '.'],
    ),
    Extension('flux_calculation', ['./crushinator/flux_calculation.pyx']),
]

setup(cmdclass={'build_ext': build_ext}, ext_modules=extensions)

# Relocate the freshly built modules next to the package sources.
os.system('mv interpolation.so ./crushinator/')
os.system('mv flux_calculation.so ./crushinator/')
| [
"rossfadely@gmail.com"
] | rossfadely@gmail.com |
81235696795943f3e5ef158eb7bd4a9ec3eacb3f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_145/ch14_2019_04_02_21_19_13_573782.py | c234f51f74eed63be62398e7514723c6cd5a63bd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | def calcula_volume_da_esfera(r):\n return (4*math.pi*(r**3))/3 | [
"you@example.com"
] | you@example.com |
2367b8d14a26b5f8da01129ea04dcd71079fae39 | 6aab2d11b3ab7619ee26319886dcfc771cbcaba5 | /0x08-python-more_classes/2-rectangle.py | c7653413c47b30e26a5abc2a2c2a3e0c806a6936 | [] | no_license | IhebChatti/holbertonschool-higher_level_programming | ef592f25eb077e182a0295cb5f2f7d69c7a8ab67 | ca58262c6f82f98b2022344818e20d382cf82592 | refs/heads/master | 2022-12-18T10:06:30.443550 | 2020-09-24T17:31:30 | 2020-09-24T17:31:30 | 259,174,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | #!/usr/bin/python3
"""Defining a rectangle
"""
class Rectangle:
    """A 2D rectangle defined by a non-negative integer width and height."""

    def __init__(self, width=0, height=0):
        """Initialize a Rectangle.

        Keyword Arguments:
            width {int} -- width of rectangle (default: {0})
            height {int} -- height of rectangle (default: {0})

        Raises:
            TypeError: if width or height is not an integer
            ValueError: if width or height is negative
        """
        self.height = height
        self.width = width

    @staticmethod
    def _validated(name, value):
        """Return value if it is a non-negative int, else raise.

        Centralizes the identical validation previously duplicated in the
        width and height setters; error messages are unchanged.
        """
        if not isinstance(value, int):
            raise TypeError('{} must be an integer'.format(name))
        if value < 0:
            raise ValueError('{} must be >= 0'.format(name))
        return value

    @property
    def width(self):
        """int: horizontal size of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        self.__width = self._validated('width', value)

    @property
    def height(self):
        """int: vertical size of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        self.__height = self._validated('height', value)

    def area(self):
        """Return the area (width * height) of the rectangle."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter, or 0 if either dimension is 0."""
        if self.width == 0 or self.height == 0:
            return 0
        return (self.height + self.width) * 2
| [
"iheb.chatti@holbertonschool.com"
] | iheb.chatti@holbertonschool.com |
5ea3fa316976d2ea5d4af97f47e4e3e72025754f | 5a424888b89eb2b480cbe1bc484e5ed95427f2e1 | /plot/a_dzliu_code_Plot_CSFRD.py | 07a6c14b8fb5e0fb20988373719e910d55cf8b27 | [] | no_license | 1054/DeepFields.GalaxyModelling | 07f7d33b987613d01a900126d4720f96b3964414 | 5cd31e80782f079040577d94d046ab0728f0c3bc | refs/heads/master | 2023-01-01T09:18:27.188092 | 2020-10-25T22:48:33 | 2020-10-25T22:48:33 | 107,806,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,771 | py | #!/usr/bin/env python
#
import os, sys, re, json, numpy, astropy, matplotlib, subprocess
#matplotlib.use('Qt5Agg')
from astropy.table import Table
from astropy import units as u
from matplotlib import pyplot as plt
from matplotlib import ticker as ticker
import numpy as np
from pprint import pprint
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.725)
sys.path.append('/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plot/Common_Python_Code')
from setup_matplotlib import setup_matplotlib; setup_matplotlib()
#matplotlib.rcParams['text.usetex'] = True
#matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage[cm]{sfmath}']
#matplotlib.rcParams['font.sans-serif'] = 'cm'
#matplotlib.rcParams['font.family'] = 'sans-serif'
from calc_galaxy_main_sequence import (
calc_SFR_MS_Speagle2014,
calc_SFR_MS_Sargent2014,
calc_SFR_MS_Whitaker2014,
calc_SFR_MS_Bethermin2015,
calc_SFR_MS_Schreiber2015,
calc_SFR_MS_Lee2015,
calc_SFR_MS_Tomczak2016,
calc_SFR_MS_Pearson2018,
calc_SFR_MS_Leslie20180901,
calc_SFR_MS_Leslie20190111,
calc_SFR_MS_Leslie20190515,
calc_SFR_MS_Leslie20190710,
calc_SFR_MS_Leslie20191212,
calc_SFR_MS_Scoville2017,
)
from calc_cosmic_star_formation_rate_density import (calc_CSFRD_Madau2014, convert_age_to_z)
#
# User Setting
#
#obs_area = 7200*u.arcmin*u.arcmin # 1.4*1.4*u.deg*u.deg
#obs_area = 1.4*1.4*u.deg*u.deg
obs_area = 1.5546582999901375*u.deg*u.deg
#obs_area = 2.0*u.deg*u.deg
print('obs_area = %s [%s]'%(obs_area.to(u.arcmin*u.arcmin).value, obs_area.to(u.arcmin*u.arcmin).unit))
print('obs_area = %s [%s]'%(obs_area.to(u.steradian).value, obs_area.to(u.steradian).unit))
#print('obs_area = %s [%s]'%(7200 * 3600 / 4.25451703e10, 'steradian')) # checked consistent
#
# Read data points
#
tb = Table.read('datatable_generated_galaxies_with_coordinates.fits')
#print(tb.colnames)
#print(tb['MSTAR'].data.shape)
#print(tb['MSTAR'][0][0], tb['SFR'][0][0])
data_lgMstar = tb['lgMstar'].data.flatten()
data_Mstar = 10**data_lgMstar
data_lgSFR = tb['lgSFR'].data.flatten()
data_SFR = 10**data_lgSFR
data_redshift = tb['z'].data.flatten()
#sys.exit()
#
# def
#
def tick_function(X):
    """Format the cosmic age (Gyr) at each redshift in X to one decimal."""
    ages = cosmo.age(X).value
    return ['%0.1f' % age for age in ages]
#
# fig
#
fig = plt.figure(figsize=(6.8,4.8))
fig.subplots_adjust(left=0.15, right=0.95, bottom=0.105, top=0.885)
ax1 = fig.add_subplot(1,1,1)
ax1.set_xlabel('Redshift', fontsize=16, labelpad=1)
ax1.set_ylabel(r'$\log_{10} \, \rho_{\mathrm{SFR}}$ [$\mathrm{M_{\odot}\,yr^{-1}\,Mpc^{-3}}$]', fontsize=17, labelpad=15)
ax1.tick_params(axis='both', labelsize=14)
ax1.tick_params(direction='in', axis='both', which='both')
ax1.tick_params(top=False, right=True, which='both')
my_tick_locations = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
ax1.set_xticks(my_tick_locations)
#ax1.set_xlim([-0.3, np.max(my_tick_locations)])
ax1.set_xlim([-0.2, 5.5])
ax1.set_ylim([-2.0, -0.2])
ax1.grid(True, ls='--', lw=0.25)
#new_tick_locations = convert_age_to_z([13.7, 2.0, 1.0, 0.5, 0.3]) #<20190915><BUGGY>#
new_tick_locations = convert_age_to_z([cosmo.age(0).value, 5.0, 3.0, 2.0, 1.0, 0.7, ])
new_tick_locations = new_tick_locations[np.argwhere(np.logical_and(new_tick_locations >= ax1.get_xlim()[0], new_tick_locations <= ax1.get_xlim()[1])).flatten()]
print('new_tick_locations', new_tick_locations)
ax2 = ax1.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(new_tick_locations)
ax2.set_xticklabels(tick_function(new_tick_locations))
ax2.set_xlabel(r"Cosmic Age [$\mathrm{Gyr}$]", fontsize=16, labelpad=6)
ax2.minorticks_off()
ax2.grid(None)
# show y minor ticks
ax1.yaxis.set_major_locator(ticker.MultipleLocator(0.2))
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(0.05))
#ax1.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=99))
#ax1.yaxis.set_minor_locator(ticker.LogLocator(base=10.0, subs=np.arange(2,10)*0.1, numticks=99))
#
#
# z
# Build 29 equal-width redshift bins over 0 <= z <= 6. lgPhi_SFR holds the
# log10 CSFRD per bin; -99 acts as a "no data" sentinel.
z_edges = np.linspace(0.0, 6.0, num=30, endpoint=True) # np.array([0.02, 0.25, 0.50, 0.75, 1.00, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0])
z_centers = (z_edges[0:-1] + z_edges[1:]) / 2.0
lgPhi_SFR = z_centers * 0.0 - 99
#
# loop z bin
for i in range(len(z_edges)-1):
    #
    print('z %s - %s'%(z_edges[i], z_edges[i+1]))
    #
    # NOTE(review): `z` is assigned but unused inside this loop.
    z = z_centers[i]
    #
    #comoving_volume = ((cosmo.comoving_volume(z_edges[i+1]) - cosmo.comoving_volume(z_edges[i])) / (4.0*np.pi*u.steradian) * obs_area.to(u.steradian))
    #print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
    #
    # Numerically integrate the differential comoving volume over 10
    # sub-steps of the bin, scaled to the survey solid angle.
    # (The "differntial" spelling is kept as-is: it is a code identifier.)
    differntial_z_list = np.linspace(z_edges[i], z_edges[i+1], num=10, endpoint=True)
    comoving_volume = np.sum((cosmo.differential_comoving_volume(differntial_z_list[1:]) * np.diff(differntial_z_list) * obs_area.to(u.steradian)))
    print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
    #
    # select and count CSFRD
    # Select galaxies in this z bin with SFR > 0 and lgMstar >= 9.0; the
    # CSFRD is their summed SFR divided by the bin's comoving volume.
    data_selection = np.logical_and.reduce((data_redshift >= z_edges[i], data_redshift < z_edges[i+1], data_SFR > 0, data_lgMstar >= 9.0))
    data_Phi_SFR = np.sum(data_SFR[data_selection]) / comoving_volume.value
    lgPhi_SFR[i] = np.log10(data_Phi_SFR)
#
Phi_SFR_MD14 = calc_CSFRD_Madau2014(z_centers)
lgPhi_SFR_MD14 = np.log10(Phi_SFR_MD14)
ax1.plot(z_centers, lgPhi_SFR_MD14, c='red', ls='solid', solid_capstyle='butt', alpha=0.8, lw=2, label=r'SMF MD14')
#
#
plot_label = r'dzliu model (lgMstar$\gtrsim$9.0)'
current_dir = os.path.basename(os.getcwd())
if re.match(r'^.*_using_([a-zA-Z0-9]+)_MS([_].*|)$', current_dir):
plot_label = plot_label + '\n(%s MS)'%(re.sub(r'^.*_using_([a-zA-Z0-9]+)_MS([_].*|)$', r'\1', current_dir))
ax1.step(z_centers, lgPhi_SFR, where='mid', alpha=0.6, label=plot_label)
#
#
#plot_legend1 = plt.legend(\
# legend_handles,
# legend_labels,
# fontsize=16, loc='upper right',
# #borderpad=0.6, borderaxespad=0.6, handlelength=2.8,
# )
#ax1.add_artist(plot_legend1)
ax1.legend(loc='upper left', ncol=2, framealpha=0.5)
#
# savefig
fig.savefig('Plot_CSFRD.pdf', transparent=True)
print('Output to "%s"!' % ('Plot_CSFRD.pdf') )
os.system('open "%s"' % ('Plot_CSFRD.pdf') )
| [
"liudz1054@gmail.com"
] | liudz1054@gmail.com |
74829d12c11a421f95e3ecf384bad58b864b91f5 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/api/serviceusage/v1/api-serviceusage-v1-py/google/api/serviceusage_v1/services/service_usage/async_client.py | c232f03d5b647b99931e9c785a407f0a231cc235 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,868 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api.serviceusage_v1.services.service_usage import pagers
from google.api.serviceusage_v1.types import resources
from google.api.serviceusage_v1.types import serviceusage
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from .transports.base import ServiceUsageTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ServiceUsageGrpcAsyncIOTransport
from .client import ServiceUsageClient
class ServiceUsageAsyncClient:
"""`Service Usage API </service-usage/docs/overview>`__"""
_client: ServiceUsageClient
DEFAULT_ENDPOINT = ServiceUsageClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ServiceUsageClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(ServiceUsageClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ServiceUsageClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ServiceUsageClient.common_folder_path)
parse_common_folder_path = staticmethod(ServiceUsageClient.parse_common_folder_path)
common_organization_path = staticmethod(ServiceUsageClient.common_organization_path)
parse_common_organization_path = staticmethod(ServiceUsageClient.parse_common_organization_path)
common_project_path = staticmethod(ServiceUsageClient.common_project_path)
parse_common_project_path = staticmethod(ServiceUsageClient.parse_common_project_path)
common_location_path = staticmethod(ServiceUsageClient.common_location_path)
parse_common_location_path = staticmethod(ServiceUsageClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ServiceUsageAsyncClient: The constructed client.
"""
return ServiceUsageClient.from_service_account_info.__func__(ServiceUsageAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ServiceUsageAsyncClient: The constructed client.
"""
return ServiceUsageClient.from_service_account_file.__func__(ServiceUsageAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ServiceUsageTransport:
"""Return the transport used by the client instance.
Returns:
ServiceUsageTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ServiceUsageClient).get_transport_class, type(ServiceUsageClient))
def __init__(self, *,
credentials: credentials.Credentials = None,
transport: Union[str, ServiceUsageTransport] = 'grpc_asyncio',
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the service usage client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ServiceUsageTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ServiceUsageClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def enable_service(self,
request: serviceusage.EnableServiceRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Enable a service so that it can be used with a
project.
Args:
request (:class:`google.api.serviceusage_v1.types.EnableServiceRequest`):
The request object. Request message for the
`EnableService` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.api.serviceusage_v1.types.EnableServiceResponse` Response message for the EnableService method.
This response message is assigned to the response
field of the returned Operation when that operation
is done.
"""
# Create or coerce a protobuf request object.
request = serviceusage.EnableServiceRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.enable_service,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
serviceusage.EnableServiceResponse,
metadata_type=resources.OperationMetadata,
)
# Done; return the response.
return response
async def disable_service(self,
request: serviceusage.DisableServiceRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Disable a service so that it can no longer be used with a
project. This prevents unintended usage that may cause
unexpected billing charges or security leaks.
It is not valid to call the disable method on a service that is
not currently enabled. Callers will receive a
``FAILED_PRECONDITION`` status if the target service is not
currently enabled.
Args:
request (:class:`google.api.serviceusage_v1.types.DisableServiceRequest`):
The request object. Request message for the
`DisableService` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.api.serviceusage_v1.types.DisableServiceResponse` Response message for the DisableService method.
This response message is assigned to the response
field of the returned Operation when that operation
is done.
"""
# Create or coerce a protobuf request object.
request = serviceusage.DisableServiceRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.disable_service,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
serviceusage.DisableServiceResponse,
metadata_type=resources.OperationMetadata,
)
# Done; return the response.
return response
async def get_service(self,
request: serviceusage.GetServiceRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Service:
r"""Returns the service configuration and enabled state
for a given service.
Args:
request (:class:`google.api.serviceusage_v1.types.GetServiceRequest`):
The request object. Request message for the `GetService`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api.serviceusage_v1.types.Service:
A service that is available for use
by the consumer.
"""
# Create or coerce a protobuf request object.
request = serviceusage.GetServiceRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_service,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_services(self,
request: serviceusage.ListServicesRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListServicesAsyncPager:
r"""List all services available to the specified project, and the
current state of those services with respect to the project. The
list includes all public services, all services for which the
calling user has the ``servicemanagement.services.bind``
permission, and all services that have already been enabled on
the project. The list can be filtered to only include services
in a specific state, for example to only include services
enabled on the project.
WARNING: If you need to query enabled services frequently or
across an organization, you should use `Cloud Asset Inventory
API <https://cloud.google.com/asset-inventory/docs/apis>`__,
which provides higher throughput and richer filtering
capability.
Args:
request (:class:`google.api.serviceusage_v1.types.ListServicesRequest`):
The request object. Request message for the
`ListServices` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api.serviceusage_v1.services.service_usage.pagers.ListServicesAsyncPager:
Response message for the ListServices method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = serviceusage.ListServicesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_services,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListServicesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def batch_enable_services(self,
request: serviceusage.BatchEnableServicesRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Enable multiple services on a project. The operation is atomic:
if enabling any service fails, then the entire batch fails, and
no state changes occur. To enable a single service, use the
``EnableService`` method instead.
Args:
request (:class:`google.api.serviceusage_v1.types.BatchEnableServicesRequest`):
The request object. Request message for the
`BatchEnableServices` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.api.serviceusage_v1.types.BatchEnableServicesResponse` Response message for the BatchEnableServices method.
This response message is assigned to the response
field of the returned Operation when that operation
is done.
"""
# Create or coerce a protobuf request object.
request = serviceusage.BatchEnableServicesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_enable_services,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
serviceusage.BatchEnableServicesResponse,
metadata_type=resources.OperationMetadata,
)
# Done; return the response.
return response
async def batch_get_services(self,
request: serviceusage.BatchGetServicesRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> serviceusage.BatchGetServicesResponse:
r"""Returns the service configurations and enabled states
for a given list of services.
Args:
request (:class:`google.api.serviceusage_v1.types.BatchGetServicesRequest`):
The request object. Request message for the
`BatchGetServices` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api.serviceusage_v1.types.BatchGetServicesResponse:
Response message for the BatchGetServices method.
"""
# Create or coerce a protobuf request object.
request = serviceusage.BatchGetServicesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_get_services,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-api-serviceusage',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
'ServiceUsageAsyncClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
fd846adca759b0c9b2a65e2e835dc49ca3f384b1 | 0bb4e169444ae71b3d64522f94141a7a00054111 | /userid_to_slug.py | 2b7b1f25806f1a8740f94cc3c660ca6b7d14e5ad | [
"CC0-1.0"
] | permissive | riceissa/ea-forum-reader | 5cf02f52f767c28417a64b4b76fc57c07ab0f011 | 4c8b76b8947b41bc8f3b3067e75ef7870eb4fa6b | refs/heads/master | 2022-05-05T22:21:48.011316 | 2022-03-12T19:01:36 | 2022-03-12T19:01:36 | 156,928,401 | 11 | 3 | NOASSERTION | 2018-11-24T07:06:07 | 2018-11-09T23:16:13 | Python | UTF-8 | Python | false | false | 308 | py | #!/usr/bin/env python3
import sys
import util
# For some reason, the Algolia search result JSON only has the user ID and
# username, not the user slug. So to be able to link to the user page from
# search results, we need to conver the userid to a user slug.
print(util.userid_to_userslug(sys.argv[1]))
| [
"riceissa@gmail.com"
] | riceissa@gmail.com |
f2ca82e1a1bd182480593aac57fe76d0675b56f5 | e45d2faad9389886a82ff5176853b1ff6e37caae | /argparse/047_argparse_conflict_handler.py | 4098c31734537ec1c7e84ff61f17359e57119df6 | [] | no_license | allenmo/python_study | 6320aa4cd80fe46ccf73076015c67bdcb6338d30 | 7aff5d810ca6e791d62235d57c072a8dc14457ca | refs/heads/master | 2021-03-24T12:00:33.079530 | 2016-11-22T23:35:58 | 2016-11-22T23:35:58 | 55,770,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | import argparse
parser = argparse.ArgumentParser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-f', '--foo', help='old foo help')
parser.add_argument('--foo', help='new foo help')
parser.print_help()
| [
"allen02403@gmail.com"
] | allen02403@gmail.com |
65daa93551bf2b8891afdd05edfb2d723e291f96 | f188379dc9c1e5b63e432d434c782a4d6997872b | /5_List Advanced/Lab/03. Palindrome Strings.py | 59d10bcd5a3064551f8fafa8f18d719c67768992 | [] | no_license | GalyaBorislavova/SoftUni_Python_Fundamentals_January_2021 | 39d7eb8c28f60ff3c293855b074c49ac622a6036 | 7d479fd6c8e4136fb07b765458cc00088e09767a | refs/heads/main | 2023-06-15T04:16:17.084825 | 2021-06-30T18:05:42 | 2021-06-30T18:05:42 | 381,785,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | words = input().split(" ")
searched_palindrome = input()
palindromes = [p for p in words if p == p[::-1]]
print(palindromes)
print(f"Found palindrome {palindromes.count(searched_palindrome)} times") | [
"galyaborislavova888@gmail.com"
] | galyaborislavova888@gmail.com |
db618d8285a21d6d5d76d68ec2af44087c421572 | 1725fddc758271af6ce911dfe7dcca3f2bb3233d | /service/workflow/workflow_custom_notice_service.py | e8f59ece0a9ac0dd4f12bc77c9fbd59998e0775c | [
"MIT"
] | permissive | blackholll/loonflow | 7948018324664d6a88df0616275ad4ce28400745 | b0e236b314286c5f6cc6959622c9c8505e776443 | refs/heads/master | 2023-08-22T21:32:08.223136 | 2023-06-04T13:39:27 | 2023-06-04T13:39:27 | 120,720,556 | 1,864 | 733 | MIT | 2023-07-05T23:24:18 | 2018-02-08T06:26:53 | Python | UTF-8 | Python | false | false | 5,191 | py | import json
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from apps.workflow.models import CustomNotice
from service.base_service import BaseService
from service.common.constant_service import constant_service_ins
from service.common.log_service import auto_log
class WorkflowCustomNoticeService(BaseService):
    """
    Workflow notification service: CRUD operations for CustomNotice records,
    the webhook / app-credential channels that workflows use to send notices.
    """
    def __init__(self):
        pass
    @classmethod
    @auto_log
    def get_notice_list(cls, query_value: str, page: int, per_page: int, simple: bool=False)->tuple:
        """
        Get a paginated list of custom notices (soft-deleted rows excluded).
        :param query_value: keyword matched against name or description; empty/falsy disables filtering
        :param page: requested page number; non-integer falls back to page 1, out-of-range to the last page
        :param per_page: number of records per page
        :param simple: when True, strip credential/secret fields from each record
        :return: (record_list, dict(per_page=..., page=..., total=...))
        """
        query_params = Q(is_deleted=False)
        if query_value:
            query_params &= Q(name__contains=query_value) | Q(description__contains=query_value)
        custom_notice_querset = CustomNotice.objects.filter(query_params).order_by('id')
        paginator = Paginator(custom_notice_querset, per_page)
        try:
            custom_notice_result_paginator = paginator.page(page)
        except PageNotAnInteger:
            custom_notice_result_paginator = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results
            custom_notice_result_paginator = paginator.page(paginator.num_pages)
        custom_notice_result_object_list = custom_notice_result_paginator.object_list
        custom_notice_result_restful_list = []
        for custom_notice_result_object in custom_notice_result_object_list:
            per_notice_data = custom_notice_result_object.get_dict()
            if simple:
                # Hide channel credentials/secrets in the lightweight listing.
                per_notice_data.pop("corpid")
                per_notice_data.pop("corpsecret")
                per_notice_data.pop("appkey")
                per_notice_data.pop("appsecret")
                per_notice_data.pop("hook_url")
                per_notice_data.pop("hook_token")
            custom_notice_result_restful_list.append(per_notice_data)
        return custom_notice_result_restful_list, dict(per_page=per_page, page=page, total=paginator.count)
    @classmethod
    @auto_log
    def add_custom_notice(cls, name: str, description: str, type_id: int, corpid: str, corpsecret: str, appkey: str,
                          appsecret: str, hook_url: str, hook_token: str, creator: str)->tuple:
        """
        Add a new custom notice record.
        :param name: display name of the notice channel
        :param description: free-text description
        :param type_id: notice channel type identifier
        :param corpid: corp id credential (corp-style channels)
        :param corpsecret: corp secret credential
        :param appkey: app key credential (app-style channels)
        :param appsecret: app secret credential
        :param hook_url: webhook URL (hook-style channels)
        :param hook_token: webhook token
        :param creator: username of the creator
        :return: (True, dict(notice_id=...))
        """
        notice_obj = CustomNotice(name=name, description=description, type_id=type_id, corpid=corpid,
                                  corpsecret=corpsecret, appkey=appkey, appsecret=appsecret, hook_url=hook_url,
                                  hook_token=hook_token, creator=creator)
        notice_obj.save()
        return True, dict(notice_id=notice_obj.id)
    @classmethod
    @auto_log
    def update_custom_notice(cls, custom_notice_id: int, name: str, description: str, type_id: int,
                             corpid: str, corpsecret: str, appkey: str, appsecret: str, hook_url: str,
                             hook_token: str)->tuple:
        """
        Update an existing custom notice record.
        :param custom_notice_id: primary key of the record to update
        :param name: new display name
        :param description: new description
        :param type_id: new notice channel type identifier
        :param corpid: new corp id credential
        :param corpsecret: new corp secret credential
        :param appkey: new app key credential
        :param appsecret: new app secret credential
        :param hook_url: new webhook URL
        :param hook_token: new webhook token
        :return: (True, '') on success, (False, error_message) if the record does not exist
        """
        custom_notice_obj = CustomNotice.objects.filter(id=custom_notice_id, is_deleted=0)
        if custom_notice_obj:
            custom_notice_obj.update(name=name, description=description, hook_url=hook_url, hook_token=hook_token,
                                     type_id=type_id, corpid=corpid, corpsecret=corpsecret, appkey=appkey,
                                     appsecret=appsecret)
        else:
            return False, 'the record is not existed or has been deleted'
        return True, ''
    @classmethod
    @auto_log
    def del_custom_notice(cls, custom_notice_id: int)->tuple:
        """
        Soft-delete a custom notice record (sets is_deleted, keeps the row).
        :param custom_notice_id: primary key of the record to delete
        :return: (True, '') on success, (False, error_message) if the record does not exist
        """
        custom_notice_obj = CustomNotice.objects.filter(id=custom_notice_id, is_deleted=0)
        if custom_notice_obj:
            custom_notice_obj.update(is_deleted=True)
            return True, ''
        else:
            return False, 'the record is not exist or has been deleted'
    @classmethod
    @auto_log
    def get_notice_detail(cls, custom_notice_id: int)->tuple:
        """
        Get the full detail (including credential fields) of one custom notice.
        :param custom_notice_id: primary key of the record to fetch
        :return: (True, record_dict) on success, (False, error_message) if not found
        """
        custom_notice_obj = CustomNotice.objects.filter(id=custom_notice_id, is_deleted=0).first()
        if custom_notice_obj:
            custom_notice_info = custom_notice_obj.get_dict()
            return True, custom_notice_info
        else:
            return False, 'record is not exist or has been deleted'
# Module-level singleton instance imported by callers of this service.
workflow_custom_notice_service_ins = WorkflowCustomNoticeService()
| [
"blackholll@163.com"
] | blackholll@163.com |
483133ff3733c8c7954f199313fc21a841f27a54 | ad38b9a924911b3249b9ffec01d78a2b1048fa0d | /动态调试/Immunity Debugger v1.73/Lib/test/test_class.py | 97e262adbfbd6054364005f392020d097da930ce | [] | no_license | h3len/HackerToolBox | 77c5a45553784d20104db21ac5fe8f840ca519a6 | 4397b0c25cfd0eb3f92484f396745cc664af2531 | refs/heads/master | 2020-04-04T22:57:47.376773 | 2018-10-10T15:43:06 | 2018-10-10T15:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,813 | py | "Test the functionality of Python classes implementing operators."
from test.test_support import TestFailed
testmeths = [
# Binary operations
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"div",
"rdiv",
"mod",
"rmod",
"divmod",
"rdivmod",
"pow",
"rpow",
"rshift",
"rrshift",
"lshift",
"rlshift",
"and",
"rand",
"or",
"ror",
"xor",
"rxor",
# List/dict operations
"contains",
"getitem",
"getslice",
"setitem",
"setslice",
"delitem",
"delslice",
# Unary operations
"neg",
"pos",
"abs",
# generic operations
"init",
]
# These need to return something other than None
# "coerce",
# "hash",
# "str",
# "repr",
# "int",
# "long",
# "float",
# "oct",
# "hex",
# These are separate because they can influence the test of other methods.
# "getattr",
# "setattr",
# "delattr",
class AllTests:
def __coerce__(self, *args):
print "__coerce__:", args
return (self,) + args
def __hash__(self, *args):
print "__hash__:", args
return hash(id(self))
def __str__(self, *args):
print "__str__:", args
return "AllTests"
def __repr__(self, *args):
print "__repr__:", args
return "AllTests"
def __int__(self, *args):
print "__int__:", args
return 1
def __float__(self, *args):
print "__float__:", args
return 1.0
def __long__(self, *args):
print "__long__:", args
return 1L
def __oct__(self, *args):
print "__oct__:", args
return '01'
def __hex__(self, *args):
print "__hex__:", args
return '0x1'
def __cmp__(self, *args):
print "__cmp__:", args
return 0
def __del__(self, *args):
print "__del__:", args
# Synthesize AllTests methods from the names in testmeths.
method_template = """\
def __%(method)s__(self, *args):
print "__%(method)s__:", args
"""
for method in testmeths:
exec method_template % locals() in AllTests.__dict__
del method, method_template
# this also tests __init__ of course.
testme = AllTests()
# Binary operations
testme + 1
1 + testme
testme - 1
1 - testme
testme * 1
1 * testme
if 1/2 == 0:
testme / 1
1 / testme
else:
# True division is in effect, so "/" doesn't map to __div__ etc; but
# the canned expected-output file requires that __div__ etc get called.
testme.__coerce__(1)
testme.__div__(1)
testme.__coerce__(1)
testme.__rdiv__(1)
testme % 1
1 % testme
divmod(testme,1)
divmod(1, testme)
testme ** 1
1 ** testme
testme >> 1
1 >> testme
testme << 1
1 << testme
testme & 1
1 & testme
testme | 1
1 | testme
testme ^ 1
1 ^ testme
# List/dict operations
1 in testme
testme[1]
testme[1] = 1
del testme[1]
testme[:42]
testme[:42] = "The Answer"
del testme[:42]
testme[2:1024:10]
testme[2:1024:10] = "A lot"
del testme[2:1024:10]
testme[:42, ..., :24:, 24, 100]
testme[:42, ..., :24:, 24, 100] = "Strange"
del testme[:42, ..., :24:, 24, 100]
# Now remove the slice hooks to see if converting normal slices to slice
# object works.
del AllTests.__getslice__
del AllTests.__setslice__
del AllTests.__delslice__
import sys
if sys.platform[:4] != 'java':
testme[:42]
testme[:42] = "The Answer"
del testme[:42]
else:
# This works under Jython, but the actual slice values are
# different.
print "__getitem__: (slice(0, 42, None),)"
print "__setitem__: (slice(0, 42, None), 'The Answer')"
print "__delitem__: (slice(0, 42, None),)"
# Unary operations
-testme
+testme
abs(testme)
int(testme)
long(testme)
float(testme)
oct(testme)
hex(testme)
# And the rest...
hash(testme)
repr(testme)
str(testme)
testme == 1
testme < 1
testme > 1
testme <> 1
testme != 1
1 == testme
1 < testme
1 > testme
1 <> testme
1 != testme
# This test has to be last (duh.)
del testme
if sys.platform[:4] == 'java':
import java
java.lang.System.gc()
# Interfering tests
class ExtraTests:
def __getattr__(self, *args):
print "__getattr__:", args
return "SomeVal"
def __setattr__(self, *args):
print "__setattr__:", args
def __delattr__(self, *args):
print "__delattr__:", args
testme = ExtraTests()
testme.spam
testme.eggs = "spam, spam, spam and ham"
del testme.cardinal
# return values of some method are type-checked
class BadTypeClass:
def __int__(self):
return None
__float__ = __int__
__long__ = __int__
__str__ = __int__
__repr__ = __int__
__oct__ = __int__
__hex__ = __int__
def check_exc(stmt, exception):
"""Raise TestFailed if executing 'stmt' does not raise 'exception'
"""
try:
exec stmt
except exception:
pass
else:
raise TestFailed, "%s should raise %s" % (stmt, exception)
check_exc("int(BadTypeClass())", TypeError)
check_exc("float(BadTypeClass())", TypeError)
check_exc("long(BadTypeClass())", TypeError)
check_exc("str(BadTypeClass())", TypeError)
check_exc("repr(BadTypeClass())", TypeError)
check_exc("oct(BadTypeClass())", TypeError)
check_exc("hex(BadTypeClass())", TypeError)
# mixing up ints and longs is okay
class IntLongMixClass:
def __int__(self):
return 0L
def __long__(self):
return 0
try:
int(IntLongMixClass())
except TypeError:
raise TestFailed, "TypeError should not be raised"
try:
long(IntLongMixClass())
except TypeError:
raise TestFailed, "TypeError should not be raised"
# Test correct errors from hash() on objects with comparisons but no __hash__
class C0:
pass
hash(C0()) # This should work; the next two should raise TypeError
class C1:
def __cmp__(self, other): return 0
check_exc("hash(C1())", TypeError)
class C2:
def __eq__(self, other): return 1
check_exc("hash(C2())", TypeError)
# Test for SF bug 532646
class A:
pass
A.__call__ = A()
a = A()
try:
a() # This should not segfault
except RuntimeError:
pass
else:
raise TestFailed, "how could this not have overflowed the stack?"
# Tests for exceptions raised in instance_getattr2().
def booh(self):
raise AttributeError, "booh"
class A:
a = property(booh)
try:
A().a # Raised AttributeError: A instance has no attribute 'a'
except AttributeError, x:
if str(x) != "booh":
print "attribute error for A().a got masked:", str(x)
class E:
__eq__ = property(booh)
E() == E() # In debug mode, caused a C-level assert() to fail
class I:
__init__ = property(booh)
try:
I() # In debug mode, printed XXX undetected error and raises AttributeError
except AttributeError, x:
pass
else:
print "attribute error for I.__init__ got masked"
# Test comparison and hash of methods
class A:
def __init__(self, x):
self.x = x
def f(self):
pass
def g(self):
pass
def __eq__(self, other):
return self.x == other.x
def __hash__(self):
return self.x
class B(A):
pass
a1 = A(1)
a2 = A(2)
assert a1.f == a1.f
assert a1.f != a2.f
assert a1.f != a1.g
assert a1.f == A(1).f
assert hash(a1.f) == hash(a1.f)
assert hash(a1.f) == hash(A(1).f)
assert A.f != a1.f
assert A.f != A.g
assert B.f == A.f
assert hash(B.f) == hash(A.f)
# the following triggers a SystemError in 2.4
a = A(hash(A.f.im_func)^(-1))
hash(a.f)
| [
"redleavessun@gmail.com"
] | redleavessun@gmail.com |
326b022cdca18e93633916731c94410b1b38a0bb | 984b7f3efe488e09080b96fa45a92e8d3396c86a | /ELFMiner.py | 9cc6e0028b4fd74c2063430d0c39f29a9c40760a | [
"MIT"
] | permissive | zychia/ELF-Miner | c0ee995a17b41e559ac6ff7914c2f26be38ed337 | 438b9dcd65a3d7a79ef830c1a5517891e12cfa7e | refs/heads/master | 2021-02-08T00:26:49.991431 | 2018-12-21T13:50:53 | 2018-12-21T13:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,965 | py | from readelf3 import *
import sys
import subprocess
from subprocess import call
import pandas as pd
import csv
import os
features = []
headers = []
final_report = "a.txt"
section_list_size = 0
def prepare_headers():
    """Populate the module-level ``headers`` list: first the 14 ELF
    file-header column names, then one ``<section><suffix>`` column for every
    known section/attribute pair (35 sections x 7 suffixes = 245 columns)."""
    headers.extend(["Name", "Identification", "MachineType", "ELFVersion", "EntryPointAddress", "ProgramHeaderOffset", "SectionHeaderOffset", "Flags", "HeaderSize", "SizeProgramHeader", "EntriesProgram", "SizeSectionHeader", "EntriesSection", "StringTableIndex"])
    print(len(headers))
    known_sections = [".text", ".bss", ".comment", ".data", ".data1", ".debug", ".dynamic", ".dynstr", ".dynsym", ".fini", ".hash", ".gnu.hash", ".init", ".got", ".interp", ".line", ".note", ".plt", ".rodata", "rodata1", ".shstrtab", ".strtab", ".symtab", ".sdata", ".sbss", ".lit8", ".gptab", ".conflict", ".tdesc", ".lit4", ".reginfo", ".liblist", ".rel.dyn", ".rel.plt", ".got.plt"]
    suffixes = ["_type", "_flags", "_size", "_entsize", "_table_index_link", "_info", "_alignment"]
    # NOTE: this creates a local; the module-level section_list_size is untouched.
    section_list_size = len(known_sections)
    for section in known_sections:
        headers.extend(section + suffix for suffix in suffixes)
    print(len(headers))
def input_file(file):
    """Record *file* in the global ``features`` row and open it as an ELF.

    Returns a ReadElf instance on success; exits the process when the file
    cannot be opened or is not a valid ELF binary.
    """
    features.append(file)
    print("Input file: %s" % file)
    try:
        # Don't shadow the ``file`` parameter with the handle.
        with open(file, 'rb') as fh:
            try:
                return ReadElf(fh, sys.stdout)
            except ELFError as ex:
                sys.stderr.write('ELF error: %s\n' % ex)
                sys.exit(1)
    except IOError as ex:
        # Fixed: previously called the undefined print_err(), which raised a
        # NameError instead of reporting the I/O problem.
        sys.stderr.write('IO Error when opening file: %s\n' % ex)
        sys.exit(1)
def elf_headers(elf):
    """Append the 13 machine-level ELF file-header columns to ``features``."""
    # NOTE(review): ``version`` is bound twice in this unpacking (once among
    # the identity fields, once just before entry_point_address); the second
    # binding wins — confirm against readelf3.display_file_header()'s order.
    identification, file_class, data, version, abi, abi_version, type_file, machine, version, entry_point_address, start_program_headers, start_section_headers, flags, header_size, size_program_header, num_program_header, size_section_header, num_section_header, str_table_ind = elf.display_file_header()
    features.extend([identification, machine, version, entry_point_address, start_program_headers, start_section_headers, flags, header_size, size_program_header, num_program_header, size_section_header, num_section_header, str_table_ind])
def section_headers(elf):
    """Fill the per-section feature cells.

    Reserves 245 empty cells (35 sections x 7 attributes), then for every
    section reported by readelf3.process() writes its attribute values into
    the cells following the matching ``<name>_type`` header column.

    ``elf`` is accepted for symmetry with the other extractors but unused;
    the binary path is re-read from sys.argv[1].
    """
    sections_data_list = process(sys.argv[1])[0][1:]
    features.extend([""] * 245)
    for section_data in sections_data_list:
        try:
            ind = headers.index(section_data[0] + "_type")
            for j, value in enumerate(section_data[1:]):
                features[ind + j] = value
        except (ValueError, IndexError):
            # Fixed: bare except narrowed. ValueError: the section is not in
            # the known header list; IndexError: more attribute values than
            # reserved cells. Either way the section is skipped.
            continue
def symbols_table(file):
    """Count symbol types/bindings from ``readelf -s`` for .dynsym and .symtab.

    Appends one (name, count) column per counter in dynS_type and symT_name
    to the global headers/features row.

    NOTE(review): this writes check_output()'s result straight into a
    text-mode file, which only works on Python 2 (bytes vs str under 3).
    NOTE(review): symT_name's literal repeats the key
    's_STT_OBJECT_STB_LOCAL'; the duplicate is harmless but looks like one
    of the intended keys is missing.
    """
    dyna_st_type="NOTYPE|OBJECT|FUNC|SECTION|FILE|COMMON|SPARC_REGISTER|TLS|LOOS|HIOS|LOPROC|HIPROC"
    dyna_st_bind="LOCAL|GLOBAL|WEAK|LOOS|HIOS|LOPROC|HIPROC"
    syms = subprocess.check_output(["readelf","-s",file])
    dynS_type={'STB_LOCAL': 0, 'dynamic_s_c': 0, 'STT_NOTYPE_STB_GLOBAL': 0, 'STT_OBJECT_STB_WEAK': 0, 'STB_GLOBAL': 0, 'STB_WEAK': 0, 'STT_NOTYPE_STB_LOCAL': 0, 'STT_FUNC': 0, 'STT_FUNC_STB_GLOBAL': 0, 'STT_OBJECT_STB_GLOBAL': 0, 'STT_NOTYPE_STB_WEAK': 0, 'STT_NOTYPE': 0, 'STT_OBJECT': 0, 'STT_FUNC_STB_WEAK': 0, 'STT_FUNC_STB_LOCAL': 0, 'STT_OBJECT_STB_LOCAL': 0}
    symT_name = {'s_STB_LOCAL': 0, 'symbol_tab': 0, 's_STT_NOTYPE_STB_GLOBAL': 0, 's_STT_OBJECT_STB_WEAK': 0, 's_STB_GLOBAL': 0, 's_STB_WEAK': 0, 's_STT_NOTYPE_STB_LOCAL': 0, 's_STT_FUNC': 0, 's_STT_FUNC_STB_GLOBAL': 0, 's_STT_OBJECT_STB_GLOBAL': 0, 's_STT_NOTYPE_STB_WEAK': 0, 's_STT_NOTYPE': 0, 's_STT_OBJECT': 0, 's_STT_FUNC_STB_WEAK': 0, 's_STT_FUNC_STB_LOCAL': 0, 's_STT_OBJECT_STB_LOCAL': 0, 's_STT_OBJECT_STB_LOCAL': 0, 's_STT_SECTION_STB_LOCAL': 0, 's_STT_SECTION_STB_GLOBAL': 0}
    dynF_name={}
    symF_name = {}
    # Buffer readelf's output on disk, then parse it back line by line.
    f = open(final_report, 'w')
    f.write(syms)
    f.close()
    with open(final_report,'r') as file:
        # flag tracks which symbol table we are currently inside:
        # 0 = none, 1 = .dynsym, 2 = .symtab.
        flag=0
        for line in file :
            #print line.split()
            if len(line.split())>3 and line.split()[0]=='Symbol'and line.split()[2]=="'.dynsym'":
                count=int(line.split()[4])
                dynS_type['dynamic_s_c']=count
                flag=1
                #print count
            if flag==1 and len(line.split())>3:
                # Column 3 is the symbol type, column 4 the binding; membership
                # is tested as a substring of the pipe-joined name lists.
                if line.split()[3] in dyna_st_type:
                    if 'STT_'+line.split()[3] in dynS_type:
                        dynS_type['STT_'+line.split()[3]]+=1
                    if line.split()[3]=='FUNC':
                        x=line.split()[7]
                        dynF_name[x[:x.find('@')]]=1
                if line.split()[4] in dyna_st_bind:
                    if 'STB_'+line.split()[4] in dynS_type:
                        dynS_type['STB_'+line.split()[4]]+=1
                if 'STT_'+line.split()[3]+'_STB_'+line.split()[4] in dynS_type:
                    dynS_type['STT_'+line.split()[3]+'_STB_'+line.split()[4]]+=1
            if flag==1 and len(line.split())==0:
                # Blank line ends the .dynsym listing.
                flag=0
            if len(line.split())>3 and line.split()[0]=='Symbol'and line.split()[2]=="'.symtab'":
                count=int(line.split()[4])
                symT_name['symbol_tab']=count
                flag = 2
            if flag == 2 and len(line.split())>3:
                if line.split()[3] in dyna_st_type:
                    if 's_STT_'+line.split()[3] in symT_name:
                        symT_name['s_STT_'+line.split()[3]]+=1
                    if line.split()[3]=='FUNC':
                        x=line.split()[7]
                        symF_name[x[:x.find('@')]]=1
                if line.split()[4] in dyna_st_bind:
                    if 's_STB_'+line.split()[4] in symT_name:
                        symT_name['s_STB_'+line.split()[4]]+=1
                if 's_STT_'+line.split()[3]+'_STB_'+line.split()[4] in symT_name:
                    symT_name['s_STT_'+line.split()[3]+'_STB_'+line.split()[4]]+=1
    # Emit every counter as its own column.
    for i in dynS_type.items():
        headers.append(i[0])
        features.append(i[1])
    for i in symT_name.items():
        headers.append(i[0])
        features.append(i[1])
def dynamic_section(file):
    """Count dynamic-section tags from ``readelf -d`` output.

    For every known tag name a 'DYN<tag>' counter column is appended to the
    global headers/features row, plus a DYNCOUNT total of recognized tags.
    NOTE(review): writing check_output()'s bytes to a text-mode file is a
    Python 2 idiom; under Python 3 this raises TypeError.
    """
    dynamic = subprocess.check_output(["readelf","-d", file])
    f = open(final_report, 'w')
    f.write(dynamic)
    f.close()
    dyna_name="NULL|NEEDED|PLTRELSZ|PLTGOT|HASH|STRTAB|SYMTAB|RELA|RELASZ|RELAENT|STRSZ|SYMENT|INIT|FINI|SONAME|RPATH|SYMBOLIC|REL|RELSZ|RELENT|PLTREL|DEBUG|TEXTREL|JMPREL|POSFLAG_1|BIND_NOW|INIT_ARRAY|FINI_ARRAY|INIT_ARRAYSZ|FINI_ARRAYSZ|RUNPATH|FLAGS|ENCODING|PREINIT_ARRAY|PREINIT_ARRAYSZ|MAXPOSTAGS|SUNW_AUXILIARY|SUNW_RTLDINF|SUNW_FILTER|SUNW_CAP|SUNW_SYMTAB|SUNW_SYMSZ|SUNW_ENCODING|SUNW_SORTENT|SUNW_SYMSORT|SUNW_SYMSORTSZ|SUNW_TLSSORT|SUNW_TLSSORTSZ|SUNW_CAPINFO|SUNW_STRPAD|SUNW_CAPCHAIN|SUNW_LDMACH|SUNW_CAPCHAINENT|SUNW_CAPCHAINSZ|SYMINFO|SYMINENT|SYMINSZ|VERDEF|VERDEFNUM|VERNEED|VERNEEDNUM|RELACOUNT|RELCOUNT|AUXILIARY|FILTER|CHECKSUM|MOVEENT|MOVESZ|MOVETAB|CONFIG|DEPAUDIT|AUDIT|FLAGS_1|SPARC_REGISTER"
    dynamic_name={'DYNRELAENT': 0, 'DYNRPATH': 0, 'DYNFINI': 0, 'DYNVERNEEDNUM': 0, 'DYNINIT_ARRAY': 0, 'DYNSTRSZ': 0, 'DYNSTRTAB': 0, 'DYNRELENT': 0, 'DYN': 0, 'DYNSYMTAB': 0, 'DYNFINI_ARRAYSZ': 0, 'DYNNEEDED': 0, 'DYNSYMENT': 0, 'DYNINIT': 0, 'DYNRELSZ': 0, 'DYNINIT_ARRAYSZ': 0, 'DYNVERNEED': 0, 'DYNRELASZ': 0, 'DYNREL': 0, 'DYNRELA': 0, 'DYNFINI_ARRAY': 0, 'DYNHASH': 0, 'DYNJMPREL': 0, 'DYNDEBUG': 0, 'DYNPLTGOT': 0, 'DYNNULL': 0, 'DYNPLTRELSZ': 0, 'DYNPLTREL': 0, 'VERSYM': 0, 'DYNCOUNT': 0}
    count = 0
    with open(final_report,'r') as file:
        for line in file :
            if len(line.split())>1:
                # Second column is the tag name wrapped in parentheses;
                # strip the first and last character to unwrap it.
                x=line.split()[1]
                x=x[1:len(x)-1]
                if x in dyna_name:
                    if 'DYN'+x in dynamic_name:
                        dynamic_name['DYN'+x] += 1
                    count+=1
    dynamic_name['DYNCOUNT'] = count
    for i in dynamic_name.items():
        headers.append(i[0])
        features.append(i[1])
def relocation_section(file):
    """Count x86 relocation types reported by ``readelf -r`` for *file* and
    append one (type, count) column per relocation type to the global
    headers/features row."""
    reloc_type="R_386_NONE|R_386_32|R_386_PC32|R_386_GOT32|R_386_PLT32|R_386_COPY|R_386_GLOB_DAT|R_386_JUMP_SLOT|R_386_RELATIVE|R_386_GOTOFF|R_386_GOTPC|R_386_32PLT|R_386_16|R_386_PC16|R_386_8|R_386_PC8|R_386_SIZE32"
    r_type = dict.fromkeys(reloc_type.split('|'), 0)
    # Buffer readelf's output on disk, then parse it back.
    reloc = subprocess.check_output(["readelf", "-r", file])
    with open(final_report, 'w') as report:
        report.write(reloc)
    with open(final_report, 'r') as report:
        for line in report:
            tokens = line.split()
            if len(tokens) > 2:
                rt = tokens[2]
                # Substring test against the pipe-joined name list: a token
                # not exactly matching a key may still pass and be added.
                if rt in reloc_type:
                    r_type[rt] = r_type.get(rt, 0) + 1
    for name, count in r_type.items():
        headers.append(name)
        features.append(count)
def got_size():
    """Append a GOT_SIZE column: combined byte size of .got and .got.plt."""
    def _hex_size(col_name):
        # 0 when the column is missing or its value is not a hex string
        # (bare except mirrors the original's breadth).
        try:
            return int(features[headers.index(col_name)], 16)
        except:
            return 0
    value = _hex_size(".got_size") + _hex_size(".got.plt_size")
    headers.append("GOT_SIZE")
    features.append(value)
def hash_table_size():
    """Append a HASH_SIZE column: combined byte size of .gnu.hash and .hash."""
    value = 0
    for col_name in (".gnu.hash_size", ".hash_size"):
        try:
            value += int(features[headers.index(col_name)], 16)
        except:
            # Column absent or not hex — contributes 0 (matches original).
            pass
    headers.append("HASH_SIZE")
    features.append(value)
def write_csv():
    """Append the global ``features`` row to results.csv, writing the
    ``headers`` row first when the file does not exist yet.

    NOTE(review): the "wb"/"ab" binary modes with csv.writer are Python 2
    idioms; under Python 3 csv needs text mode with newline=''.
    """
    # print(features)
    # print(headers)
    if not os.path.exists('./results.csv'):
        with open("results.csv", "wb") as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            writer.writerow(headers)
    with open("results.csv", "ab") as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        # writer.writerow(headers)
        writer.writerow(features)
if __name__ == "__main__":
    # Usage: python ELFMiner.py <elf-binary>
    # Each extractor appends its columns to the shared headers/features row.
    file = sys.argv[1]
    prepare_headers()
    elf = input_file(file)
    elf_headers(elf)
    section_headers(elf)
    symbols_table(file)
    dynamic_section(file)
    relocation_section(file)
    got_size()
    hash_table_size()
write_csv() | [
"shreyansh.pettswood@gmail.com"
] | shreyansh.pettswood@gmail.com |
307765967f950baa6009e7ca57e4b343db27a692 | 17f122497a3fb90105cb35b08b8ba4d2379831b5 | /papyrobot/utils/answer.py | 799e31d5ed4b7e2f9affa095110742d0a317dc8e | [] | no_license | Zepmanbc/oc_dapython_pr7 | 198084d7e9c37a8949186c3ff4f89d59eaf54d2f | dc2d38ba2164669167423c80d94b52d7257a548e | refs/heads/master | 2020-04-25T17:39:01.008881 | 2019-04-17T20:33:16 | 2019-04-17T20:33:16 | 172,956,325 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | #! /usr/bin/env python
"""Random Answer module"""
import json
from random import randint
class Answer():
    """Random answer picker.

    Loads a category -> sentences mapping from
    'papyrobot/static/json/dialog.json' and serves a random sentence per
    category.

    Usage::

        answer = Answer()
        print(answer.response(category))

    Known categories: intro, introduce_story, no_result.
    """
    def __init__(self):
        """Load the dialog sentences from the JSON file."""
        with open('papyrobot/static/json/dialog.json') as json_data:
            self.dialog = json.load(json_data)
    def response(self, category):
        """Return a randomly chosen sentence for *category*.

        Args:
            category (str): one of the keys of self.dialog.
        Returns:
            str: a random sentence from that category.
        Raises:
            KeyError: if *category* is unknown.
        """
        if category not in self.dialog:
            raise KeyError("Incorrect Category")
        sentences = self.dialog[category]
        return sentences[randint(0, len(sentences) - 1)]
if __name__ == "__main__":
    # Manual smoke test (disabled): instantiate Answer and request a category.
    # answer = Answer()
    # print(answer.response('ff'))
    pass
| [
"zepman@gmail.com"
] | zepman@gmail.com |
55efff42900bca69bb0bc655f601a04c0d1dff87 | 48c522df53c37a7b9bea3d17f403556e01eeb24c | /ubuntu-20-04-scripts/build_ubuntu_dataset/build_ubuntu_dataset_with_radare2.py | 78619e21c78a2a8f938b82eba161cdf9b8d04242 | [] | no_license | flobotics/func_sign_prob | 32ee8a8e4d1e33bec9253cd21e7609827d370fc9 | 18000e04ea108fb2530a50a9de4f8996cde398f0 | refs/heads/master | 2023-02-10T19:08:01.536243 | 2021-01-02T23:25:38 | 2021-01-02T23:25:38 | 292,960,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,267 | py | import subprocess
import os
import tensorflow as tf
#from tensorflow import keras
#from tensorflow.keras.preprocessing.text import Tokenizer
import pexpect
import numpy as np
import pickle
import getopt
import sys
from multiprocessing import Pool
### amazon cloud aws path
user_home_path = os.path.expanduser('~')
base_path = user_home_path + "/git/func_sign_prob/"
### google cloud gcp path
#base_path = user_home_path + "/git/func_sign_prob/"
### virtualbox path
#base_path = user_home_path + "/git/test2/func_sign_prob/"
### dir where packages-* files are located
config_dir = "ubuntu-20-04-config/"
### dir where the build pickle files are stored
pickles_dir = "ubuntu-20-04-pickles/"
### if aws or gcp is used
gcloud = False
###aws c5d.x12large
nr_of_cpus = 48
###virtualbox
#nr_of_cpus = 2
def parseArgs():
    """Parse command-line options into a config dict and derive the config
    file paths used by the rest of the script.

    NOTE(review): every option value is taken as option_value[1:], i.e. the
    first character is dropped — presumably the values are passed with a
    leading space; confirm against how the script is invoked.
    NOTE(review): '-h' prints help but does not exit, and the verbose check
    uses a substring test (``in 'False'``), so e.g. '' or 'F' also disable
    verbose.
    """
    short_opts = 'hw:u:p:t:c:b:v:'
    long_opts = ['work-dir=', 'git-user=', 'git-pwd=', 'tfrecord-save-dir=', 'config-dir=', 'ubuntu-pwd=', 'verbose=']
    config = dict()
    config['work_dir'] = ''
    config['tfrecord_save_dir'] = ''
    config['config_dir'] = ''
    config['git_user'] = ''
    config['git_pwd'] = ''
    config['ubuntu_pwd'] = ''
    config['verbose'] = ''
    try:
        args, rest = getopt.getopt(sys.argv[1:], short_opts, long_opts)
    except getopt.GetoptError as msg:
        print(msg)
        print(f'Call with argument -h to see help')
        exit()
    for option_key, option_value in args:
        if option_key in ('-w', '--work-dir'):
            config['work_dir'] = option_value[1:]
        elif option_key in ('-u', '--git-user'):
            config['git_user'] = option_value[1:]
        elif option_key in ('-p', '--git-pwd'):
            config['git_pwd'] = option_value[1:]
        elif option_key in ('-t', '--tfrecord-save-dir'):
            config['tfrecord_save_dir'] = option_value[1:]
        elif option_key in ('-c', '--config-dir'):
            config['config_dir'] = option_value[1:]
        elif option_key in ('-b', '--ubuntu-pwd'):
            config['ubuntu_pwd'] = option_value[1:]
        elif option_key in ('-v', '--verbose'):
            if option_value[1:] in 'False':
                config['verbose'] = False
            else:
                config['verbose'] = True
        elif option_key in ('-h'):
            print(f'<optional> -w or --work-dir The directory where all work is done. Default: /tmp/work')
            print(f'<optional> -u or --git-user The username for github repo')
            print(f'<optional> -p or --git-pwd The password for github repo')
            print(f'<optional> -c or --config-dir The directory to save config files to run this script twice or more, without \
doing the same packages again')
            print(f'<optional> -b or --ubuntu-pwd The ubuntu user password to install packages with apt')
    # Fall back to defaults for anything not supplied on the command line.
    if config['work_dir'] == '':
        config['work_dir'] = '/tmp/work/'
    if config['tfrecord_save_dir'] == '':
        config['tfrecord_save_dir'] = config['work_dir'] + 'tfrecord_files/'
    if config['config_dir'] == '':
        config['config_dir'] = config['work_dir'] + 'config-files/'
    if config['git_user'] == '':
        config['git_user'] = ''
    if config['git_pwd'] == '':
        config['git_pwd'] = ''
    if config['ubuntu_pwd'] == '':
        config['ubuntu_pwd'] = ''
    if config['verbose'] == '':
        config['verbose'] = True
    ###check little down for more configs
    ###configs without argument, but perhaps depend on configs-with-arguments
    config['filtered_out_config_file'] = config['config_dir'] + 'package-filtered-out.txt'
    config['package_all_config_file'] = config['config_dir'] + 'package-all.txt'
    config['package_work_config_file'] = config['config_dir'] + 'package-work.txt'
    config['package_dont_work_config_file'] = config['config_dir'] + 'package-dont-work.txt'
    config['package_binaries_config_file'] = config['config_dir'] + 'package-binaries.txt'
    return config
### get a list with all packages with ending -dbgsym
def get_all_ubuntu_dbgsym_packages(verbose=False):
pack_dbgsym_list = list()
pack_with_dbgsym = subprocess.run(['apt-cache', 'search', 'dbgsym'], capture_output=True, universal_newlines=True)
pack_with_dbgsym_out = pack_with_dbgsym.stdout
pack_with_dbgsym_out = pack_with_dbgsym_out.split('\n')
for l in pack_with_dbgsym_out:
if verbose and l:
print(f'Pkg: {l}')
if l.split() and l.split()[0].endswith('-dbgsym'):
if verbose:
print('Pkg ends with -dbgsym, add to list')
pack_dbgsym_list.append(l.split()[0])
else:
if verbose:
print('Pkg ends not with -dbgsym, dont add to list')
pass
if verbose:
print(f'We got >{len(pack_dbgsym_list)}< pkgs in our dbgsym list')
if len(pack_dbgsym_list) == 0:
print("Install ubuntu debug symbol packages")
exit()
return pack_dbgsym_list
def filter_dbgsym_package_list(dbgsym_list, config, verbose=False):
    """Drop dbgsym packages we do not want to process.

    Rejects library packages ('lib*'), firmware ('firmware*'), anything
    containing 'plugin' and anything kernel-related ('linux'). Every
    rejected name is appended to config['filtered_out_config_file'] for
    later inspection.

    Returns the list of surviving package names (first whitespace token
    of each input item), in input order.
    """
    new_list = []
    # Improvement: open the rejection log once instead of re-opening it
    # for every single filtered-out package.
    with open(config['filtered_out_config_file'], "a+") as log:
        for item in dbgsym_list:
            subItem = item.split()[0]
            if subItem.startswith('lib') or subItem.startswith('firmware'):
                reason = '>lib,firmware<'
            elif 'plugin' in subItem:
                reason = '>plugin<'
            elif 'linux' in subItem:
                reason = '>linux<'
            else:
                if verbose:
                    print(f'Add to filtered list >{subItem}<')
                new_list.append(subItem)
                continue
            if verbose:
                print(f'Filter out, because of {reason} {subItem}')
                print(f"Write to >{config['filtered_out_config_file']}< file: {subItem}")
            log.write(str(subItem) + '\n')
    if verbose:
        print(f'Length of filtered_pkgs_with_dbgsym:{len(new_list)}')
    return new_list
def get_binaries_in_package(package, config, verbose=False):
    """Install *package*'s base package, find its ELF binaries, and install
    the -dbgsym package when binaries exist.

    Bookkeeping files (all under config paths):
      package-all.txt      every package ever examined (skip on re-run)
      package-work.txt     packages that shipped real ELF binaries
      package-dont-work.txt packages without any
      package-binaries.txt every binary path already recorded

    Returns the list of binary paths not yet present in
    package-binaries.txt (empty when the package was already processed).
    Requires sudo/apt; the ubuntu password is taken from config['ubuntu_pwd'].
    """
    new_binaries_in_package = list()
    package_work = list()
    c = 0
    already_done = False
    binaries_in_package = list()
    f_without_dbgsym = package.replace('-dbgsym', '')
    already_done = False
    #check if we got this package already
    if verbose:
        print(f"Check in file >{config['package_all_config_file']}< if we already processed this package")
    file = open(config['package_all_config_file'], "r+")
    for pack in file:
        #print(f'pack:{pack} f_without_dbgsym:{f_without_dbgsym}')
        if f_without_dbgsym in pack:
            if verbose:
                print(f"Skip package >{f_without_dbgsym}<, already in package-all.txt file")
            already_done = True
            break
    if verbose and not already_done:
        print(f"Package >{f_without_dbgsym}< not in >{config['package_all_config_file']}< file")
    file.close()
    if not already_done:
        ###we write the package name into package-all.txt to know that we got it already
        file = open(config['package_all_config_file'], "a+")
        if verbose:
            print(f"Write to >{config['package_all_config_file']}< : {f_without_dbgsym}")
        file.write(str(f_without_dbgsym) + '\n')
        file.close()
        ###install the package
        if verbose:
            print(f'Installing >{f_without_dbgsym}< with apt')
        child = pexpect.spawn('sudo DEBIAN_FRONTEND=noninteractive apt install -y {0}'.format(f_without_dbgsym), timeout=None)
        if not gcloud:
            child.expect(':', timeout=None)
            # enter the password
            child.sendline(config['ubuntu_pwd'] + '\n')
        #print(child.read())
        tmp = child.read()
        ###check with dpkg -L what files are installed and if some binaries are there
        dpkg_proc = subprocess.run(['dpkg', '-L', f_without_dbgsym], capture_output=True, universal_newlines=True)
        dpkg_proc_out = dpkg_proc.stdout
        dpkg_proc_out = dpkg_proc_out.split('\n')
        for path in dpkg_proc_out:
            # Keep paths inside a */bin/* directory whose basename itself is
            # not a 'bin' directory entry.
            if 'bin' in path:
                #print(f'bin-path:{path}')
                basename = os.path.basename(path)
                #print(f'base-bin-path:{basename}')
                if 'bin' not in basename:
                    #print(f'filtered-basename:{basename}')
                    binaries_in_package.append(path)
        #print(f'dpkg_proc_out: {dpkg_proc_out}')
    ###if we found some binaries in package, we install the -dbgsym package
    if len(binaries_in_package) > 0:
        if verbose:
            print(f'Found binaries in package, install >{package}< with apt now')
        child = pexpect.spawn('sudo DEBIAN_FRONTEND=noninteractive apt install -y {0}'.format(package), timeout=None)
        ### if you run in google cloud, it directly installs the pkg
        if not gcloud:
            child.expect(':', timeout=None)
            ### enter the password
            child.sendline(config['ubuntu_pwd'] + '\n')
        #print(child.read())
        tmp = child.read()
    if verbose:
        print(f'In package >{f_without_dbgsym}< are these binaries: {binaries_in_package}')
    ###check if binaries are binaries or scripts,etc.
    real_binaries_in_package = list()
    for b in binaries_in_package:
        # file(1) decides whether the path is a 64-bit ELF (vs script etc.).
        file_proc = subprocess.run(['file', b], capture_output=True, universal_newlines=True)
        file_proc_out = file_proc.stdout
        file_proc_out = file_proc_out.split('\n')
        for line in file_proc_out:
            if ('ELF 64-bit LSB shared object' in line) or ('ELF 64-bit LSB executable' in line):
                real_binaries_in_package.append(b)
    if verbose:
        print(f'Real binaries:{real_binaries_in_package}')
    ###Write package to package-work.txt, to know that this package got binaries
    if len(real_binaries_in_package) > 0:
        file = open(config['package_work_config_file'], "a+")
        if verbose:
            print(f"Write to >{config['package_work_config_file']}<: {f_without_dbgsym}")
        file.write(str(f_without_dbgsym) + '\n')
        file.close()
    ###Write package to package-dontwork.txt, to know that this package got NO binaries
    else:
        file = open(config['package_dont_work_config_file'], "a+")
        if verbose:
            print(f"Write to >{config['package_dont_work_config_file']}< : {f_without_dbgsym}")
        file.write(str(f_without_dbgsym) + '\n')
        file.close()
    ###check if we already got these binaries in our package-binaries.txt
    #new_binaries_in_package = list()
    found_bin = False
    if len(real_binaries_in_package) > 0:
        file = open(config['package_binaries_config_file'], "r+")
        #check if binary is still in the file, if not ,put it into new list
        for b in real_binaries_in_package:
            #print(f'b:{b}')
            for stored_bin in file:
                #print(f's:{stored_bin}')
                if b in stored_bin:
                    #print("found it in file")
                    found_bin = True
                    break
                else:
                    pass
            if not found_bin:
                new_binaries_in_package.append(b)
            else:
                found_bin = False
        file.close()
    if len(new_binaries_in_package) > 0:
        file = open(config['package_binaries_config_file'], "a+")
        if verbose:
            print(f"Write to >{config['package_binaries_config_file']}< : {new_binaries_in_package}")
        for b in new_binaries_in_package:
            file.write(str(b) + '\n')
        file.close()
    else:
        if verbose:
            print(f"No binaries to write to >{config['package_binaries_config_file']}<")
        pass
    return new_binaries_in_package
def get_function_signatures_and_ret_types(binaryName, verbose=False):
    """Extract function signatures via ``gdb -batch ... info functions``.

    Parses gdb's per-file listing: a 'File x.c:' line sets the current
    source file; every following line starting with an address is a
    function signature. The return type is the signature text up to the
    last '*' or space before the name, the name is the text from there to
    the opening parenthesis.

    Returns a list of (signature, ret_type, func_name, base_file_name).
    """
    funcs_and_ret_types = list()
    all_funcs = list()
    ret_types = set()
    baseFileName = ''
    ### radare2 -A -q -c afl /tmp/testapp
    if verbose:
        print(f'Get function-signatures and return-types with "gdb info functions" from >{binaryName}<')
    gdb_output = subprocess.run(["gdb", "-batch", "-ex", "file {}".format(binaryName), "-ex", "info functions"], capture_output=True, universal_newlines=True)
    out = gdb_output.stdout
    out_list = out.split('\n')
    for line in out_list:
        linesplit = line.split()
        #print(f'linesplit: {linesplit}')
        if linesplit:
            # Get filename, where the following functions are inside
            if line.split()[0] == 'File':
                filename = line.split()[1]
                #print(f'filename (unknown)')
                baseFileName = os.path.basename(filename)
                # Drop gdb's trailing ':' from 'File name.c:'.
                if baseFileName[-1] == ':':
                    baseFileName = baseFileName[:-1]
                #print(f'filename-filter: {baseFileName}')
            # Get function signature
            if line.split()[0][0].isnumeric():
                #found func name
                funcSignature = line.split()[1:]
                funcSignature = ' '.join(funcSignature)
                #print(f'funcname: {funcname}')
                #Get the return type from the function signature
                if '(' in funcSignature:
                    idx = funcSignature.index('(')
                    #print(idx)
                    new_idx = idx
                    # Scan backwards from '(' to the first '*' or space —
                    # that position splits return type from function name.
                    for c in funcSignature[idx-1::-1]:
                        #print(c)
                        new_idx -= 1
                        #if found_ret_type == False:
                        if c == '*' or c == ' ':
                            found_ret_type = True
                            ret_type = funcSignature[:new_idx+1]
                            ret_types.add(ret_type)
                            funcName = funcSignature[new_idx+1:idx]
                            #print(f'funcName: {funcName}')
                            if funcSignature and ret_type and funcName and baseFileName:
                                funcs_and_ret_types.append((funcSignature, ret_type, funcName, baseFileName))
                            #print(funcs_and_ret_types[0])
                            #if 'enum' in ret_type:
                                #print(f'ret_type: {ret_type}')
                            break
                        else:
                            #print(f"scan till funcname end, now at: {c}")
                            pass
    return funcs_and_ret_types
def proc_get_types_from_names(ret_type, binary_name):
    """Pool worker: resolve *ret_type* with gdb's ``ptype`` against
    *binary_name* and return the stripped stdout of the gdb run.

    Typical output looks like: type = int (*)(int, int)
    """
    verbose = False
    cmd = ["gdb", "-batch",
           "-ex", "file {0}".format(binary_name),
           "-ex", "ptype {0}".format(ret_type)]
    completed = subprocess.run(cmd, capture_output=True, universal_newlines=True)
    resolved = completed.stdout
    if verbose:
        print(f'gdb_ptype: {resolved}', flush=True)
    return resolved.strip()
def get_types_from_names(funcs_and_ret_types, binary_name, verbose=False):
    """Resolve each function's textual return type through gdb ``ptype``.

    A process pool runs proc_get_types_from_names() once per input tuple;
    entries whose resolved type contains "DELETE" (or resolves empty) are
    dropped.

    Returns a list of (signature, resolved_ret_type, func_name, file_name).
    """
    # Fixed: removed the large unused legal_types list and the unused
    # counter/replace_str locals from the original implementation.
    funcs_and_ret_types_filtered = list()
    if verbose:
        print(f'Binary_name:{binary_name}')
        print(f'len(funcs_and_ret_types):{len(funcs_and_ret_types)}')
    if len(funcs_and_ret_types) < 1:
        print(f'len funcs_and_ret_types: {len(funcs_and_ret_types)}')
        return funcs_and_ret_types_filtered
    # One gdb job per return type, fanned out over the worker pool.
    proc_ret_type_list = [(ret_type, binary_name)
                          for _sig, ret_type, _name, _file in funcs_and_ret_types]
    p = Pool(nr_of_cpus)
    all_ret_types = p.starmap(proc_get_types_from_names, proc_ret_type_list)
    p.close()
    p.join()
    c = 0
    if all_ret_types:
        for func, ret_type, funcName, baseFileName in funcs_and_ret_types:
            # Keep only rows where every field and the resolved type are non-empty.
            if func and funcName and baseFileName and all_ret_types[c]:
                if "DELETE" in all_ret_types[c]:
                    if verbose:
                        print("Delete return type")
                else:
                    funcs_and_ret_types_filtered.append((func, all_ret_types[c], funcName, baseFileName ))
            c += 1
    return funcs_and_ret_types_filtered
def proc_disas(funcName, baseFileName, binary_name):
    """Pool worker: disassemble *funcName* with gdb and normalize each line.

    Normalization per instruction line: drop the '<symbol>' and '#' comment
    tails, pad ',()():' with spaces for easier tokenization, and collapse
    every numeric literal to a placeholder token ('0x', '-0x', '$0x',
    '$-0x').

    Returns the list of normalized instruction strings.
    """
    verbose = False
    gdb_output_disas = subprocess.run(["gdb", "-batch", "-ex", "file {0}".format(binary_name), "-ex", "disassemble {0}".format(funcName)], capture_output=True, universal_newlines=True)
    out = gdb_output_disas.stdout
    out_list = out.split('\n')
    out_split = list()
    disas_list = []
    for out_list_item in out_list:
        if 'Dump of assembler code' in out_list_item:
            if verbose:
                print('Start of assembler code')
            pass
        elif '\t' in out_list_item:
            # Instruction lines are '<addr>\t<mnemonic operands>'.
            if verbose:
                print(f'out_list_item: {out_list_item}')
            out_split = out_list_item.split('\t')
            if verbose:
                print(f'out_split[1]: {out_split[1]}')
            out_split_val = out_split[1]
            ###remove comments
            if '<' in out_split_val:
                out_split_idx = out_split_val.index('<')
                out_split_val = out_split_val[:out_split_idx]
            if '#' in out_split_val:
                out_split_idx = out_split_val.index('#')
                out_split_val = out_split_val[:out_split_idx]
            ###remove trailing whitespace which could result from above 'remove comments'
            out_split_val = out_split_val.rstrip()
            ###make space between these signs to better split
            out_split_val = out_split_val.replace(',', ' , ')
            out_split_val = out_split_val.replace('(', ' ( ')
            out_split_val = out_split_val.replace(')', ' ) ')
            out_split_val = out_split_val.replace(':', ' : ')
            ###replace all numbers with 0x or -0x...
            v = list()
            for val in out_split_val.split():
                if '0x' in val:
                    if val[0] == '$':
                        if val[1] == '-':
                            v.append('$-0x')
                        else:
                            v.append('$0x')
                    elif val[0] == '-':
                        v.append('-0x')
                    else:
                        v.append('0x')
                else:
                    v.append(val)
            out_split_val = ' '.join(v)
            if verbose:
                print(f'One assembly line after filtering:{out_split_val}')
            if out_split_val:
                disas_list.append(out_split_val)
        else:
            if verbose:
                print(f"Mostly empty line or end of assembly or SOMETHING WRONG:{out_list_item}")
            pass
    return disas_list
def get_disassemble(funcs_and_ret_types_filtered, binary_name, verbose=False):
    """Disassemble every function with gdb and attach the normalized listing.

    Runs proc_disas() over a worker pool and zips the results back onto the
    input tuples.

    Returns a list of
    (signature, ret_type, func_name, file_name, disassembly_lines) tuples.
    """
    dataset = list()
    if len(funcs_and_ret_types_filtered) < 1:
        print(f'len funcs_and_ret_types_filtered: {len(funcs_and_ret_types_filtered)}')
        return dataset
    # One gdb job per function, fanned out over the worker pool.
    tasks = [(funcName, baseFileName, binary_name)
             for _sig, _ret, funcName, baseFileName in funcs_and_ret_types_filtered]
    p = Pool(nr_of_cpus)
    all_disas = p.starmap(proc_disas, tasks)
    p.close()
    p.join()
    c = 0
    if all_disas:
        for a, b, funcName, baseFileName in funcs_and_ret_types_filtered:
            # Keep only rows where every field and the disassembly are non-empty.
            if a and b and funcName and baseFileName and all_disas[c]:
                dataset.append((a, b, funcName, baseFileName, all_disas[c]))
            c += 1
    # Fixed: guard against an empty dataset before peeking at the first item
    # (next(iter([])) raised StopIteration in the original).
    if verbose and dataset:
        print(f'One dataset item: dataset[0]: {next(iter(dataset))}')
    return dataset
### get at&t or intel disas from funcName
def proc_disas_raw(funcName, binary_name, disas_flavor, verbose=False):
    """Pool worker: disassemble *funcName* with gdb in the requested syntax.

    *disas_flavor* is 'att' or 'intel'. Unlike proc_disas(), lines are kept
    raw (only left-stripped) and joined into a single space-separated string.

    Returns the joined disassembly string ('' when nothing was produced).
    """
    disas_list = []
    gdb_output_disas = subprocess.run(["gdb",
                                       "-batch",
                                       "-ex",
                                       "set disassembly-flavor {0}".format(disas_flavor),
                                       "-ex",
                                       "file {0}".format(binary_name),
                                       "-ex",
                                       "disassemble {0}".format(funcName)],
                                      capture_output=True,
                                      universal_newlines=True)
    out = gdb_output_disas.stdout
    out_list = out.split('\n')
    for out_list_item in out_list:
        if 'Dump of assembler code' in out_list_item:
            if verbose:
                print('Start of assembler code')
            pass
        elif 'End of assembler dump' in out_list_item:
            if verbose:
                print('End asm code')
            break
        elif '\t' in out_list_item:
            # Instruction lines contain a tab between address and text.
            if verbose:
                print(f'One assembly line:{out_list_item}')
            if out_list_item:
                disas_list.append(out_list_item.lstrip())
        else:
            if verbose:
                print(f"Mostly empty line or SOMETHING WRONG:{out_list_item}")
            pass
    #print(f'disaslist:{disas_list}')
    disas_str = ' '.join(disas_list)
    return disas_str
    #return disas_list
def get_disassemble_att(funcs_and_ret_types_filtered, binary_name, verbose=False):
    """Disassemble every function in AT&T syntax using a gdb worker pool.

    Returns one raw disassembly string per input tuple (order preserved),
    or an empty list when there is nothing to disassemble.
    """
    # Fixed: removed the unused dataset/disas_list locals.
    if len(funcs_and_ret_types_filtered) < 1:
        print(f'len funcs_and_ret_types_filtered: {len(funcs_and_ret_types_filtered)}')
        return []
    # One gdb job per function; the worker's verbose flag is always False.
    tasks = [(funcName, binary_name, "att", False)
             for _sig, _ret, funcName, _file in funcs_and_ret_types_filtered]
    p = Pool(nr_of_cpus)
    all_disas = p.starmap(proc_disas_raw, tasks)
    p.close()
    p.join()
    return all_disas
def get_disassemble_intel(funcs_and_ret_types_filtered, binary_name, verbose=False):
    """Disassemble every function in Intel syntax using a gdb worker pool.

    Mirror of get_disassemble_att(): returns one raw disassembly string per
    input tuple (order preserved), or an empty list for empty input.
    """
    # Fixed: removed the unused locals and the dead commented-out zipping
    # code that trailed the original return statement.
    if len(funcs_and_ret_types_filtered) < 1:
        print(f'len funcs_and_ret_types_filtered: {len(funcs_and_ret_types_filtered)}')
        return []
    tasks = [(funcName, binary_name, "intel", False)
             for _sig, _ret, funcName, _file in funcs_and_ret_types_filtered]
    p = Pool(nr_of_cpus)
    all_disas = p.starmap(proc_disas_raw, tasks)
    p.close()
    p.join()
    return all_disas
def build_tf_dataset(ds_list):
    """Build a tf.data.Dataset of [item[0], item[1]] pairs from *ds_list*
    by concatenating single-element datasets, printing the first element.

    NOTE(review): the first list item is added twice — the ``counter == 0``
    branch creates the dataset from it and the following ``counter > 0``
    branch (no elif) immediately concatenates it again.
    NOTE(review): the nested _bytes_feature helper is never called here and
    duplicates serialize_example's feature construction.
    Returns the dataset, or '' when ds_list is empty.
    """
    counter = 0
    tf_dataset = ''
    if len(ds_list) > 0:
        for ds in ds_list:
            if counter == 0:
                tf_dataset = tf.data.Dataset.from_tensors([ds[0], ds[1]])
                counter = 1
            if counter > 0:
                tf_dataset = tf_dataset.concatenate(tf.data.Dataset.from_tensors([ds[0], ds[1]]))
        #tf_dataset = tf.data.Dataset.from_tensor_slices(ds_list)
        for elem in tf_dataset:
            print(f'dataset element:{elem}')
            break
    def _bytes_feature(value):
        """Returns a bytes_list from a string / byte."""
        #if isinstance(value, type(tf.constant(0))):
        #value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
        #value = np.fromiter(value, dtype=int)
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    return tf_dataset
def serialize_example(feature0, feature1, feature2, feature3, feature4, feature5, feature6, feature7):
    """Serialize one dataset row into a tf.train.Example byte string.

    Each argument is an EagerTensor wrapping a bytes scalar; the eight
    values are stored under the fixed feature keys below.
    """
    keys = ('func-signature', 'func-return-type', 'func-name',
            'func-file-name', 'func-att-disas', 'func-intel-disas',
            'ubuntu-package-name', 'ubuntu-package-binary')
    values = (feature0, feature1, feature2, feature3,
              feature4, feature5, feature6, feature7)
    # .numpy() unwraps each EagerTensor to the raw bytes BytesList expects.
    feature = {
        key: tf.train.Feature(bytes_list=tf.train.BytesList(value=[tensor.numpy()]))
        for key, tensor in zip(keys, values)
    }
    example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return example_proto.SerializeToString()
def tf_serialize_example(f0,f1, f2, f3, f4, f5, f6, f7):
    # Graph-compatible wrapper: tf.data.Dataset.map() cannot call the eager
    # serialize_example() directly, so it is routed through tf.py_function.
    tf_string = tf.py_function(
        serialize_example,
        (f0,f1,f2,f3,f4,f5,f6,f7), # pass these args to the above function.
        tf.string) # the return type is `tf.string`.
    return tf.reshape(tf_string, ()) # The result is a scalar
def save_list_to_tfrecord(ds_list, file):
    """Write *ds_list* (8-tuples of str) to *file* as a TFRecord.

    Each tuple position becomes one dataset column; any non-str field is
    reported to stdout before serialization. Empty input writes nothing.
    """
    columns = [[] for _ in range(8)]
    if len(ds_list) > 0:
        for item in ds_list:
            # Flag unexpected field types, then distribute into columns.
            for idx in range(8):
                if not isinstance(item[idx], str):
                    print(f'type {idx} >{type(item[idx])}<')
            for idx in range(8):
                columns[idx].append(item[idx])
        tfrecord_dataset = tf.data.Dataset.from_tensor_slices(tuple(columns))
        serialized_features_dataset = tfrecord_dataset.map(tf_serialize_example)
        writer = tf.data.experimental.TFRecordWriter(file)
        writer.write(serialized_features_dataset)
def save_list_to_pickle(ds_list, package_name):
    """Pickle *ds_list* under the pickles dir, then tar+bzip2 the pickle.

    The tarball keeps files small enough for GitHub (100MB limit); the
    original pickle is removed by tar's --remove-files.
    """
    pickle_path = base_path + pickles_dir + "{0}.pickle".format(package_name)
    with open(pickle_path, 'wb') as f:
        pickle.dump(ds_list, f)
    ###tar and zip for github, dont allow larger than 100MB files
    tar_out = subprocess.run(
        ["tar",
         "cjf",
         base_path + pickles_dir + package_name + ".pickle" + ".tar.bz2",
         "-C",
         base_path + pickles_dir,
         package_name + ".pickle",
         "--remove-files"],
        capture_output=True,
        universal_newlines=True)
    out = tar_out.stdout
    print(f'tar_out:{out}')
def push_pickle_to_github(package_name):
    """Stage, commit (message = *package_name*) and push the repo.

    Pushes over HTTPS with the configured GitHub user/password embedded
    in the remote URL.
    """
    git_out = subprocess.run(["git", "add", "."], capture_output=True, universal_newlines=True)
    out = git_out.stdout
    #print(f'out1: {out}')
    git_out = subprocess.run(["git", "commit", "-m", package_name], capture_output=True, universal_newlines=True)
    out = git_out.stdout
    #print(f'out2: {out}')
    # BUG FIX: the original read the local name `git_pwd` before it was ever
    # assigned (UnboundLocalError on every call) and then ignored the escaped
    # value, always using the raw config['git_pwd'] in the URL. Read the
    # password from config and percent-encode '/' so it is safe in the
    # userinfo part of the URL.
    git_pwd = config['git_pwd']
    if '/' in git_pwd:
        git_pwd = git_pwd.replace('/', '%2F')
    url = "https://" + config['git_user'] + ":" + git_pwd + "@github.com/flobotics/func_sign_prob.git"
    git_out = subprocess.run(["git", "push", url, "--all"], capture_output=True, universal_newlines=True)
    out = git_out.stdout
    #print(f'out3: {out}')
def check_config(config):
    """Validate the run configuration and create any missing dirs/files.

    Exits the process when the mandatory ubuntu password is empty;
    otherwise ensures every working directory and bookkeeping file named
    in *config* exists, creating them when absent.
    """
    if config['ubuntu_pwd'] == '':
        print('Forgot ubuntu password as argument, try -h for help')
        exit()
    # (printed label, config key) pairs for the directories we depend on.
    directories = (
        ('work-dir', 'work_dir'),
        ('tfrecord_save_dir', 'tfrecord_save_dir'),
        ('config_dir', 'config_dir'),
    )
    for label, key in directories:
        if os.path.isdir(config[key]):
            print(f'Found {label} >{config[key]}<')
        else:
            print(f'Create {label} >{config[key]}<')
            os.mkdir(config[key])
    # Bookkeeping files are simply created empty when absent.
    tracked_files = (
        'filtered_out_config_file',
        'package_all_config_file',
        'package_work_config_file',
        'package_dont_work_config_file',
        'package_binaries_config_file',
    )
    for key in tracked_files:
        if os.path.isfile(config[key]):
            print(f"Found {key} >{config[key]}<")
        else:
            print(f"Create {key} >{config[key]}<")
            open(config[key], 'a').close()
def main():
    """Build one TFRecord of function signatures + disassembly per Ubuntu
    -dbgsym package: enumerate packages, extract per-binary signatures,
    return types and AT&T/Intel disassembly, and serialize the result.
    """
    config = parseArgs()
    check_config(config)
    ###get all packages with -dbgsym at the end
    pkgs_with_dbgsym = get_all_ubuntu_dbgsym_packages(config['verbose'])
    ###filter out some packages, e.g. which start with firmware
    filtered_pkgs_with_dbgsym = filter_dbgsym_package_list(pkgs_with_dbgsym, config, config['verbose'])
    c = 0
    #filtered_pkgs_with_dbgsym = ["tree-dbgsym"]
    disassembly_att = list()
    disassembly_intel = list()
    ds_list = list()  # accumulated 8-tuples for the current package
    ###we loop through all packages with -dbgsym at the end
    for package in filtered_pkgs_with_dbgsym:
        c += 1
        print(f'Package-nr:{c} of {len(filtered_pkgs_with_dbgsym)}, Name:{package}')
        ###get all binaries that are inside this package (without -dbgsym)
        all_binaries_in_package = get_binaries_in_package(package, config, True)
        #if c == 2:
        #sys.exit(0)
        ds_list.clear()
        for binary_name in all_binaries_in_package:
            #print(f'Get function signature and return type from binary: {binary_name}')
            func_sign_and_ret_types = get_function_signatures_and_ret_types(binary_name)
            #print(f'func_sign_and_ret_types: {func_sign_and_ret_types}')
            if not func_sign_and_ret_types:
                #print("NO func_sign_and_ret_types")
                break
            #print(f'Get return-types from names we dont know')
            extended_func_and_ret_types = get_types_from_names(func_sign_and_ret_types, binary_name, False)
            #print(f'extended_func_and_ret_types: {extended_func_and_ret_types}')
            if not extended_func_and_ret_types:
                #print("NO extended_func_and_ret_types")
                break
            #print(f'Get disassembly')
            ##disassemble_out = get_disassemble(extended_func_and_ret_types, binary_name)
            disassembly_att.clear()
            disassembly_att = get_disassemble_att(extended_func_and_ret_types, binary_name)
            disassembly_intel.clear()
            disassembly_intel = get_disassemble_intel(extended_func_and_ret_types, binary_name)
            if disassembly_att and disassembly_intel:
                ###save everything to a list to store it later
                # Only keep results when both disassembly flavours line up
                # one-to-one with the extracted functions.
                if len(disassembly_att) != len(disassembly_intel):
                    print(f'Number of att and intel disassembly is different')
                elif len(disassembly_att) != len(extended_func_and_ret_types):
                    print(f'Number of att and functions is different')
                else:
                    disas_len = len(disassembly_att)
                    i = 0
                    while i < disas_len:
                        ds_list.append((extended_func_and_ret_types[i][0],
                                        extended_func_and_ret_types[i][1],
                                        extended_func_and_ret_types[i][2],
                                        extended_func_and_ret_types[i][3],
                                        disassembly_att[i],
                                        disassembly_intel[i],
                                        package.replace('-dbgsym', ''),
                                        binary_name))
                        i += 1
            else:
                print(f'NO disassembly_att or disassembly intel')
                pass
        if len(ds_list) > 0:
            print(f'Write pickle file')
            #save_list_to_pickle(ds_list, package.replace('-dbgsym', ''))
            #push_pickle_to_github(package.replace('-dbgsym', ''))
            #package_dataset = build_tf_dataset(ds_list)
            save_list_to_tfrecord(ds_list, config['tfrecord_save_dir'] + package.replace('-dbgsym', '.tfrecord'))
            # NOTE(review): this exit() stops the run after the first package
            # that produced data — presumably a debug stop left in; confirm
            # before running over the full package list.
            exit()


if __name__ == "__main__":
    main()
| [
"ubu@ubu-VirtualBox"
] | ubu@ubu-VirtualBox |
df182ed949bc897da31a5f60c843f4350b9ca785 | 9d7d69178c6f1f1db6ed6767e0af32bfe836549c | /new_workspace/Gumtree_Workspace/UsefulPythonScripts/plexi_flux_v_lambda.py | 9ddf49092d9dae358e8d1a007d8d733b52982742 | [] | no_license | Gumtree/Quokka_scripts | 217958288b59adbdaf00a9a13ece42f169003889 | c9687d963552023d7408a8530005a99aabea1697 | refs/heads/master | 2023-08-30T20:47:32.142903 | 2023-08-18T03:38:09 | 2023-08-18T03:38:09 | 8,191,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | from time import sleep
from time import sleep
from gumpy.commons import sics

# Counting time per wavelength point (seconds).
COUNT_TIME = 120

# Wavelength sequence for the velocity selector (nvs_lambda, presumably
# Angstrom). The commented driveAtt() values mark where the original
# script switched the attenuator angle before the following points.
WAVELENGTHS = [
    # driveAtt(270)
    4.505, 4.6, 4.7, 4.8, 4.9,
    5, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9,
    # driveAtt(240)
    6, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7,
    # driveAtt(210)
    7.5, 8, 8.5,
    # driveAtt(180)
    9, 9.5,
    # driveAtt(150)
    10, 11, 12,
    # driveAtt(90)
    17,
    # driveAtt(60)
    20, 25, 40,
]

# FIX: the original repeated the same 5-line drive/scan stanza ~40 times;
# the sequence is now a single data table driven by one loop, with
# identical drive order, delays, printed output and scan parameters.
for wavelength in WAVELENGTHS:
    sics.drive('nvs_lambda', wavelength)
    sleep(0.1)
    print('start scanning for ')
    quokka.scan("time", COUNT_TIME)
    sleep(0.1)
| [
"quokka@DAV1-QUOKKA.nbi.ansto.gov.au"
] | quokka@DAV1-QUOKKA.nbi.ansto.gov.au |
0685786ef1c40ebd5e6c8ea6eed8236dd947972b | 5e7cacad2e9c0b6cc8677412b8d1bce92d01f197 | /realefforttask/pages.py | 47cd26df58797bd8b62258f2483729039e32b526 | [] | no_license | chapkovski/real-effort-several-rounds | 3acb55a8396aae82a8e8609641f38e16e546df64 | 8df230066aa4d085b7d1dfcc36774749348bf038 | refs/heads/master | 2018-12-24T17:30:44.617345 | 2018-05-10T16:06:20 | 2018-05-10T16:06:20 | 117,924,197 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from otree.api import Currency as c, currency_range
from . import models
from ._builtin import Page, WaitPage
from .models import Constants
class WorkPage(Page):
    """oTree page on which the participant performs the real-effort task."""

    # Label shown next to the countdown timer.
    timer_text = 'Time left to complete this round:'
    # Auto-advance the page after the per-round task time defined in models.
    timeout_seconds = Constants.task_time

    def before_next_page(self):
        # Persist the tasks the player produced before leaving the page.
        self.player.dump_tasks()


page_sequence = [
    WorkPage,
]
| [
"chapkovski@gmail.com"
] | chapkovski@gmail.com |
6c4ba86895231e56bc3c8162d45342e283b07bdb | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /analysis_tools/iFAB/psu_python_library/ifab_cost_surrogate_steel.py | 707fafbbb492bfb3fb23b3c9e4ed5146c8fd1915 | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 756 | py | import json
from sys import argv
from sys import stderr

# Load the testbench manifest if present; otherwise start from an empty one.
try:
    with open("testbench_manifest.json", "r+") as j:
        manifest = json.load(j)
except IOError:
    manifest={"Parameters":[], "Metrics":[]}

# Mass comes from the manifest's MassInput parameter; fall back to the
# first command-line argument when the parameter is absent.
mass = None
for param in manifest["Parameters"]:
    if param["Name"] == "MassInput":
        mass = param["Value"]
if mass is None:
    mass = float(argv[1])

# Linear cost surrogate: fixed cost plus a per-mass-unit rate.
surrogate_cost = 15059 + 4.4715 * float(mass)

# Record the result in every matching metric entry.
for metric in manifest["Metrics"]:
    if metric["Name"] == "surrogate_cost":
        metric["Value"] = surrogate_cost
        metric["Unit"] = "dollars"

with open("testbench_manifest.json", "w") as json_file:
    json.dump(manifest, json_file, indent=4)
# Also report the cost (2 decimals) on stderr for the calling harness.
stderr.write("{0:.2f}\n".format(surrogate_cost))
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
6432db83cc0fcd16c5619e9d4e0a4e53f18d699b | ffbfb86db9dac89c1cc24e648b199a8d3db9850f | /python/python_split.py | 5c303afc89c76d0523735efbb2f7ea483c3279d4 | [] | no_license | attikis/programming | e7ecef5c2bf2af71a3e89e6156a4a934fb2ed724 | 4b6215021d6ca1effd0f18ecfe8afc67056b7098 | refs/heads/master | 2021-05-09T21:51:40.425627 | 2019-02-19T07:22:56 | 2019-02-19T07:22:56 | 118,735,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | #!/usr/bin/env python
# Permissions : chmod +x fileName.py
# To launch : python fileName.py OR ./fileName.py
# Definition : split() command used to split up a string
# NOTE: Python 2 script (print statements).

string1 = "pt1,pt2,pt3,pt4"
print "+++ string1 = ", string1
print '+++ string1.split(",") = %s' % (string1.split(","))
print

string2 = "pt1|pt2|pt3|pt4"
print "+++ string2 = ", string2
print '+++ string2.split("|") = %s' % (string2.split("|"))
print

string3 = "pt1 pt2 pt3 pt4"
print "+++ string3 = ", string3
print '+++ string3.split(" ") = %s' % (string3.split(" "))
print

# split() also accepts multi-character separators and a maxsplit limit.
string4 = "pt1 <**> pt2 <**> pt3 <**> pt4"
print "+++ string4 = ", string4
print '+++ string4.split(" <**> ") = %s' % (string4.split(" <**> "))
print '+++ string4.split(" <**> ", 1) = %s' % (string4.split(" <**> ",1))
print '+++ string4.split(" <**> ", 2) = %s' % (string4.split(" <**> ",2))
print

string5 = "My name is Alexandros Attikis"
print "+++ string5 = ", string5
print "+++ string5.split()", string5.split()
print '+++ string5.split(" ")', string5.split(" ")
print

mLineString = "This \n is \n a multiline \n string"
print "+++ mLineString = ", mLineString
print "+++ mLineString.split() = ", mLineString.split()
print "+++ mLineString.splitlines() = ", mLineString.splitlines()
print

# NOTE: iterating a string yields single characters, so this loop prints a
# one-element list per character (empty list for whitespace characters).
print "+++ Printing all characters in a multi-line string"
for line in mLineString:
    print line.split()
print

print "+++ Printing all words in a multi-line string (in separate lists)"
for line in mLineString.splitlines():
    print line.split()
print

print "+++ Printing all words in a multi-line string (in one list)"
print "+++ mLineString.splitlines() = ", mLineString.splitlines()
| [
"attikis@cern.ch"
] | attikis@cern.ch |
aa0d66c01f0be4c0d50611c31202aace994fbf91 | af1a5e8245a34cb205216bc3e196045bb53f27d1 | /cottonformation/res/iot1click.py | d98e42c947b1cd8c141e081d45cdb562140d59d3 | [
"BSD-2-Clause"
] | permissive | gitter-badger/cottonformation-project | b77dfca5679566fb23a63d94c0f56aebdd6f2508 | 354f1dce7ea106e209af2d5d818b6033a27c193c | refs/heads/main | 2023-06-02T05:51:51.804770 | 2021-06-27T02:52:39 | 2021-06-27T02:52:39 | 380,639,731 | 0 | 0 | BSD-2-Clause | 2021-06-27T03:08:21 | 2021-06-27T03:08:21 | null | UTF-8 | Python | false | false | 11,103 | py | # -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class ProjectDeviceTemplate(Property):
    """
    AWS Object Type = "AWS::IoT1Click::Project.DeviceTemplate"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-devicetemplate.html
    Property Document:
    - ``p_CallbackOverrides``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-devicetemplate.html#cfn-iot1click-project-devicetemplate-callbackoverrides
    - ``p_DeviceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-devicetemplate.html#cfn-iot1click-project-devicetemplate-devicetype
    """
    AWS_OBJECT_TYPE = "AWS::IoT1Click::Project.DeviceTemplate"

    # Optional property ("p_" prefix): validator accepts None.
    p_CallbackOverrides: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "CallbackOverrides"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-devicetemplate.html#cfn-iot1click-project-devicetemplate-callbackoverrides"""
    p_DeviceType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DeviceType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-devicetemplate.html#cfn-iot1click-project-devicetemplate-devicetype"""
@attr.s
class ProjectPlacementTemplate(Property):
    """
    AWS Object Type = "AWS::IoT1Click::Project.PlacementTemplate"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html
    Property Document:
    - ``p_DefaultAttributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html#cfn-iot1click-project-placementtemplate-defaultattributes
    - ``p_DeviceTemplates``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html#cfn-iot1click-project-placementtemplate-devicetemplates
    """
    AWS_OBJECT_TYPE = "AWS::IoT1Click::Project.PlacementTemplate"

    # Optional property ("p_" prefix): validator accepts None.
    p_DefaultAttributes: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "DefaultAttributes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html#cfn-iot1click-project-placementtemplate-defaultattributes"""
    p_DeviceTemplates: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "DeviceTemplates"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iot1click-project-placementtemplate.html#cfn-iot1click-project-placementtemplate-devicetemplates"""
#--- Resource declaration ---
@attr.s
class Project(Resource):
    """
    AWS Object Type = "AWS::IoT1Click::Project"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html
    Property Document:
    - ``rp_PlacementTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#cfn-iot1click-project-placementtemplate
    - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#cfn-iot1click-project-description
    - ``p_ProjectName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#cfn-iot1click-project-projectname
    """
    AWS_OBJECT_TYPE = "AWS::IoT1Click::Project"

    # Required property ("rp_" prefix): the validator is NOT wrapped in
    # optional(), so the None default fails validation if left unset.
    # NOTE(review): presumably generated this way on purpose — confirm.
    rp_PlacementTemplate: typing.Union['ProjectPlacementTemplate', dict] = attr.ib(
        default=None,
        converter=ProjectPlacementTemplate.from_dict,
        validator=attr.validators.instance_of(ProjectPlacementTemplate),
        metadata={AttrMeta.PROPERTY_NAME: "PlacementTemplate"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#cfn-iot1click-project-placementtemplate"""
    p_Description: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Description"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#cfn-iot1click-project-description"""
    p_ProjectName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ProjectName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#cfn-iot1click-project-projectname"""

    # "rv_" properties expose CloudFormation return values as GetAtt nodes.
    @property
    def rv_ProjectName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#aws-resource-iot1click-project-return-values"""
        return GetAtt(resource=self, attr_name="ProjectName")

    @property
    def rv_Arn(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-project.html#aws-resource-iot1click-project-return-values"""
        return GetAtt(resource=self, attr_name="Arn")
@attr.s
class Placement(Resource):
    """
    AWS Object Type = "AWS::IoT1Click::Placement"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html
    Property Document:
    - ``rp_ProjectName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-projectname
    - ``p_AssociatedDevices``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-associateddevices
    - ``p_Attributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-attributes
    - ``p_PlacementName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-placementname
    """
    AWS_OBJECT_TYPE = "AWS::IoT1Click::Placement"

    # Required property ("rp_" prefix): validator is not optional, so the
    # None default fails validation if the caller does not supply a value.
    rp_ProjectName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ProjectName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-projectname"""
    # Optional properties ("p_" prefix) below accept None.
    p_AssociatedDevices: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "AssociatedDevices"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-associateddevices"""
    p_Attributes: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "Attributes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-attributes"""
    p_PlacementName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "PlacementName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#cfn-iot1click-placement-placementname"""

    # "rv_" properties expose CloudFormation return values as GetAtt nodes.
    @property
    def rv_PlacementName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#aws-resource-iot1click-placement-return-values"""
        return GetAtt(resource=self, attr_name="PlacementName")

    @property
    def rv_ProjectName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-placement.html#aws-resource-iot1click-placement-return-values"""
        return GetAtt(resource=self, attr_name="ProjectName")
@attr.s
class Device(Resource):
    """
    AWS Object Type = "AWS::IoT1Click::Device"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html
    Property Document:
    - ``rp_DeviceId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#cfn-iot1click-device-deviceid
    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#cfn-iot1click-device-enabled
    """
    AWS_OBJECT_TYPE = "AWS::IoT1Click::Device"

    # Required properties ("rp_" prefix): validators are not optional, so
    # the None defaults fail validation if the caller does not set them.
    rp_DeviceId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "DeviceId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#cfn-iot1click-device-deviceid"""
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#cfn-iot1click-device-enabled"""

    # "rv_" properties expose CloudFormation return values as GetAtt nodes.
    @property
    def rv_DeviceId(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#aws-resource-iot1click-device-return-values"""
        return GetAtt(resource=self, attr_name="DeviceId")

    @property
    def rv_Enabled(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#aws-resource-iot1click-device-return-values"""
        return GetAtt(resource=self, attr_name="Enabled")

    @property
    def rv_Arn(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iot1click-device.html#aws-resource-iot1click-device-return-values"""
        return GetAtt(resource=self, attr_name="Arn")
| [
"husanhe@gmail.com"
] | husanhe@gmail.com |
695a3b083fd4b572001864fc33c6a61d4be8573d | b523cbb45c34e9c0f1d94fce9f03d654e18f57ab | /instance_manager.py | bef0fed7e98b69508cc10e4b5957e689b5b26358 | [] | no_license | brittainhard/gpuci-scripts | 0d2d27156486724dbe80a73f31b5076bcef4324b | 27429a89b3a3564acf372fe30d6de2f59c9997c0 | refs/heads/master | 2020-03-26T00:16:38.664253 | 2018-08-29T17:29:19 | 2018-08-29T17:29:19 | 144,313,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,250 | py | import os, json, argparse, time
import datetime, dateutil
from jenkinsapi import jenkins
import requests
import boto3
# Jenkins job names that do not need the GPU build machine; get_jobs()
# skips these when deciding whether GPU work is still pending.
NON_GPU_JOBS = [
    "goai-docker-container-builder",
    "gpu-instance-manager",
    "gpu-instance-spawner"
]
def get_instances():
    """Return all EC2 instances visible through the module-level resource `rs`."""
    return list(rs.instances.iterator())
def instance_is_running(instance):
    """True if the instance's EC2 state code is 16 ('running')."""
    return instance.state["Code"] == 16
def get_running_instances(instances):
    """Filter *instances* down to the ones currently in the running state."""
    return [candidate for candidate in instances if instance_is_running(candidate)]
def get_gpu_instance(instances):
    """Return the first instance launched from the GPU AMI, or None."""
    return next((candidate for candidate in instances if candidate.image.id == AMI), None)
def attach_elastic_ip(instance):
    """Associate the pre-allocated Elastic IP with *instance*.

    API failures are printed rather than raised so the caller's
    provisioning flow keeps going.
    """
    try:
        response = cl.associate_address(AllocationId=ELASTIC_IP,
                                        InstanceId=instance.id)
        print(response)
    # BUG FIX: the original caught `ClientError`, a name that was never
    # imported, so any API failure raised NameError instead of being
    # handled. Use the ClientError class exposed on the boto3 client.
    except cl.exceptions.ClientError as e:
        print(e)
def create_gpu_instance(dry_run=False):
    """Request a single one-time EC2 spot instance from the GPU AMI.

    Uses the module-level configuration (AMI, SPOT_PRICE, SECURITY_GROUP,
    INSTANCE_SIZE). Returns the spot request response.
    """
    # FIX: the response was bound to an unused local (`spot_request`);
    # return it instead so callers can inspect the request if they want
    # to (existing callers ignore the return value, so this is
    # backward compatible).
    return cl.request_spot_instances(
        DryRun=dry_run,
        InstanceCount=1,
        SpotPrice=SPOT_PRICE,
        Type="one-time",
        LaunchSpecification={
            "ImageId": AMI,
            "KeyName": "goai-gpuci",
            "SecurityGroupIds": [SECURITY_GROUP],
            "InstanceType": INSTANCE_SIZE,
            "Placement": {
                "AvailabilityZone": "us-east-2b"
            }
        }
    )
def spawn_instances(dry_run=False):
    """Ensure a GPU spot instance is running and holds the Elastic IP.

    Returns immediately when one already runs; otherwise requests one,
    polls until it appears as running, then attaches the Elastic IP.
    """
    gpu = get_gpu_instance(get_running_instances(get_instances()))
    if gpu:
        return
    create_gpu_instance(dry_run)
    instance = None
    while not instance:
        print("Not Running.")
        time.sleep(5)
        instance = get_gpu_instance(get_running_instances(get_instances()))
    print("Instance created.")
    attach_elastic_ip(instance)
    print("Elastic IP Attached.")
    time.sleep(5)
def get_jobs():
    """Collect the unique GPU-relevant jobs from the Jenkins server.

    Skips the jobs listed in NON_GPU_JOBS and de-duplicates by the job's
    string name.
    """
    jenk = jenkins.Jenkins(JENKINS_URL)
    jobs = []
    # FIX: membership was tested against a str-list rebuilt on every
    # iteration (accidental O(n^2)); track seen names in a set instead.
    seen = set(NON_GPU_JOBS)
    for item in jenk.items():
        job = item[1]
        name = str(job)
        if name in seen:
            continue
        seen.add(name)
        jobs.append(job)
    return jobs
def jobs_running(jobs):
    """True if at least one job in *jobs* reports itself as running."""
    for job in jobs:
        if job.is_running():
            return True
    return False
def time_difference(instance):
    """Return the instance's position inside its current uptime hour.

    Computes now - launch_time (UTC) and folds it down to a
    datetime.time carrying only minutes and seconds; whole hours (and
    days) are discarded, since only the minute within the current
    billing hour matters to close_to_next_hour().
    """
    uptime = datetime.datetime.now(tz=dateutil.tz.tz.tzutc()) - instance.launch_time
    seconds_into_hour = uptime.seconds % 3600
    return datetime.time(minute=seconds_into_hour // 60,
                         second=seconds_into_hour % 60)
def close_to_next_hour(instance):
    """Return (near_hour_boundary, minutes_remaining) for *instance*.

    near_hour_boundary is True when two minutes or less remain before the
    instance completes another full hour of uptime.
    """
    minutes_remaining = 60 - time_difference(instance).minute
    return (minutes_remaining <= 2, minutes_remaining)
def manage_instances(dry_run=False, terminate_instance=False):
    """Terminate the GPU instance when it is idle and near an uptime-hour
    boundary; with terminate_instance=True, terminate it unconditionally.
    """
    jobs = jobs_running(get_jobs())
    gpu = get_gpu_instance(get_running_instances(get_instances()))
    # Explicit kill switch: terminate regardless of jobs or uptime position.
    if gpu and terminate_instance:
        gpu.terminate()
        return
    if not gpu:
        print("Instance is not running.")
        return
    # Only stop close to the end of an uptime hour (expiry[1] = minutes left).
    expiry = close_to_next_hour(gpu)
    if not expiry[0]:
        print("Instance not yet ready to be stopped.")
        print("%d minutes left" % expiry[1])
        return
    if jobs:
        print("Jobs are still running")
        return
    if not dry_run:
        print("Terminating instance")
        gpu.terminate()
    return
if __name__ == "__main__":
    # Deployment configuration comes from the environment. These names are
    # read by the helper functions above as module globals.
    SECURITY_GROUP = os.environ.get("SECURITY_GROUP", "")
    AMI = os.environ.get("AMI", "")
    ELASTIC_IP = os.environ.get("ELASTIC_IP", "")
    INSTANCE_SIZE = os.environ.get("INSTANCE_SIZE", "")
    JENKINS_URL = os.environ.get("JENKINS_URL", "")
    AWS_CREDENTIALS_URL = os.environ.get("AWS_CREDENTIALS_URL", "")
    SPOT_PRICE = float(os.environ.get("SPOT_PRICE", "0.0"))
    # Fetch temporary AWS credentials as JSON from the configured endpoint.
    r = requests.get(AWS_CREDENTIALS_URL)
    creds = json.loads(r.text)
    AWS_KEY_ID = creds["AccessKeyId"]
    AWS_KEY = creds["SecretAccessKey"]
    AWS_SESSION_TOKEN = creds["Token"]
    session = boto3.Session(
        aws_access_key_id=AWS_KEY_ID,
        aws_secret_access_key=AWS_KEY,
        aws_session_token=AWS_SESSION_TOKEN,
        region_name="us-east-2"
    )
    # Module-level EC2 handles used by the helper functions above.
    rs = session.resource('ec2')
    cl = session.client('ec2')
    parser = argparse.ArgumentParser("Spawns instances and checks for instance statuses.")
    parser.add_argument("--spawn-instances", dest="instance_spawner",
                        action="store_true", default=False)
    parser.add_argument("--manage-instances", dest="instance_manager",
                        action="store_true", default=False)
    parser.add_argument("--dry-run", dest="dry_run",
                        action="store_true", default=False)
    parser.add_argument("--terminate-instance", dest="terminate",
                        action="store_true", default=False)
    args = parser.parse_args()
    # Exactly one of the two modes must be selected.
    if args.instance_spawner and args.instance_manager:
        exit("Cannot spawn and manage instances at the same time.")
    elif not args.instance_spawner and not args.instance_manager:
        exit("Please specify either --spawn-instances or --manage-instances.")
    elif args.instance_spawner:
        spawn_instances(dry_run=args.dry_run)
        exit(0)
    elif args.instance_manager:
        manage_instances(dry_run=args.dry_run, terminate_instance=args.terminate)
        exit(0)
| [
"brittainhard@gmail.com"
] | brittainhard@gmail.com |
7aea99a375d5f22517415ab30c8a1a7ada5ba817 | 58aade23413d23f0d4666d7da3766ccbf820d0e1 | /bk/script/summarize_script/summary_phs.py | 1f480209775dfd6bf6a9fff5eb203d679377c5dc | [] | no_license | friend1ws/PDL1_pipeline | 661837ad4f3c791439fcbae3ca32db47b2b6e8a2 | 79bb55297dac04c808577d51f8714a34fe9dad74 | refs/heads/master | 2020-04-15T02:32:43.931129 | 2016-05-04T06:47:20 | 2016-05-04T06:47:20 | 41,615,248 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | #! /usr/bin/env python
import sys, glob
# Usage: summary_phs.py <sample_list> <expression_dir> <fusion_dir>
# For each sample, report the CD274 exon-3 / UTR expression ratio and
# whether any fusion involving CD274 was detected.  (Python 2 script.)
sampleList = sys.argv[1]
expDir = sys.argv[2]
fusionDir = sys.argv[3]
hIN = open(sampleList, 'r')
for line in hIN:
    F = line.rstrip('\n').split('\t')
    sample = F[0]
    # Per-sample result directories are matched by name prefix.
    targetExpDir = glob.glob(expDir + "/" + sample + "*")
    targetFusionDir = glob.glob(fusionDir + "/" + sample + "*")
    expRatio = "---"
    if len(targetExpDir) > 0:
        hIN2 = open(targetExpDir[0] + "/CD274.exon.exp.txt", "r")
        # Defaults of 10.0 keep the ratio defined (1.0) if a row is missing.
        tmp1 = 10.0
        tmp2 = 10.0
        for line in hIN2:
            F2 = line.rstrip('\n').split('\t')
            if F2[0] == "CD274_3": tmp1 = float(F2[1])
            if F2[0] == "CD274_UTR": tmp2 = float(F2[1])
        expRatio = tmp1 / tmp2
    fusion = "---"
    if len(targetFusionDir) > 0:
        hIN2 = open(targetFusionDir[0] + "/star.fusion.result.txt", "r")
        for line in hIN2:
            F2 = line.rstrip('\n').split('\t')
            # Columns 8 and 9 hold the two fusion partner gene symbols.
            if F2[8] == "CD274" or F2[9] == "CD274":
                fusion = True
        if fusion == "---": fusion = False
    # Only emit samples for which both analyses were available.
    if expRatio != "---" and fusion != "---":
        print sample + '\t' + str(expRatio) + '\t' + str(fusion)
| [
"friend1ws@gmail.com"
] | friend1ws@gmail.com |
def yz(s, stalls):
    """Return (min, max) of the empty-run lengths adjacent to stall ``s``.

    ``stalls`` is a 0/1 occupancy array (0 = empty, 1 = occupied).  Counts
    how many consecutive empty stalls lie immediately to the right and to
    the left of index ``s``, stopping at an occupied stall or the array
    boundary.
    """
    total = len(stalls)
    right = 0
    while s + right + 1 < total and stalls[s + right + 1] == 0:
        right += 1
    left = 0
    while s - left - 1 >= 0 and stalls[s - left - 1] == 0:
        left += 1
    return (min(left, right), max(left, right))
# Code Jam "Bathroom Stalls" (brute-force simulation).
# Each test case: seat k people one by one; every person takes the empty
# stall whose (min, max) nearest-occupied distances are lexicographically
# largest, then report the last person's (max, min) distances.
for _ in range(int(input())):
    n, k = [int(i) for i in input().split()]
    # Sentinel occupied stalls at both ends (indices 0 and n + 1).
    stalls = [0 for i in range(n + 2)]
    stalls[0] = 1
    stalls[-1] = 1
    for i in range(k):
        bests = 0
        best = (0, 0)
        for s in range(1, n + 1):
            if stalls[s] == 0:
                # NOTE(review): yz() is evaluated twice per stall here.
                gg = yz(s, stalls)
                if yz(s, stalls) > best:
                    best = gg
                    bests = s
        stalls[bests] = 1
        if i == k - 1:
            print("Case #" + str(_ + 1) + ":", best[1], best[0])
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cf7c207170b97bd72c6456a05b66eb280835a058 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/follow302_status_body.py | 609f30a679e4f7051de995d36eec1c97b7acb914 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,938 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Follow302StatusBody:
    """Request body toggling CDN follow-302 redirect behaviour.

    Auto-generated Huawei Cloud SDK model — regenerate instead of editing
    the structure by hand.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'domain_id': 'str',
        'follow_status': 'str'
    }

    attribute_map = {
        'domain_id': 'domain_id',
        'follow_status': 'follow_status'
    }

    def __init__(self, domain_id=None, follow_status=None):
        """Follow302StatusBody - a model defined in huaweicloud sdk"""
        self._domain_id = None
        self._follow_status = None
        self.discriminator = None
        if domain_id is not None:
            self.domain_id = domain_id
        if follow_status is not None:
            self.follow_status = follow_status

    @property
    def domain_id(self):
        """Gets the domain_id of this Follow302StatusBody.

        ID of the accelerated domain name. See the "query accelerated
        domain names" API for how to obtain it.

        :return: The domain_id of this Follow302StatusBody.
        :rtype: str
        """
        return self._domain_id

    @domain_id.setter
    def domain_id(self, domain_id):
        """Sets the domain_id of this Follow302StatusBody.

        ID of the accelerated domain name. See the "query accelerated
        domain names" API for how to obtain it.

        :param domain_id: The domain_id of this Follow302StatusBody.
        :type: str
        """
        self._domain_id = domain_id

    @property
    def follow_status(self):
        """Gets the follow_status of this Follow302StatusBody.

        The follow-302 status (\"off\"/\"on\").

        :return: The follow_status of this Follow302StatusBody.
        :rtype: str
        """
        return self._follow_status

    @follow_status.setter
    def follow_status(self, follow_status):
        """Sets the follow_status of this Follow302StatusBody.

        The follow-302 status (\"off\"/\"on\").

        :param follow_status: The follow_status of this Follow302StatusBody.
        :type: str
        """
        self._follow_status = follow_status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 needs a utf-8 default encoding for non-ASCII fields.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Follow302StatusBody):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
74e4a49f5cc0b6f968eed7f58149de37464fd37b | f5fa77d99d31d4e8fef2648b83e9d988123be118 | /hanser/models/detection/gfocal.py | 841fdaa8c0c56a3fb85d366cebad9e3746961704 | [] | no_license | sbl1996/hanser | 6ff5362f6909c4ba717c10b5f7baf31a41b70531 | 21c6f9470dd21a5b2e7a18318f40314a34053822 | refs/heads/master | 2022-06-04T03:46:39.034645 | 2022-05-21T03:34:08 | 2022-05-21T03:34:08 | 197,355,428 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.initializers import RandomNormal, Zeros
from hanser.ops import safe_softmax, top_k
from hanser.models.layers import Linear
from hanser.models.detection.detector import SingleStageDetector
from hanser.models.detection.neck.fpn import FPN
from hanser.models.detection.retinanet import RetinaHead
from hanser.detection.assign import mlvl_concat
def integral(prob):
    """Expected value of a discretized distribution over bin indices.

    ``prob`` has shape (..., 4, reg_max + 1): a probability vector over the
    integer support {0, 1, ..., reg_max} for each of the four box sides.
    Returns the per-side expectation with shape (..., 4).
    """
    reg_max = prob.shape[-1] - 1
    # Support points 0..reg_max, cast to the probability tensor's dtype.
    support = tf.constant(np.arange(reg_max + 1), dtype=prob.dtype)
    return tf.tensordot(prob, support, axes=[[-1], [0]])
class GFocal(SingleStageDetector):
    """Generalized Focal Loss detector: backbone + FPN neck + GFocal head."""

    def __init__(self, backbone, num_classes, backbone_indices=(1, 2, 3),
                 feat_channels=256, num_extra_convs=2, stacked_convs=4, norm='bn'):
        super().__init__()
        self.backbone = backbone
        self.backbone_indices = backbone_indices
        # Channel widths of the selected backbone stages fed into the FPN.
        backbone_channels = [backbone.feat_channels[i] for i in backbone_indices]
        self.neck = FPN(backbone_channels, feat_channels, num_extra_convs,
                        extra_convs_on='output', norm=norm)
        num_levels = len(backbone_indices) + num_extra_convs
        # Stride doubles per pyramid level; the first selected stage has
        # stride 2 ** (index + 2) (e.g. index 1 -> stride 8).
        strides = [2 ** (l + backbone_indices[0] + 2) for l in range(num_levels)]
        self.head = GFocalHead(
            num_classes, feat_channels, feat_channels, stacked_convs,
            norm=norm, strides=strides)
class GFocalHead(RetinaHead):
    """GFocal detection head.

    Predicts a discrete distribution over ``reg_max + 1`` bins per box side
    and modulates the classification score with a learned localization
    quality (Distribution-Guided Quality Predictor).
    """

    def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4,
                 norm='bn', strides=(8, 16, 32, 64, 128), reg_max=16, reg_topk=4, reg_channels=64):
        # num_anchors=1 (anchor-free); the bbox branch outputs
        # 4 * (reg_max + 1) logits (a distribution per box side).
        super().__init__(
            1, num_classes, in_channels, feat_channels, stacked_convs,
            centerness=False, bbox_out_channels=4 * (reg_max + 1),
            concat=False, norm=norm, num_levels=len(strides))
        self.strides = strides
        self.reg_max = reg_max
        self.reg_topk = reg_topk
        self.reg_channels = reg_channels
        # Quality predictor: top-k probabilities (+ their mean) per side
        # -> scalar quality score in (0, 1).
        self.reg_conf = Sequential([
            Linear(4 * (reg_topk + 1), reg_channels, act='relu',
                   kernel_init=RandomNormal(stddev=0.01), bias_init=Zeros()),
            Linear(reg_channels, 1, act='sigmoid',
                   kernel_init=RandomNormal(stddev=0.01), bias_init=Zeros()),
        ])

    def call(self, x):
        preds = super().call(x)
        bbox_preds = preds['bbox_pred']
        cls_scores = preds['cls_score']
        b = tf.shape(bbox_preds[0])[0]
        num_level_bboxes = [p.shape[1] for p in bbox_preds]
        # Flatten per-level predictions into (batch, total_boxes, ...).
        bbox_preds = tf.concat(bbox_preds, axis=1)
        cls_scores = tf.concat(cls_scores, axis=1)
        dis_logits = tf.reshape(bbox_preds, [b, -1, 4, self.reg_max + 1])
        prob = safe_softmax(dis_logits, axis=-1)
        prob_topk = top_k(prob, k=self.reg_topk)
        # Statistics fed to the quality predictor: top-k probs and their mean.
        stat = tf.concat([prob_topk, tf.reduce_mean(prob_topk, axis=-1, keepdims=True)], axis=-1)
        stat = tf.reshape(stat, [b, -1, 4 * (self.reg_topk + 1)])
        quality_score = self.reg_conf(stat)
        # Joint classification-quality score (sigmoid cls times quality).
        cls_scores = tf.nn.sigmoid(cls_scores) * quality_score
        # Expected bin offsets are scaled to pixels by each level's stride.
        scales = mlvl_concat(self.strides, num_level_bboxes, prob.dtype)[None, :, None]
        bbox_preds = integral(prob) * scales
        return {'dis_logit': dis_logits, 'bbox_pred': bbox_preds, 'cls_score': cls_scores,
                'scales': scales}
| [
"sbl1996@126.com"
] | sbl1996@126.com |
e8aa80bf6d288ff8e7a87cbdf26ee0b74daf3509 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/sympy/physics/units/systems/natural.py | 124392e550ae5effd4ffc8ef3c5ef72af017eec4 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 931 | py | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Naturalunit system.
The natural system comes from "setting c = 1, hbar = 1". From the computer
point of view it means that we use velocity and action instead of length and
time. Moreover instead of mass we use energy.
"""
from __future__ import division
from sympy.physics.units.definitions import c, eV, hbar
from sympy.physics.units.dimensions import (
DimensionSystem, action, energy, force, frequency, length, mass, momentum,
power, time, velocity)
from sympy.physics.units.prefixes import PREFIXES, prefix_unit
from sympy.physics.units.unitsystem import UnitSystem
# dimension system
_natural_dim = DimensionSystem(
base_dims=(action, energy, velocity),
derived_dims=(length, mass, time, momentum, force, power, frequency)
)
units = prefix_unit(eV, PREFIXES)
# unit system
natural = UnitSystem(base=(hbar, eV, c), units=units, name="Natural system")
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
95e03440d89285f10df39b55be1fd06e80e3332f | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/user_list_type.py | 64bb9c0c18d0075bb477ea405690a9c7d7888bd2 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto module descriptor for this generated file; the manifest lists every
# message defined here.  Auto-generated by the Google Ads API codegen —
# do not hand-edit the message definitions.
__protobuf__ = proto.module(
    package='google.ads.googleads.v4.enums',
    marshal='google.ads.googleads.v4',
    manifest={
        'UserListTypeEnum',
    },
)


class UserListTypeEnum(proto.Message):
    r"""The user list types. """

    class UserListType(proto.Enum):
        r"""Enum containing possible user list types."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        REMARKETING = 2
        LOGICAL = 3
        EXTERNAL_REMARKETING = 4
        RULE_BASED = 5
        SIMILAR = 6
        CRM_BASED = 7


# Public API of this module, derived from the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
f07ab92ea930bbe7555bfa5f6e2cbebf127da184 | 81b0e6fe7a6e56ed8a91748499b81ddd3f2e45f8 | /GAN/mnist_inforgan.py | ae5cfe025134b4efad95451acb734d8a7928f906 | [] | no_license | shieldforever/DeepLearning | cfef817602b9677df4df4c1b87e60c5e91f2315a | b8080938a4b22395379be9032266df36cb5491e6 | refs/heads/master | 2021-01-05T14:12:26.110888 | 2019-10-29T11:23:06 | 2019-10-29T11:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,069 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 20 19:39:36 2019
@author: Administrator
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("MNIST_data/")
tf.reset_default_graph()
def generator(x):
    """InfoGAN generator: latent vector -> 28x28x1 image in [0, 1].

    ``x`` is the concatenated latent code (one-hot class, continuous code,
    noise).  Variables are reused on every call after the first so all
    graph constructions share one set of generator weights.
    """
    # Detect whether generator variables already exist in the graph;
    # order-sensitive: relies on this scope having been built once before.
    reuse = len([t for t in tf.global_variables() if t.name.startswith('generator')]) > 0
    with tf.variable_scope('generator', reuse=reuse):
        x = slim.fully_connected(x, 1024)
        x = slim.batch_norm(x, activation_fn=tf.nn.relu)
        x = slim.fully_connected(x, 7 * 7 * 128)
        x = slim.batch_norm(x, activation_fn=tf.nn.relu)
        x = tf.reshape(x, [-1, 7, 7, 128])
        # Two stride-2 transposed convolutions: 7x7 -> 14x14 -> 28x28.
        x = slim.conv2d_transpose(x, 64, kernel_size=[4, 4], stride=2, activation_fn=None)
        x = slim.batch_norm(x, activation_fn=tf.nn.relu)
        # Sigmoid keeps pixel values in [0, 1], matching MNIST inputs.
        z = slim.conv2d_transpose(x, 1, kernel_size=[4, 4], stride=2, activation_fn=tf.nn.sigmoid)
    return z
def leaky_relu(x, alpha=0.01):
    """Leaky ReLU activation: ``x`` where ``x > 0``, else ``alpha * x``.

    Generalized: the negative-branch slope is now a parameter (default
    0.01, identical to the previously hard-coded value), so callers can
    tune the leak without a new function.
    """
    return tf.where(tf.greater(x, 0), x, alpha * x)
def discriminator(x, num_classes=10, num_cont=2):
    """InfoGAN discriminator / recognition (Q) network.

    Returns a tuple:
      disc       -- real/fake logit per example, shape [batch];
      recog_cat  -- categorical-code logits, ``num_classes`` wide;
      recog_cont -- continuous-code estimates in (0, 1), ``num_cont`` wide.
    Variables are reused on calls after the first so the real and the
    generated batch share the same discriminator weights.
    """
    reuse = len([t for t in tf.global_variables() if t.name.startswith('discriminator')]) > 0
    with tf.variable_scope('discriminator', reuse=reuse):
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
        # Two stride-2 convolutions: 28x28 -> 14x14 -> 7x7.
        x = slim.conv2d(x, num_outputs=64, kernel_size=[4, 4], stride=2, activation_fn=leaky_relu)
        x = slim.conv2d(x, num_outputs=128, kernel_size=[4, 4], stride=2, activation_fn=leaky_relu)
        x = slim.flatten(x)
        # Shared trunk, then separate heads for D and the recognition net.
        shared_tensor = slim.fully_connected(x, num_outputs=1024, activation_fn=leaky_relu)
        recog_shared = slim.fully_connected(shared_tensor, num_outputs=128, activation_fn=leaky_relu)
        disc = slim.fully_connected(shared_tensor, num_outputs=1, activation_fn=None)
        disc = tf.squeeze(disc, -1)
        recog_cat = slim.fully_connected(recog_shared, num_outputs=num_classes, activation_fn=None)
        recog_cont = slim.fully_connected(recog_shared, num_outputs=num_cont, activation_fn=tf.nn.sigmoid)
    return disc, recog_cat, recog_cont
# ----- Graph construction -----
batch_size = 10
classes_dim = 10   # digit classes (categorical latent code)
con_dim = 2        # continuous latent codes
rand_dim = 38      # unstructured noise dimensions
n_input = 784      # 28*28 flattened MNIST image

x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.int32, [None])

# Latent vector z = [one-hot class (10) | continuous (2) | noise (38)] = 50 dims.
z_con = tf.random_normal((batch_size, con_dim))
z_rand = tf.random_normal((batch_size, rand_dim))
z = tf.concat(axis=1, values=[tf.one_hot(y, depth=classes_dim), z_con, z_rand])
gen = generator(z)
genout = tf.squeeze(gen, -1)

# Labels for the discriminator: ones for real images, zeros for generated.
y_real = tf.ones(batch_size)
y_fake = tf.zeros(batch_size)

# Discriminator on real and generated batches (weights shared via reuse).
disc_real, class_real, _ = discriminator(x)
disc_fake, class_fake, con_fake = discriminator(gen)
pred_class = tf.argmax(class_fake, dimension=1)

# Discriminator loss: mean of real and fake binary cross-entropy.
loss_d_r = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=y_real))
loss_d_f = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=y_fake))
loss_d = (loss_d_r + loss_d_f) / 2

# Generator loss: make the discriminator label fakes as real.
loss_g = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=y_real))

# Categorical (mutual-information) losses on fake and real batches.
loss_cf = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=class_fake, labels=y))
loss_cr = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=class_real, labels=y))
loss_c = (loss_cf + loss_cr) / 2

# Continuous-code reconstruction loss.
loss_con = tf.reduce_mean(tf.square(con_fake - z_con))

# Split the trainable variables between the two sub-networks by scope name.
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'discriminator' in var.name]
g_vars = [var for var in t_vars if 'generator' in var.name]
disc_global_step = tf.Variable(0, trainable=False)
gen_global_step = tf.Variable(0, trainable=False)
train_disc = tf.train.AdamOptimizer(0.0001).minimize(loss_d + loss_c + loss_con, var_list=d_vars, global_step=disc_global_step)
train_gen = tf.train.AdamOptimizer(0.001).minimize(loss_g + loss_c + loss_con, var_list=g_vars, global_step=gen_global_step)

training_epochs = 3
display_step = 1
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ----- Training loop: alternate one D step and one G step per batch -----
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            feeds = {x: batch_xs, y: batch_ys}
            l_disc, _, l_d_step = sess.run([loss_d, train_disc, disc_global_step], feed_dict=feeds)
            l_gen, _, l_g_step = sess.run([loss_g, train_gen, gen_global_step], feed_dict=feeds)
        # Report the last batch's losses for the epoch.
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f} ".format(l_disc), l_gen)
    print("Finished!")
    # Evaluate both losses on a held-out test batch.
    print("Result:", loss_d.eval({x: mnist.test.images[:batch_size], y: mnist.test.labels[:batch_size]})
          , loss_g.eval({x: mnist.test.images[:batch_size], y: mnist.test.labels[:batch_size]}))
    # Generate images conditioned on test labels and compare side by side.
    show_num = 10
    gensimple, d_class, inputx, inputy, con_out = sess.run(
        [genout, pred_class, x, y, con_fake], feed_dict={x: mnist.test.images[:batch_size], y: mnist.test.labels[:batch_size]})
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(show_num):
        a[0][i].imshow(np.reshape(inputx[i], (28, 28)))
        a[1][i].imshow(np.reshape(gensimple[i], (28, 28)))
        print("d_class", d_class[i], "inputy", inputy[i], "con_out", con_out[i])
    plt.draw()
    plt.show()
    # Sweep the two continuous codes over a 10x10 grid for each digit class.
    my_con = tf.placeholder(tf.float32, [batch_size, 2])
    myz = tf.concat(axis=1, values=[tf.one_hot(y, depth=classes_dim), my_con, z_rand])
    mygen = generator(myz)
    mygenout = tf.squeeze(mygen, -1)
    my_con1 = np.ones([10, 2])
    a = np.linspace(0.0001, 0.99999, 10)  # NOTE: rebinds `a` (was the subplot axes above)
    y_input = np.ones([10])
    figure = np.zeros((28 * 10, 28 * 10))
    my_rand = tf.random_normal((10, rand_dim))
    for i in range(10):
        # Row i fixes the first code; each column j varies the second code
        # and sets the digit class to j.
        for j in range(10):
            my_con1[j][0] = a[i]
            my_con1[j][1] = a[j]
            y_input[j] = j
        mygenoutv = sess.run(mygenout, feed_dict={y: y_input, my_con: my_con1})
        for jj in range(10):
            digit = mygenoutv[jj].reshape(28, 28)
            figure[i * 28: (i + 1) * 28,
                   jj * 28: (jj + 1) * 28] = digit
    plt.figure(figsize=(10, 10))
    plt.imshow(figure, cmap='Greys_r')
    plt.show()
| [
"870407139@qq.com"
] | 870407139@qq.com |
# Rosalind "Finding a Shared Motif": longest common substring of all
# DNA strings in a FASTA-style file (sequence lines are the even lines).
#
# Fixes vs. the previous version:
#   * removed the stray, incomplete `def jloop` line (a syntax error);
#   * candidates are now taken from the shortest sequence (the old code
#     indexed by the shortest string's length but sliced data[0]);
#   * a motif is recorded only when it actually occurs in every sequence
#     (the old code recorded the length-n window that had just FAILED).
maxlen = 2          # only motifs longer than this seed length are kept
maxstring = ""

with open('sample.txt') as stringfile:
    # Records alternate ">header" / sequence; keep only the sequences.
    data = stringfile.read().split('\n')[1::2]

# Any common substring must occur in the shortest sequence, so enumerate
# its substrings: for each start position, grow the window until some
# sequence no longer contains the candidate.
shortest = min(data, key=len)
for i in range(len(shortest)):
    for n in range(maxlen + 1, len(shortest) - i + 1):
        candidate = shortest[i:i + n]
        if all(candidate in s for s in data):
            # n exceeds the previous best by construction of the range.
            maxlen = n
            maxstring = candidate
        else:
            # A longer window starting here can only fail as well.
            break

with open("output.txt", 'w') as output:
    output.write(maxstring)
| [
"bendavidsteel@gmail.com"
] | bendavidsteel@gmail.com |
357746ec09b140827b2d7be08c9a9f27e7a7f71f | fce76a80bafa84b188fd9761d769f79dd712d79c | /JH_RestAPI/jobapps/migrations/0012_jobapplicationnote.py | 695d6185dc04b277ff8709a9c32c427aca09ecbf | [] | no_license | komal14prasad/backend | 547110ddb16f4ad16be7f3aebd87ad2cea52ee2f | 4a915f2e744c5697d8a90a59e358c1ce94b47d69 | refs/heads/master | 2020-05-22T02:34:21.237812 | 2019-05-12T03:36:22 | 2019-05-12T03:36:22 | 186,200,468 | 0 | 0 | null | 2019-05-12T02:03:13 | 2019-05-12T02:03:12 | null | UTF-8 | Python | false | false | 912 | py | # Generated by Django 2.1.5 on 2019-03-18 03:05
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the JobApplicationNote table: free-form notes attached to a
    job application.  Frozen migration history — do not edit."""

    dependencies = [
        ('jobapps', '0011_auto_20190210_1053'),
    ]

    operations = [
        migrations.CreateModel(
            name='JobApplicationNote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True, null=True)),
                # datetime.datetime.now is passed as a callable, so Django
                # evaluates it at row-creation time, not at migration time.
                ('created_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                # Note is deleted together with its parent application.
                ('job_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='jobapps.JobApplication')),
            ],
        ),
    ]
| [
"sdemirci55@gmail.com"
] | sdemirci55@gmail.com |
e6cb8f8f82846c0b57397552dae31ee75edf7f6f | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/lang/ca/__init__.py | d25d40e257fd706cc679a013ba3733ee90462e07 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 2,297 | py | # coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
# uncomment if files are available
# from .norm_exceptions import NORM_EXCEPTIONS
# from .tag_map import TAG_MAP
# from .morph_rules import MORPH_RULES
# uncomment if lookup-based lemmatizer is available
from .lemmatizer import LOOKUP
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
# Create a Language subclass
# Documentation: https://spacy.io/docs/usage/adding-languages
# This file should be placed in spacy/lang/ca (ISO code of language).
# Before submitting a pull request, make sure the remove all comments from the
# language data files, and run at least the basic tokenizer tests. Simply add the
# language ID to the list of languages in spacy/tests/conftest.py to include it
# in the basic tokenizer sanity tests. You can optionally add a fixture for the
# language's tokenizer and add more specific tests. For more info, see the
# tests documentation: https://github.com/explosion/spaCy/tree/master/spacy/tests
class CatalanDefaults(Language.Defaults):
    """Language defaults (lexical attributes and tokenizer data) for Catalan."""
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'ca'  # ISO 639-1 code
    # Norm exceptions: currently only the shared base norms; add
    # Catalan-specific dictionaries here when available.
    lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)
    # Overwrite functions for lexical attributes (e.g. like_num).
    lex_attr_getters.update(LEX_ATTRS)
    # Merge Catalan-specific tokenizer exceptions into the base exceptions.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS
    # if available: add tag map
    # tag_map = dict(TAG_MAP)
    # if available: add morph rules
    # morph_rules = dict(MORPH_RULES)
    # Lookup table backing the lookup-based lemmatizer.
    lemma_lookup = LOOKUP
class Catalan(Language):
    """spaCy language class for Catalan ('ca')."""
    lang = 'ca'  # ISO 639-1 code
    Defaults = CatalanDefaults  # set Defaults to custom language defaults


# set default export – this allows the language class to be lazy-loaded
__all__ = ['Catalan']
| [
"yuxuan2015@example.com"
] | yuxuan2015@example.com |
616e6e51eee2039efa70734239b813e32f6858b0 | b46c889743320f084485ab1a38dfc28416171d6a | /airflow/serialization/serialization.py | 0173fa89d7fb40f9511aef89cbf823ffba3bbf64 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause",
"Python-2.0"
] | permissive | jasonfry89/incubator-airflow-prefix | 61541be0c0ee5feb9dfba7e5061904f9c8bbe3cd | 4bd87f4dfe7a27c7c6456e2fa946464f7e428a61 | refs/heads/master | 2023-01-22T10:07:43.542723 | 2019-10-24T22:15:16 | 2019-10-24T22:15:16 | 83,245,336 | 1 | 1 | Apache-2.0 | 2023-01-13T23:40:57 | 2017-02-26T22:28:42 | Python | UTF-8 | Python | false | false | 10,629 | py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utils for DAG serialization with JSON."""
import datetime
import enum
import json
import logging
from typing import TYPE_CHECKING, Dict, Optional, Union
import pendulum
from dateutil import relativedelta
import airflow
from airflow.exceptions import AirflowException
from airflow.models.baseoperator import BaseOperator
from airflow.models.connection import Connection
from airflow.models.dag import DAG
from airflow.serialization.enums import DagAttributeTypes as DAT, Encoding
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.www.utils import get_python_source
# Module-level logger shared by the serialization helpers.
LOG = LoggingMixin().log

# Sentinel string returned by _serialize when stringification fails.
# Serialization failure returns 'failed'.
FAILED = 'serialization_failed'
class Serialization:
"""Serialization provides utils for serialization."""
# JSON primitive types.
_primitive_types = (int, bool, float, str)
# Time types.
# datetime.date and datetime.time are converted to strings.
_datetime_types = (datetime.datetime,)
# Object types that are always excluded in serialization.
# FIXME: not needed if _included_fields of DAG and operator are customized.
_excluded_types = (logging.Logger, Connection, type)
_json_schema = None # type: Optional[Validator]
_CONSTRUCTOR_PARAMS = {} # type: Dict[str, Parameter]
SERIALIZER_VERSION = 1
@classmethod
def to_json(cls, var: Union[DAG, BaseOperator, dict, list, set, tuple]) -> str:
"""Stringifies DAGs and operators contained by var and returns a JSON string of var.
"""
return json.dumps(cls.to_dict(var), ensure_ascii=True)
@classmethod
def to_dict(cls, var: Union[DAG, BaseOperator, dict, list, set, tuple]) -> dict:
"""Stringifies DAGs and operators contained by var and returns a dict of var.
"""
# Don't call on this class directly - only SerializedDAG or
# SerializedBaseOperator should be used as the "entrypoint"
raise NotImplementedError()
@classmethod
def from_json(cls, serialized_obj: str) -> Union[
'SerializedDAG', 'SerializedBaseOperator', dict, list, set, tuple]:
"""Deserializes json_str and reconstructs all DAGs and operators it contains."""
return cls.from_dict(json.loads(serialized_obj))
@classmethod
def from_dict(cls, serialized_obj: dict) -> Union[
'SerializedDAG', 'SerializedBaseOperator', dict, list, set, tuple]:
"""Deserializes a python dict stored with type decorators and
reconstructs all DAGs and operators it contains."""
return cls._deserialize(serialized_obj)
@classmethod
def validate_schema(cls, serialized_obj: Union[str, dict]):
"""Validate serialized_obj satisfies JSON schema."""
if cls._json_schema is None:
raise AirflowException('JSON schema of {:s} is not set.'.format(cls.__name__))
if isinstance(serialized_obj, dict):
cls._json_schema.validate(serialized_obj)
elif isinstance(serialized_obj, str):
cls._json_schema.validate(json.loads(serialized_obj))
else:
raise TypeError("Invalid type: Only dict and str are supported.")
@staticmethod
def _encode(x, type_):
"""Encode data by a JSON dict."""
return {Encoding.VAR: x, Encoding.TYPE: type_}
@classmethod
def _is_primitive(cls, var):
"""Primitive types."""
return var is None or isinstance(var, cls._primitive_types)
@classmethod
def _is_excluded(cls, var, attrname, instance):
"""Types excluded from serialization."""
# pylint: disable=unused-argument
return (
var is None or
isinstance(var, cls._excluded_types) or
cls._value_is_hardcoded_default(attrname, var)
)
@classmethod
def _serialize(cls, var): # pylint: disable=too-many-return-statements
"""Helper function of depth first search for serialization.
The serialization protocol is:
(1) keeping JSON supported types: primitives, dict, list;
(2) encoding other types as ``{TYPE: 'foo', VAR: 'bar'}``, the deserialization
step decode VAR according to TYPE;
(3) Operator has a special field CLASS to record the original class
name for displaying in UI.
"""
try:
if cls._is_primitive(var):
# enum.IntEnum is an int instance, it causes json dumps error so we use its value.
if isinstance(var, enum.Enum):
return var.value
return var
elif isinstance(var, dict):
return cls._encode(
{str(k): cls._serialize(v) for k, v in var.items()},
type_=DAT.DICT
)
elif isinstance(var, list):
return [cls._serialize(v) for v in var]
elif isinstance(var, DAG):
return airflow.serialization.SerializedDAG.serialize_dag(var)
elif isinstance(var, BaseOperator):
return airflow.serialization.SerializedBaseOperator.serialize_operator(var)
elif isinstance(var, cls._datetime_types):
return cls._encode(var.timestamp(), type_=DAT.DATETIME)
elif isinstance(var, datetime.timedelta):
return cls._encode(var.total_seconds(), type_=DAT.TIMEDELTA)
elif isinstance(var, (pendulum.tz.Timezone, pendulum.tz.timezone_info.TimezoneInfo)):
return cls._encode(str(var.name), type_=DAT.TIMEZONE)
elif isinstance(var, relativedelta.relativedelta):
encoded = {k: v for k, v in var.__dict__.items() if not k.startswith("_") and v}
if var.weekday and var.weekday.n:
# Every n'th Friday for example
encoded['weekday'] = [var.weekday.weekday, var.weekday.n]
elif var.weekday:
encoded['weekday'] = [var.weekday.weekday]
return cls._encode(encoded, type_=DAT.RELATIVEDELTA)
elif callable(var):
return str(get_python_source(var))
elif isinstance(var, set):
# FIXME: casts set to list in customized serialization in future.
return cls._encode(
[cls._serialize(v) for v in var], type_=DAT.SET)
elif isinstance(var, tuple):
# FIXME: casts tuple to list in customized serialization in future.
return cls._encode(
[cls._serialize(v) for v in var], type_=DAT.TUPLE)
else:
LOG.debug('Cast type %s to str in serialization.', type(var))
return str(var)
except Exception: # pylint: disable=broad-except
LOG.warning('Failed to stringify.', exc_info=True)
return FAILED
@classmethod
def _deserialize(cls, encoded_var): # pylint: disable=too-many-return-statements
"""Helper function of depth first search for deserialization."""
# JSON primitives (except for dict) are not encoded.
if cls._is_primitive(encoded_var):
return encoded_var
elif isinstance(encoded_var, list):
return [cls._deserialize(v) for v in encoded_var]
assert isinstance(encoded_var, dict)
var = encoded_var[Encoding.VAR]
type_ = encoded_var[Encoding.TYPE]
if type_ == DAT.DICT:
return {k: cls._deserialize(v) for k, v in var.items()}
elif type_ == DAT.DAG:
return airflow.serialization.SerializedDAG.deserialize_dag(var)
elif type_ == DAT.OP:
return airflow.serialization.SerializedBaseOperator.deserialize_operator(var)
elif type_ == DAT.DATETIME:
return pendulum.from_timestamp(var)
elif type_ == DAT.TIMEDELTA:
return datetime.timedelta(seconds=var)
elif type_ == DAT.TIMEZONE:
return pendulum.timezone(var)
elif type_ == DAT.RELATIVEDELTA:
if 'weekday' in var:
var['weekday'] = relativedelta.weekday(*var['weekday'])
return relativedelta.relativedelta(**var)
elif type_ == DAT.SET:
return {cls._deserialize(v) for v in var}
elif type_ == DAT.TUPLE:
return tuple([cls._deserialize(v) for v in var])
else:
raise TypeError('Invalid type {!s} in deserialization.'.format(type_))
_deserialize_datetime = pendulum.from_timestamp
_deserialize_timezone = pendulum.timezone
    @classmethod
    def _deserialize_timedelta(cls, seconds):
        # Inverse of the DAT.TIMEDELTA encoding above, which stores total_seconds().
        return datetime.timedelta(seconds=seconds)
@classmethod
def _value_is_hardcoded_default(cls, attrname, value):
"""
Return true if ``value`` is the hard-coded default for the given attribute.
This takes in to account cases where the ``concurrency`` parameter is
stored in the ``_concurrency`` attribute.
And by using `is` here only and not `==` this copes with the case a
user explicitly specifies an attribute with the same "value" as the
default. (This is because ``"default" is "default"`` will be False as
they are different strings with the same characters.)
"""
if attrname in cls._CONSTRUCTOR_PARAMS and cls._CONSTRUCTOR_PARAMS[attrname].default is value:
return True
return False
if TYPE_CHECKING:
from airflow.serialization.json_schema import Validator
from airflow.serialization.serialized_baseoperator import SerializedBaseOperator # noqa: F401, E501; # pylint: disable=cyclic-import
from airflow.serialization.serialized_dag import SerializedDAG # noqa: F401, E501; # pylint: disable=cyclic-import
from inspect import Parameter
| [
"kaxilnaik@gmail.com"
] | kaxilnaik@gmail.com |
61004a97a2ccdbbd703c0335da0142a4d4b5ed9e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_glazed.py | 4b9d651bd336809ed5c8db024f25696635fdae74 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#class header
class _GLAZED():
    """Word-form record for "GLAZED", an inflection of the base word "glaze"."""

    def __init__(self):
        self.name = "GLAZED"
        self.basic = ['glaze']    # base (lemma) form(s)
        self.definitions = glaze  # NOTE(review): `glaze` is undefined in this module — confirm intended value
        self.parents = []
        self.childen = []         # (sic) attribute name kept as-is; external code may read it
        self.properties = []
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c2e58269c8af68e51587b23ce08e779eb9fc0c3e | 4b9f2d543755cf5ab9e1ddeab46e123b5a3e73ee | /lect05_52-week_saving_challenge/money_challenge_v4.0.py | 3d6c292351487772efecae124d9eeac25207b03a | [] | no_license | guolikai/mypython | ff8e3bbc22e346d90126b551b945909d64c4bb3e | 7c4003e5a70d306b34a102b3b32c667898c5e9c8 | refs/heads/master | 2022-12-25T06:52:06.253960 | 2020-09-30T13:53:09 | 2020-09-30T13:53:09 | 82,179,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """
作者:GuoLikai
功能:52周存钱挑战
版本:3.0
日期:05/08/2018
2.0增加功能:记录每周的存款数
3.0增加功能:使用循环直接计数
4.0增加功能:灵活设置每周的存款数,增加的存款数及存款周数
"""
import math
def save_money_in_n_weeks(money_per_week, increase_money, total_week):
"""
计算n周内的存款金额
"""
money_list = [] # 记录每周存款数的列表
for i in range(total_week):
money_list.append(money_per_week)
saving = math.fsum(money_list)
# 输出信息
# print('第{}周,存入{}元,账户累计{}元'.format(i + 1, money_per_week, saving))
# 更新下一周的存钱金额
money_per_week += increase_money
return saving
def main():
"""
主函数
"""
money_per_week = float(input('请输入每周的存入的金额:')) # 每周的存入的金额
increase_money = float(input('请输入每周的递增金额:')) # 递增的金额
total_week = int(input('请输入总共的周数:')) # 总共的周数
# 调用函数
saving = save_money_in_n_weeks(money_per_week, increase_money, total_week)
print('总存款金额', saving)
if __name__ == '__main__':
main()
| [
"glk73748196@sina.com"
] | glk73748196@sina.com |
cbd162117e95a560810e5d1c96b3b9c5300777c2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fish1_20200804200900.py | 7517919c0ab14ea3f149b1054c468da0ce11e9e1 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | def fish(A,B):
# where A represent the size of fish and B represents the direction of fish
aliveFish = len(A)
bigFish = 0
indexFish = None
j = 0
while j < len(B)-1:
if B[j] == 1 and B[j+1] !=1:
if A[j] > A[j+1]:
if A[j] > bigFish:
bigFish = A[j]
A.remove(A[j+1])
B.remove(B[j+1])
else:
if A[j+1] > bigFish:
bigFish = A[j+1]
A.remove(A[j])
B.remove(B[j])
aliveFish -=1
j +=1
fish([4,3,2,1,5],[0,1,0,0,0]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
a3d019059a08c7f50daa263ad9a29b74d90f96d7 | 49663ea34b41c8180d7484f778f5cad2e701d220 | /tests/dualtor/test_server_failure.py | 07c2bf67601fef2bf59b1e15b84bc027badc9c60 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | stepanblyschak/sonic-mgmt | ed08c98e7bff1615b057daa8711686aa5986073d | a1ae1e0b4e9927e6f52916f76121780d19ec3e54 | refs/heads/master | 2023-04-07T01:30:11.403900 | 2023-03-29T10:16:52 | 2023-03-29T10:16:52 | 135,678,178 | 0 | 0 | NOASSERTION | 2023-03-29T16:13:55 | 2018-06-01T06:41:49 | Python | UTF-8 | Python | false | false | 3,378 | py | import pytest
from tests.common.dualtor.mux_simulator_control import toggle_simulator_port_to_upper_tor, \
simulator_flap_counter, simulator_server_down # noqa F401
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.dualtor.dual_tor_utils import show_muxcable_status, rand_selected_interface # noqa: F401
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, \
run_icmp_responder # noqa: F401
from tests.common.utilities import wait_until
pytestmark = [
pytest.mark.topology('t0'),
pytest.mark.usefixtures('run_garp_service', 'run_icmp_responder')
]
@pytest.fixture(autouse=True, scope='module')
def skip_if_non_dualtor_topo(tbinfo):
    """Module-wide guard: skip every test here unless the topology name contains 'dualtor'."""
    pytest_require('dualtor' in tbinfo['topo']['name'], "Only run on dualtor testbed")
def test_server_down(duthosts, tbinfo, rand_selected_interface, simulator_flap_counter,       # noqa F811
                     simulator_server_down, toggle_simulator_port_to_upper_tor, loganalyzer):  # noqa F811
    """
    Verify that mux cable is not toggled excessively.

    Scenario: with the upper ToR active, simulate the server going down and
    check that (a) the upper ToR stays active but unhealthy, (b) the lower
    ToR ends up standby/unhealthy, and (c) the simulator counted at most 3
    mux flaps for the interface.
    """
    # Suppress a known benign orchagent error during the forced state churn.
    # NOTE: the raw string continues onto the next line via backslash-newline;
    # its exact bytes (including the line break) are part of the ignore pattern.
    for analyzer in list(loganalyzer.values()):
        analyzer.ignore_regex.append(r".*ERR swss#orchagent: :- setState: \
            State transition from active to active is not-handled")
    upper_tor = duthosts[tbinfo['duts'][0]]
    lower_tor = duthosts[tbinfo['duts'][1]]

    # NOTE(review): both checkers close over `itfs`, which is assigned below;
    # this works because they are only invoked (via wait_until) after that
    # assignment runs.
    def upper_tor_mux_state_verification(state, health):
        mux_state_upper_tor = show_muxcable_status(upper_tor)
        return mux_state_upper_tor[itfs]['status'] == state and mux_state_upper_tor[itfs]['health'] == health

    def lower_tor_mux_state_verfication(state, health):
        mux_state_lower_tor = show_muxcable_status(lower_tor)
        return mux_state_lower_tor[itfs]['status'] == state and mux_state_lower_tor[itfs]['health'] == health

    itfs, _ = rand_selected_interface
    # Set upper_tor as active
    toggle_simulator_port_to_upper_tor(itfs)
    pytest_assert(wait_until(30, 1, 0, upper_tor_mux_state_verification, 'active', 'healthy'),
                  "mux_cable status is unexpected. Should be (active, healthy). Test can't proceed. ")
    # Baseline flap count before injecting the failure.
    mux_flap_counter_0 = simulator_flap_counter(itfs)
    # Server down
    simulator_server_down(itfs)
    # Verify mux_cable state on upper_tor is active
    pytest_assert(wait_until(20, 1, 0, upper_tor_mux_state_verification, 'active', 'unhealthy'),
                  "mux_cable status is unexpected. Should be (active, unhealthy)")
    # Verify mux_cable state on lower_tor is standby
    pytest_assert(wait_until(20, 1, 0, lower_tor_mux_state_verfication, 'standby', 'unhealthy'),
                  "mux_cable status is unexpected. Should be (standby, unhealthy)")
    # Verify that mux_cable flap_counter should be no larger than 3
    # lower_tor(standby) -> active -> standby
    # upper_tor(active) -> active
    # The toggle from both tor may be overlapped and invisible
    mux_flap_counter_1 = simulator_flap_counter(itfs)
    pytest_assert(mux_flap_counter_1 - mux_flap_counter_0 <= 3,
                  "The mux_cable flap count should be no larger than 3 ({})"
                  .format(mux_flap_counter_1 - mux_flap_counter_0))
| [
"noreply@github.com"
] | stepanblyschak.noreply@github.com |
7e79e805f44cd3ea15b84a2b395d30fbdc293fdd | e569aaa98d90ebfed429da9f1e8697b6122c66f9 | /ecoroofs/locations/migrations/0008_add_point_obscured_to_location_model.py | fd5dab8a0eab3b0b2cab506c1d8508d2dfc692c2 | [] | no_license | PSU-OIT-ARC/ecoroofs | 4f4a5df0b15faf8d4442155e7a70104a2c25b44f | e7acf776dbd19e90e22635424808c8c6807d7572 | refs/heads/develop | 2020-05-21T16:16:48.799309 | 2017-11-08T21:31:36 | 2017-11-08T21:31:36 | 64,701,178 | 0 | 3 | null | 2017-07-24T19:06:49 | 2016-08-01T20:59:05 | Python | UTF-8 | Python | false | false | 422 | py | import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Adds the obscured display point to Location (schema step 0008).

    dependencies = [
        ('locations', '0007_add_year_built_to_location_model'),
    ]

    operations = [
        migrations.AddField(
            model_name='location',
            name='point_obscured',
            # WGS84 (SRID 4326) geometry; declared non-null — presumably the
            # table is empty at this point or values are backfilled elsewhere. TODO confirm
            field=django.contrib.gis.db.models.fields.PointField(srid=4326),
        ),
    ]
| [
"wbaldwin@pdx.edu"
] | wbaldwin@pdx.edu |
5a89b194f60aa6e8e7016670b169d895bdfd01e1 | c5458f2d53d02cb2967434122183ed064e1929f9 | /sdks/python/test/test_spending_proof.py | c58e33d52c302a1ce178f05e03d2ef9822171fd3 | [] | no_license | ross-weir/ergo-node-api-sdks | fd7a32f79784dbd336ef6ddb9702b9dd9a964e75 | 9935ef703b14760854b24045c1307602b282c4fb | refs/heads/main | 2023-08-24T05:12:30.761145 | 2021-11-08T10:28:10 | 2021-11-08T10:28:10 | 425,785,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | """
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: ergoplatform@protonmail.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.spending_proof import SpendingProof
class TestSpendingProof(unittest.TestCase):
    """SpendingProof unit test stubs"""
    # Auto-generated placeholder suite; no fixtures or assertions yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testSpendingProof(self):
        """Test SpendingProof"""
        # FIXME: construct object with mandatory attributes with example values
        # model = SpendingProof() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"29697678+ross-weir@users.noreply.github.com"
] | 29697678+ross-weir@users.noreply.github.com |
d23250f1b6b7e554bdf137997e2f02efa56feb70 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/230/28357/submittedfiles/swamee.py | e5f429c7808c600df6e2cb6d975c9662a428b88e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
f = float(input('Digite o valor de f: '))
L = float(input('Digite o valor de L: '))
Q = float(input('Digite o valor de Q: '))
DeltaH = float(input('Digite o valor de DeltaH: '))
v = float(input('Digite o valor de v: '))
g = 9.81
E = 0.000002
D = (8*f*L*(Q**2)/((math.pi**2)*g*DeltaH))**0.2
Rey = (4*Q)/(math.pi*D*v)
k = (0.25)/(math.log10(E/(3.7*D)+(5.74/(Rey**0.9))))**2
print ('D = %.4f' % D)
print ('Rey = %.4f' % Rey)
print ('k = %.4f' % k) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d3264668c412aae9c2b8f4a274600bcaf638fa2c | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-1657.py | e668c238fd53a248f5ab3ab63f60e886d9c08853 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,276 | py | # Binary-search trees
class TreeNode(object):
    """A node in a binary-search tree of distinct ints."""
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None

    def insert(self:"TreeNode", x:int) -> bool:
        """Insert x into this subtree; return True iff a node was added."""
        if x == self.value:
            return False  # duplicates are ignored
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode(x)
        return True

    def contains(self:"TreeNode", x:int) -> bool:
        """Return True iff x is stored in this subtree."""
        if x == self.value:
            return True
        branch = self.left if x < self.value else self.right
        return branch is not None and branch.contains(x)
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
    """BST node variant carrying three parallel value/child slots.

    Only ``value``/``left``/``right`` participate in the tree logic; the
    ``*2``/``*3`` fields and the extra x2/x3 parameters are accepted but
    unused (synthetic-benchmark padding).
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    def insert(self:"TreeNode3", x:int) -> bool:
        # Insert x; return True iff a new node was created.
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Same as insert(); x2 is ignored.
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Same as insert(); x2 and x3 are ignored.
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode3", x:int) -> bool:
        # Standard BST membership test.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        # Same as contains(); x2 is ignored.
        if x < self.value:
            # BUG FIX: this guard was an unexpanded template hole (`$Exp`),
            # a syntax error; restored the check used by every sibling
            # contains* method in this file.
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        # Same as contains(); x2 and x3 are ignored.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
    """Binary-search-tree facade that also tracks its node count."""
    root:TreeNode = None
    size:int = 0

    def insert(self:"Tree", x:int) -> object:
        """Insert x, creating the root on first use."""
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        elif self.root.insert(x):
            self.size = self.size + 1

    def contains(self:"Tree", x:int) -> bool:
        """Membership test; an empty tree contains nothing."""
        return False if self.root is None else self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    """Allocate a leaf TreeNode holding x."""
    node:TreeNode = None
    node = TreeNode()
    node.value = x
    return node
def makeNode2(x: int, x2: int) -> TreeNode2:
    """Allocate a leaf TreeNode2 holding x (x2 is accepted but ignored)."""
    node:TreeNode2 = None
    node = TreeNode2()
    node.value = x
    return node
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    """Allocate a leaf TreeNode3 holding x (extra arguments are ignored)."""
    node:TreeNode3 = None
    node = TreeNode3()
    node.value = x
    return node
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    """Allocate a leaf TreeNode4 holding x (extra arguments are ignored)."""
    node:TreeNode4 = None
    node = TreeNode4()
    node.value = x
    return node
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    """Allocate a leaf TreeNode5 holding x (extra arguments are ignored)."""
    node:TreeNode5 = None
    node = TreeNode5()
    node.value = x
    return node
# Input parameters
n:int = 100  # number of pseudo-random insertion rounds
n2:int = 100  # n2..n5 and c2..c5 are unused duplicates (benchmark padding)
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4  # indices divisible by c are NOT inserted sequentially below
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813  # seed for the multiplicative sequence below
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
    t.insert(k)
    # k walks a multiplicative-congruential sequence mod 37831
    # (presumably prime, giving a long cycle — TODO confirm).
    k = (k * 37813) % 37831
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
db4640a338afe81eaf782bac82394116670bbc95 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02804/s938106196.py | e07f2d428fc7a4fa2bbc75241a67288a2325428b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | n,k,*l=map(int,open(0).read().split());l.sort();c,a,M=1,0,10**9+7
for i in range(k-1,n):a+=c*(l[i]-l[~i]);c=c*-~i*pow(i-k+2,M-2,M)%M
print(a%M) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
96a81c995e6509af66ced54e9cbf02b096b0225e | f3b233e5053e28fa95c549017bd75a30456eb50c | /CDK2_input/L1Q/1Q-17_MD_NVT_rerun/set.py | 4968dfce41158d47dc9301795d22f2f25b4657d4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
dir = '/mnt/scratch/songlin3/run/CDK2/L1Q/MD/ti_one-step/1Q_17/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../1Q-17_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
7f52f11801e3fffbfe661b3083bf80ff6d892b55 | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/nsr/config/__init__.py | 9a12221583f320c26938511e05c9621d25059fca | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 9,296 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/nsr/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines Non-Stop-Routing configuration.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__enabled',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'nsr', u'config']
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/nsr/config/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/nsr/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
})
self.__enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)
enabled = __builtin__.property(_get_enabled, _set_enabled)
_pyangbind_elements = {'enabled': enabled, }
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/nsr/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines Non-Stop-Routing configuration.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__enabled',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'nsr', u'config']
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/nsr/config/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/nsr/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
})
self.__enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)
enabled = __builtin__.property(_get_enabled, _set_enabled)
_pyangbind_elements = {'enabled': enabled, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
6ad574210aec5d9ae894dd168817de91af980274 | 482ed16cd1c8d721e98a9c460555802f7cce8906 | /run-tests/t032.py | 4caff0dd44dcf916fb563b60fce4a4dcec25511d | [
"MIT"
] | permissive | forkcodeaiyc/skulpt_parser | ea2347b2a452476854cf03412474fae63bca31c0 | dd592e9b91bcbbe0c5cfdb5c2da0fb5ae604a428 | refs/heads/master | 2023-09-04T11:32:09.760317 | 2021-10-11T22:58:18 | 2021-10-11T22:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print(("1234"[-3:3]))
| [
"albert-jan.nijburg@babylonhealth.com"
] | albert-jan.nijburg@babylonhealth.com |
e3b56cb7ad8f137d9438e36fdcaa4d4aeb773562 | 0a089d954518ef4a8f6aecf7097af8124e425d7e | /everest/tests/complete_app/resources.py | 3b0e03d850c91b76cc8b9fb22d05059d17c439e1 | [
"MIT"
] | permissive | papagr/everest | b13c06834ae38a5d441a9fd1c938d495ceca6e20 | 70c9b93c3061db5cb62428349d18b8fb8566411b | refs/heads/master | 2021-05-28T16:55:23.021924 | 2015-02-17T09:21:03 | 2015-02-17T09:21:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,060 | py | """
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Dec 1, 2011.
"""
from everest.resources.base import Member
from everest.resources.descriptors import attribute_alias
from everest.resources.descriptors import collection_attribute
from everest.resources.descriptors import member_attribute
from everest.resources.descriptors import terminal_attribute
from everest.tests.complete_app.interfaces import IMyEntity
from everest.tests.complete_app.interfaces import IMyEntityChild
from everest.tests.complete_app.interfaces import IMyEntityGrandchild
from everest.tests.complete_app.interfaces import IMyEntityParent
import datetime
from everest.constants import CARDINALITIES
__docformat__ = 'reStructuredText en'
__all__ = ['MyEntityChildMember',
'MyEntityMember',
'MyEntityGrandchildMember',
'MyEntityParentMember',
]
class MyEntityParentMember(Member):
relation = 'http://test.org/myentity-parent'
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
#
text_alias = attribute_alias('text')
class MyEntityMember(Member):
relation = 'http://test.org/myentity'
# Member.
parent = member_attribute(IMyEntityParent, 'parent',
cardinality=CARDINALITIES.ONETOONE,
backref='child')
# Collection.
children = collection_attribute(IMyEntityChild, 'children',
backref='parent')
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
# Number terminal.
number = terminal_attribute(int, 'number')
# Date time terminal.
date_time = terminal_attribute(datetime.datetime, 'date_time')
# Dotted attribute.
parent_text = terminal_attribute(str, 'parent.text_ent')
class MyEntityChildMember(Member):
relation = 'http://test.org/myentity-child'
# Member.
parent = member_attribute(IMyEntity, 'parent', backref='children')
# Collection accessed as entity attribute and represented as
# "parent equal to parent member" (backreferencing) specification.
children = collection_attribute(IMyEntityGrandchild,
entity_attr='children',
backref='parent')
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
class MyEntityGrandchildMember(Member):
relation = 'http://test.org/myentity-grandchild'
# String terminal.
text = terminal_attribute(str, 'text')
# String terminal with different name in entity.
text_rc = terminal_attribute(str, 'text_ent')
# Member.
parent = member_attribute(IMyEntityChild, 'parent', backref='children')
| [
"fogathmann@gmail.com"
] | fogathmann@gmail.com |
d83c4645ef7cc9f79a7c70d11198e76aad49ad85 | 9eaa17f50df49e5c6d204a7a7ece52f94c10b30b | /d3rlpy/algos/torch/utility.py | f431a78d8326da7fcd4c9ff7492d1147190162df | [
"MIT"
] | permissive | mchetouani/d3rlpy | 08d452ea2c8735b679a02474a3ae512caf91250a | 53ccf604298568b3a8322bb6f38bc33c0ac04ca2 | refs/heads/master | 2023-02-03T15:19:23.620608 | 2020-11-29T02:25:34 | 2020-11-29T02:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,435 | py | import torch
import numpy as np
from inspect import signature
def soft_sync(targ_model, model, tau):
with torch.no_grad():
params = model.parameters()
targ_params = targ_model.parameters()
for p, p_targ in zip(params, targ_params):
p_targ.data.mul_(1 - tau)
p_targ.data.add_(tau * p.data)
def hard_sync(targ_model, model):
with torch.no_grad():
params = model.parameters()
targ_params = targ_model.parameters()
for p, p_targ in zip(params, targ_params):
p_targ.data.copy_(p.data)
def set_eval_mode(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
module.eval()
def set_train_mode(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
module.train()
def to_cuda(impl, device):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, (torch.nn.Module, torch.nn.Parameter)):
module.cuda(device)
def to_cpu(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, (torch.nn.Module, torch.nn.Parameter)):
module.cpu()
def freeze(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
for p in module.parameters():
p.requires_grad = False
def unfreeze(impl):
for key in dir(impl):
module = getattr(impl, key)
if isinstance(module, torch.nn.Module):
for p in module.parameters():
p.requires_grad = True
def compute_augmentation_mean(augmentation, n_augmentations, func, inputs,
targets):
ret = 0.0
for _ in range(n_augmentations):
kwargs = dict(inputs)
for target in targets:
kwargs[target] = augmentation.transform(kwargs[target])
ret += func(**kwargs)
return ret / n_augmentations
def get_state_dict(impl):
rets = {}
for key in dir(impl):
obj = getattr(impl, key)
if isinstance(obj, (torch.nn.Module, torch.optim.Optimizer)):
rets[key] = obj.state_dict()
return rets
def set_state_dict(impl, chkpt):
for key in dir(impl):
obj = getattr(impl, key)
if isinstance(obj, (torch.nn.Module, torch.optim.Optimizer)):
obj.load_state_dict(chkpt[key])
def map_location(device):
if 'cuda' in device:
return lambda storage, loc: storage.cuda(device)
if 'cpu' in device:
return 'cpu'
raise ValueError('invalid device={}'.format(device))
def torch_api(scaler_targets=[]):
def _torch_api(f):
# get argument names
sig = signature(f)
arg_keys = list(sig.parameters.keys())[1:]
def wrapper(self, *args, **kwargs):
# convert all args to torch.Tensor
tensors = []
for i, val in enumerate(args):
if isinstance(val, torch.Tensor):
tensor = val
elif isinstance(val, np.ndarray):
if val.dtype == np.uint8:
dtype = torch.uint8
else:
dtype = torch.float32
tensor = torch.tensor(data=val,
dtype=dtype,
device=self.device)
else:
tensor = torch.tensor(data=val,
dtype=torch.float32,
device=self.device)
# preprocess
if self.scaler and arg_keys[i] in scaler_targets:
tensor = self.scaler.transform(tensor)
# make sure if the tensor is float32 type
if tensor.dtype != torch.float32:
tensor = tensor.float()
tensors.append(tensor)
return f(self, *tensors, **kwargs)
return wrapper
return _torch_api
def eval_api(f):
def wrapper(self, *args, **kwargs):
set_eval_mode(self)
return f(self, *args, **kwargs)
return wrapper
def train_api(f):
def wrapper(self, *args, **kwargs):
set_train_mode(self)
return f(self, *args, **kwargs)
return wrapper
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
612311f401562e6479a566b357190ad038ebd82e | a303cea3e4a5b9d774700111954b837e11ce8f64 | /Werkgroep API beveiliging/Implementaties/poc-oauth-python/web/woco_irma/woco_irma/wsgi.py | 910342af165b65cd78403f3a1559593c0eb7855a | [] | no_license | HenriKorver/KP-APIs | ee639ad9409b12710f6296e6cbf5d861b6d91571 | 3dde9bebf63c35b036145771ebf22d0851a5378c | refs/heads/master | 2021-06-12T05:22:10.712394 | 2021-06-09T14:55:41 | 2021-06-09T14:55:41 | 174,525,243 | 0 | 0 | null | 2019-05-16T15:06:41 | 2019-03-08T11:24:08 | HTML | UTF-8 | Python | false | false | 395 | py | """
WSGI config for woco_irma project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'woco_irma.settings')
application = get_wsgi_application()
| [
"sergei@maykinmedia.nl"
] | sergei@maykinmedia.nl |
1d8e81008f94de9bc956344891d46ea12658172d | bbed0e21b241c6a39341fed7d058563c30e2e0a4 | /tests/trainer/warnings_tests/test_flow_warnings.py | f5328d44020d717de90fa564f4c03c177fc40aa4 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | haven-jeon/pytorch-lightning | 656917a6cace5c3723d951f84515e8cb74a0ec79 | 6b9362bb73ada4c8702def43fe8f27eb3c382540 | refs/heads/master | 2023-02-23T18:29:08.298601 | 2021-01-28T07:42:14 | 2021-01-28T07:42:14 | 325,184,155 | 2 | 0 | Apache-2.0 | 2021-01-28T07:42:15 | 2020-12-29T04:19:44 | Python | UTF-8 | Python | false | false | 1,453 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from tests.base.boring_model import BoringModel
from pytorch_lightning import Trainer
import warnings
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
return acc
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test_no_depre_without_epoch_end(tmpdir):
"""
Tests that only training_step can be used
"""
model = TestModel()
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
)
with warnings.catch_warnings(record=True) as w:
trainer.fit(model)
for msg in w:
assert 'should not return anything ' not in str(msg)
| [
"noreply@github.com"
] | haven-jeon.noreply@github.com |
630f388259ee71dd9d1b0b441c0117db73ee9b8d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/frnpau013/question2.py | 71396f2476198ce3175c7385e66de6c4fc587b0d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | def line_maker(string, length):
if len(string) > length:
line = string[0:length]
if line[-1] != " " and string[len(line)] != " ":
while line[-1] != " ":
line = delete_till_space(line)
line = line[:-1]
return line, string[len(line) + 1:]
else:
if line[-1] != " ":
return line, string[len(line) + 1:]
else:
return line[:-1], string[len(line):]
else:
line = string
return line, ""
def delete_till_space(string):
if string[-1] == " ":
return string
else:
return delete_till_space(string[:-1])
def paragraph_printer(paragraph_list, length, name):
outfile = open(name, 'w')
for paragraph in paragraph_list:
while paragraph > "":
line, paragraph = line_maker(paragraph, length)
print(line, file = outfile)
print('\n', end = '', file = outfile)
def main():
# getting inputted variables
name_infile = input('Enter the input filename:\n')
name_outfile = input('Enter the output filename:\n')
line_length = eval(input('Enter the line width:\n'))
# reading text from input
infile = open(name_infile, "r")
in_text = infile.read()
infile.close()
# splitting text into list of paragraphs
paragraph_list = in_text.split('\n\n')
for i in range(len(paragraph_list)):
paragraph_list[i] = paragraph_list[i].replace('\n', ' ')
# printing reformatted paragraphs to output
paragraph_printer(paragraph_list, line_length, name_outfile)
if __name__ == '__main__':
main() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
0da64247f2f05b4d47521f6362ed1d6f94bae56f | 725ac5a0bf72829be627bf8dc82fdc51ba0f94ae | /Text_Classification/multi_label_classify_bert/inference.py | 6517992e9a21b0f516d22a671658ec98435a63c4 | [] | no_license | shawroad/NLP_pytorch_project | fa14b6e4a156229765e1d552901d0492d8e1def3 | 1272fed2dc8fef78a9ded0f1ae1644d613a3b57b | refs/heads/master | 2023-06-25T02:37:35.503251 | 2023-06-12T10:57:11 | 2023-06-12T10:57:11 | 229,694,655 | 530 | 104 | null | 2020-12-08T09:21:47 | 2019-12-23T06:54:29 | Python | UTF-8 | Python | false | false | 2,940 | py | """
@file : inference.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2021-06-23
"""
import torch
from tqdm import tqdm
import pandas as pd
from model import Model
from config import set_args
from torch.utils.data import Dataset, DataLoader
from transformers.models.bert import BertTokenizer
class MyDataset(Dataset):
def __init__(self, dataframe, maxlen=256, test=False):
self.df = dataframe
self.maxlen = maxlen
self.test = test
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
# 将问题和其对应的细节进行拼接
text = str(self.df.question_title.values[idx]) + str(self.df.question_detail.values[idx])
encoding = tokenizer(text, padding='max_length', truncation=True, max_length=self.maxlen, return_tensors='pt')
input_ids = encoding['input_ids'][0]
attention_mask = encoding['attention_mask'][0]
if self.test:
return input_ids, attention_mask
else:
# 如果不是测试集 制作标签
tags = self.df.tag_ids.values[idx].split('|')
tags = [int(x) - 1 for x in tags] # 标签是从零开始的
label = torch.zeros((args.num_classes,))
label[tags] = 1 # 转成类似one_hot标签
return input_ids, attention_mask, label
def test_model():
result = []
model.eval()
tk = tqdm(test_loader, total=len(test_loader), position=0, leave=True)
with torch.no_grad():
for idx, (input_ids, attention_mask) in enumerate(tk):
input_ids, attention_mask = input_ids.cuda(), attention_mask.cuda()
output = model(input_ids, attention_mask)
for res in output: # 后处理,找大于0.5的类别(阈值可以微调),如果多了就取TOP5,如果没有就取TOP1
_, res1 = torch.topk(res, 5)
res1 = res1.cpu().numpy()
res2 = torch.where(res > 0.5)[0]
res2 = res2.cpu().numpy()
if len(res2) > 5:
result.append(res1)
elif len(res2) == 0:
result.append(res1[0])
else:
result.append(res2)
with open('submission.csv', 'w')as f:
for i in range(len(result)):
f.write(str(i) + ',')
res = [str(x + 1) for x in result[i]]
if len(res) < 5:
res += ['-1'] * (5 - len(res))
f.write(','.join(res))
f.write('\n')
if __name__ == '__main__':
args = set_args()
test = pd.read_csv(args.test_data)
test_set = MyDataset(test, test=True)
tokenizer = BertTokenizer.from_pretrained(args.vocab)
model = Model()
# 加载权重
model.load_state_dict(torch.load('model_epoch1.bin'))
test_loader = DataLoader(test_set, batch_size=args.test_batch_size, shuffle=False)
test_model()
| [
"luxiaonlp@163.com"
] | luxiaonlp@163.com |
de909440434b4cb133fa34f8573d268118fd1143 | fa889d051a1b3c4d861fb06b10aa5b2e21f97123 | /kbe/src/lib/python/Lib/ssl.py | d3c18ed1b7936b1eea54635f05cf51c62b1d99b0 | [
"MIT",
"LGPL-3.0-only",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0"
] | permissive | BuddhistDeveloper/HeroLegendServer | bcaa837e3bbd6544ce0cf8920fd54a1a324d95c8 | 8bf77679595a2c49c6f381c961e6c52d31a88245 | refs/heads/master | 2022-12-08T00:32:45.623725 | 2018-01-15T02:01:44 | 2018-01-15T02:01:44 | 117,069,431 | 1 | 1 | MIT | 2022-11-19T15:58:30 | 2018-01-11T08:05:32 | Python | UTF-8 | Python | false | false | 34,420 | py | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
PROTOCOL_TLSv1_1
PROTOCOL_TLSv1_2
The following constants identify various SSL alert message descriptions as per
http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6
ALERT_DESCRIPTION_CLOSE_NOTIFY
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE
ALERT_DESCRIPTION_BAD_RECORD_MAC
ALERT_DESCRIPTION_RECORD_OVERFLOW
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE
ALERT_DESCRIPTION_HANDSHAKE_FAILURE
ALERT_DESCRIPTION_BAD_CERTIFICATE
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE
ALERT_DESCRIPTION_CERTIFICATE_REVOKED
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN
ALERT_DESCRIPTION_ILLEGAL_PARAMETER
ALERT_DESCRIPTION_UNKNOWN_CA
ALERT_DESCRIPTION_ACCESS_DENIED
ALERT_DESCRIPTION_DECODE_ERROR
ALERT_DESCRIPTION_DECRYPT_ERROR
ALERT_DESCRIPTION_PROTOCOL_VERSION
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY
ALERT_DESCRIPTION_INTERNAL_ERROR
ALERT_DESCRIPTION_USER_CANCELLED
ALERT_DESCRIPTION_NO_RENEGOTIATION
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
ALERT_DESCRIPTION_UNRECOGNIZED_NAME
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
"""
import textwrap
import re
import sys
import os
from collections import namedtuple
from enum import Enum as _Enum
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import (VERIFY_DEFAULT, VERIFY_CRL_CHECK_LEAF, VERIFY_CRL_CHECK_CHAIN,
VERIFY_X509_STRICT)
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_egd, RAND_add, RAND_bytes, RAND_pseudo_bytes
def _import_symbols(prefix):
for n in dir(_ssl):
if n.startswith(prefix):
globals()[n] = getattr(_ssl, n)
_import_symbols('OP_')
_import_symbols('ALERT_DESCRIPTION_')
_import_symbols('SSL_ERROR_')
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _ssl import _OPENSSL_API_VERSION
_PROTOCOL_NAMES = {
PROTOCOL_TLSv1: "TLSv1",
PROTOCOL_SSLv23: "SSLv23",
PROTOCOL_SSLv3: "SSLv3",
}
try:
from _ssl import PROTOCOL_SSLv2
_SSLv2_IF_EXISTS = PROTOCOL_SSLv2
except ImportError:
_SSLv2_IF_EXISTS = None
else:
_PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
try:
from _ssl import PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
except ImportError:
pass
else:
_PROTOCOL_NAMES[PROTOCOL_TLSv1_1] = "TLSv1.1"
_PROTOCOL_NAMES[PROTOCOL_TLSv1_2] = "TLSv1.2"
if sys.platform == "win32":
from _ssl import enum_certificates, enum_crls
from socket import socket, AF_INET, SOCK_STREAM, create_connection
from socket import SOL_SOCKET, SO_TYPE
import base64 # for DER-to-PEM translation
import errno
socket_error = OSError # keep that public name in module namespace
if _ssl.HAS_TLS_UNIQUE:
CHANNEL_BINDING_TYPES = ['tls-unique']
else:
CHANNEL_BINDING_TYPES = []
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer any AES-GCM over any AES-CBC for better performance and security
# * Then Use HIGH cipher suites as a fallback
# * Then Use 3DES as fallback which is secure but slow
# * Finally use RC4 as a fallback which is problematic but needed for
# compatibility some times.
# * Disable NULL authentication, NULL encryption, and MD5 MACs for security
# reasons
_DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
)
# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer any AES-GCM over any AES-CBC for better performance and security
# * Then Use HIGH cipher suites as a fallback
# * Then Use 3DES as fallback which is secure but slow
# * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, and RC4 for
# security reasons
_RESTRICTED_SERVER_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:!RC4'
)
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
leftmost, *remainder = dn.split(r'.')
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survery of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.
    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    dnsnames = []
    # subjectAltName dNSName entries take precedence over the subject CN.
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # No candidate matched; report every name that was tried.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
# Result type of get_default_verify_paths(): resolved cafile/capath plus
# the raw OpenSSL environment-variable names and compiled-in locations.
DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
    "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
    "openssl_capath")
def get_default_verify_paths():
    """Return paths to default cafile and capath.
    """
    parts = _ssl.get_default_verify_paths()
    cafile_env, default_cafile, capath_env, default_capath = parts
    # An environment variable, when set, shadows the compiled-in location.
    cafile = os.environ.get(cafile_env, default_cafile)
    capath = os.environ.get(capath_env, default_capath)
    return DefaultVerifyPaths(
        cafile if os.path.isfile(cafile) else None,
        capath if os.path.isdir(capath) else None,
        *parts)
class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")):
    """ASN.1 object identifier lookup
    Fields (nid, shortname, longname, oid) are resolved through the
    _ssl-level _txt2obj/_nid2obj helpers.
    """
    __slots__ = ()
    def __new__(cls, oid):
        # Resolve a dotted OID string; name lookup disabled (name=False).
        return super().__new__(cls, *_txt2obj(oid, name=False))
    @classmethod
    def fromnid(cls, nid):
        """Create _ASN1Object from OpenSSL numeric ID
        """
        return super().__new__(cls, *_nid2obj(nid))
    @classmethod
    def fromname(cls, name):
        """Create _ASN1Object from short name, long name or OID
        """
        return super().__new__(cls, *_txt2obj(name, name=True))
class Purpose(_ASN1Object, _Enum):
    """SSLContext purpose flags with X509v3 Extended Key Usage objects
    """
    # Extended Key Usage OIDs: id-kp-serverAuth / id-kp-clientAuth.
    SERVER_AUTH = '1.3.6.1.5.5.7.3.1'
    CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'
class SSLContext(_SSLContext):
    """An SSLContext holds various SSL-related configuration options and
    data, such as certificates and possibly a private key."""
    __slots__ = ('protocol', '__weakref__')
    # Windows system certificate stores probed by load_default_certs().
    _windows_cert_stores = ("CA", "ROOT")
    def __new__(cls, protocol, *args, **kwargs):
        self = _SSLContext.__new__(cls, protocol)
        if protocol != _SSLv2_IF_EXISTS:
            # Every protocol except SSLv2 gets the module default ciphers.
            self.set_ciphers(_DEFAULT_CIPHERS)
        return self
    def __init__(self, protocol):
        self.protocol = protocol
    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap an existing stream socket in an SSLSocket bound to this
        context."""
        return SSLSocket(sock=sock, server_side=server_side,
                         do_handshake_on_connect=do_handshake_on_connect,
                         suppress_ragged_eofs=suppress_ragged_eofs,
                         server_hostname=server_hostname,
                         _context=self)
    def set_npn_protocols(self, npn_protocols):
        """Advertise NPN protocols, wire-encoded as length-prefixed ASCII
        byte strings (each 1..255 bytes long)."""
        protos = bytearray()
        for protocol in npn_protocols:
            b = bytes(protocol, 'ascii')
            if len(b) == 0 or len(b) > 255:
                raise SSLError('NPN protocols must be 1 to 255 in length')
            protos.append(len(b))
            protos.extend(b)
        self._set_npn_protocols(protos)
    def _load_windows_store_certs(self, storename, purpose):
        # Collect DER certificates from one Windows store and hand the
        # concatenated blob to OpenSSL via load_verify_locations().
        certs = bytearray()
        for cert, encoding, trust in enum_certificates(storename):
            # CA certs are never PKCS#7 encoded
            if encoding == "x509_asn":
                # `trust is True` means trusted for all purposes; otherwise
                # `trust` is a collection of purpose OIDs.
                if trust is True or purpose.oid in trust:
                    certs.extend(cert)
        self.load_verify_locations(cadata=certs)
        return certs
    def load_default_certs(self, purpose=Purpose.SERVER_AUTH):
        """Load the system's default CA certificates for *purpose*."""
        if not isinstance(purpose, _ASN1Object):
            raise TypeError(purpose)
        if sys.platform == "win32":
            for storename in self._windows_cert_stores:
                self._load_windows_store_certs(storename, purpose)
        else:
            self.set_default_verify_paths()
def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None,
                           capath=None, cadata=None):
    """Create a SSLContext object with default settings.
    NOTE: The protocol and settings may change anytime without prior
    deprecation. The values represent a fair balance between maximum
    compatibility and security.

    purpose -- Purpose.SERVER_AUTH for client-side sockets (verify the
    server), Purpose.CLIENT_AUTH for server-side sockets.
    cafile/capath/cadata -- optional explicit CA material; when omitted
    and verification is enabled, system defaults are loaded.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)
    context = SSLContext(PROTOCOL_SSLv23)
    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2
    # SSLv3 has problematic security and is only required for really old
    # clients such as IE6 on Windows XP
    context.options |= OP_NO_SSLv3
    # disable compression to prevent CRIME attacks (OpenSSL 1.0+)
    context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)
    if purpose == Purpose.SERVER_AUTH:
        # verify certs and host name in client mode
        context.verify_mode = CERT_REQUIRED
        context.check_hostname = True
    elif purpose == Purpose.CLIENT_AUTH:
        # Prefer the server's ciphers by default so that we get stronger
        # encryption
        context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
        # Use single use keys in order to improve forward secrecy
        context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
        context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
        # disallow ciphers with known vulnerabilities
        context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)
    return context
def _create_stdlib_context(protocol=PROTOCOL_SSLv23, *, cert_reqs=None,
                           check_hostname=False, purpose=Purpose.SERVER_AUTH,
                           certfile=None, keyfile=None,
                           cafile=None, capath=None, cadata=None):
    """Create a SSLContext object for Python stdlib modules
    All Python stdlib modules shall use this function to create SSLContext
    objects in order to keep common settings in one place. The configuration
    is less restrictive than create_default_context()'s to increase backward
    compatibility.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)
    context = SSLContext(protocol)
    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2
    if cert_reqs is not None:
        context.verify_mode = cert_reqs
    context.check_hostname = check_hostname
    # A key without a certificate is never usable.
    if keyfile and not certfile:
        raise ValueError("certfile must be specified")
    if certfile or keyfile:
        context.load_cert_chain(certfile, keyfile)
    # load CA root certs
    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)
    return context
class SSLSocket(socket):
    """This class implements a subtype of socket.socket that wraps
    the underlying OS socket in an SSL context when necessary, and
    provides read and write methods over that channel."""

    def __init__(self, sock=None, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
                 suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
                 server_hostname=None,
                 _context=None):
        # Either adopt a ready-made SSLContext, or build one from the
        # legacy keyword arguments (keyfile/certfile/cert_reqs/...).
        if _context:
            self._context = _context
        else:
            if server_side and not certfile:
                raise ValueError("certfile must be specified for server-side "
                                 "operations")
            if keyfile and not certfile:
                raise ValueError("certfile must be specified")
            if certfile and not keyfile:
                keyfile = certfile
            self._context = SSLContext(ssl_version)
            self._context.verify_mode = cert_reqs
            if ca_certs:
                self._context.load_verify_locations(ca_certs)
            if certfile:
                self._context.load_cert_chain(certfile, keyfile)
            if npn_protocols:
                self._context.set_npn_protocols(npn_protocols)
            if ciphers:
                self._context.set_ciphers(ciphers)
            self.keyfile = keyfile
            self.certfile = certfile
            self.cert_reqs = cert_reqs
            self.ssl_version = ssl_version
            self.ca_certs = ca_certs
            self.ciphers = ciphers
        # Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
        # mixed in.
        if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
            raise NotImplementedError("only stream sockets are supported")
        if server_side and server_hostname:
            raise ValueError("server_hostname can only be specified "
                             "in client mode")
        if self._context.check_hostname and not server_hostname:
            if HAS_SNI:
                raise ValueError("check_hostname requires server_hostname")
            else:
                raise ValueError("check_hostname requires server_hostname, "
                                 "but it's not supported by your OpenSSL "
                                 "library")
        self.server_side = server_side
        self.server_hostname = server_hostname
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Take over the OS-level socket: adopt an existing socket object,
        # an existing file descriptor, or create a fresh socket.
        if sock is not None:
            socket.__init__(self,
                            family=sock.family,
                            type=sock.type,
                            proto=sock.proto,
                            fileno=sock.fileno())
            self.settimeout(sock.gettimeout())
            sock.detach()
        elif fileno is not None:
            socket.__init__(self, fileno=fileno)
        else:
            socket.__init__(self, family=family, type=type, proto=proto)
        # See if we are connected
        try:
            self.getpeername()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            connected = False
        else:
            connected = True
        self._closed = False
        self._sslobj = None
        self._connected = connected
        if connected:
            # create the SSL object
            try:
                self._sslobj = self._context._wrap_socket(self, server_side,
                                                          server_hostname)
                if do_handshake_on_connect:
                    timeout = self.gettimeout()
                    if timeout == 0.0:
                        # non-blocking
                        raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
                    self.do_handshake()
            except (OSError, ValueError):
                self.close()
                raise

    @property
    def context(self):
        """The SSLContext this socket is bound to."""
        return self._context

    @context.setter
    def context(self, ctx):
        self._context = ctx
        self._sslobj.context = ctx

    def dup(self):
        # BUGFIX: the original did `raise NotImplemented(...)`, which calls
        # the NotImplemented singleton and produces a TypeError at runtime
        # instead of the intended exception.
        raise NotImplementedError("Can't dup() %s instances" %
                                  self.__class__.__name__)

    def _checkClosed(self, msg=None):
        # raise an exception here if you wish to check for spurious closes
        pass

    def _check_connected(self):
        if not self._connected:
            # getpeername() will raise ENOTCONN if the socket is really
            # not connected; note that we can be connected even without
            # _connected being set, e.g. if connect() first returned
            # EAGAIN.
            self.getpeername()

    def read(self, len=0, buffer=None):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        self._checkClosed()
        if not self._sslobj:
            raise ValueError("Read on closed or unwrapped SSL socket.")
        try:
            if buffer is not None:
                v = self._sslobj.read(len, buffer)
            else:
                v = self._sslobj.read(len or 1024)
            return v
        except SSLError as x:
            # A "ragged EOF" (peer closed without close_notify) is reported
            # as end-of-stream when suppress_ragged_eofs is set.
            if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                if buffer is not None:
                    return 0
                else:
                    return b''
            else:
                raise

    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        self._checkClosed()
        if not self._sslobj:
            raise ValueError("Write on closed or unwrapped SSL socket.")
        return self._sslobj.write(data)

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        self._checkClosed()
        self._check_connected()
        return self._sslobj.peer_certificate(binary_form)

    def selected_npn_protocol(self):
        """Return the protocol negotiated via NPN, or None."""
        self._checkClosed()
        if not self._sslobj or not _ssl.HAS_NPN:
            return None
        else:
            return self._sslobj.selected_npn_protocol()

    def cipher(self):
        """Return (cipher_name, protocol_version, secret_bits) or None."""
        self._checkClosed()
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()

    def compression(self):
        """Return the negotiated compression algorithm name, or None."""
        self._checkClosed()
        if not self._sslobj:
            return None
        else:
            return self._sslobj.compression()

    def send(self, data, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            try:
                v = self._sslobj.write(data)
            except SSLError as x:
                # Mirror non-blocking socket semantics: report "0 bytes
                # sent" when OpenSSL wants to retry.
                if x.args[0] == SSL_ERROR_WANT_READ:
                    return 0
                elif x.args[0] == SSL_ERROR_WANT_WRITE:
                    return 0
                else:
                    raise
            else:
                return v
        else:
            return socket.send(self, data, flags)

    def sendto(self, data, flags_or_addr, addr=None):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        elif addr is None:
            return socket.sendto(self, data, flags_or_addr)
        else:
            return socket.sendto(self, data, flags_or_addr, addr)

    def sendmsg(self, *args, **kwargs):
        # Ensure programs don't send data unencrypted if they try to
        # use this method.
        raise NotImplementedError("sendmsg not allowed on instances of %s" %
                                  self.__class__)

    def sendall(self, data, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            # Loop until every byte has been handed to OpenSSL; send()
            # may transmit only part of the buffer (or 0 on WANT_*).
            amount = len(data)
            count = 0
            while (count < amount):
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)

    def recv(self, buflen=1024, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            return self.read(buflen)
        else:
            return socket.recv(self, buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        self._checkClosed()
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            return self.read(nbytes, buffer)
        else:
            return socket.recv_into(self, buffer, nbytes, flags)

    def recvfrom(self, buflen=1024, flags=0):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, buffer, nbytes, flags)

    def recvmsg(self, *args, **kwargs):
        raise NotImplementedError("recvmsg not allowed on instances of %s" %
                                  self.__class__)

    def recvmsg_into(self, *args, **kwargs):
        raise NotImplementedError("recvmsg_into not allowed on instances of "
                                  "%s" % self.__class__)

    def pending(self):
        """Return the number of already-decrypted bytes buffered by
        OpenSSL (0 when unwrapped)."""
        self._checkClosed()
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0

    def shutdown(self, how):
        self._checkClosed()
        self._sslobj = None
        socket.shutdown(self, how)

    def unwrap(self):
        """Shut the TLS layer down and return the bare socket object."""
        if self._sslobj:
            s = self._sslobj.shutdown()
            self._sslobj = None
            return s
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def _real_close(self):
        self._sslobj = None
        socket._real_close(self)

    def do_handshake(self, block=False):
        """Perform a TLS/SSL handshake."""
        self._check_connected()
        timeout = self.gettimeout()
        try:
            # `block=True` forces a blocking handshake even on a socket
            # currently in non-blocking mode.
            if timeout == 0.0 and block:
                self.settimeout(None)
            self._sslobj.do_handshake()
        finally:
            self.settimeout(timeout)
        if self.context.check_hostname:
            if not self.server_hostname:
                raise ValueError("check_hostname needs server_hostname "
                                 "argument")
            match_hostname(self.getpeercert(), self.server_hostname)

    def _real_connect(self, addr, connect_ex):
        if self.server_side:
            raise ValueError("can't connect in server-side mode")
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call.  We connect it, then wrap it.
        if self._connected:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._sslobj = self.context._wrap_socket(self, False, self.server_hostname)
        try:
            if connect_ex:
                rc = socket.connect_ex(self, addr)
            else:
                rc = None
                socket.connect(self, addr)
            if not rc:
                self._connected = True
                if self.do_handshake_on_connect:
                    self.do_handshake()
            return rc
        except (OSError, ValueError):
            self._sslobj = None
            raise

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        self._real_connect(addr, False)

    def connect_ex(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        return self._real_connect(addr, True)

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        newsock = self.context.wrap_socket(newsock,
                    do_handshake_on_connect=self.do_handshake_on_connect,
                    suppress_ragged_eofs=self.suppress_ragged_eofs,
                    server_side=True)
        return newsock, addr

    def get_channel_binding(self, cb_type="tls-unique"):
        """Get channel binding data for current connection.  Raise ValueError
        if the requested `cb_type` is not supported.  Return bytes of the data
        or None if the data is not available (e.g. before the handshake).
        """
        if cb_type not in CHANNEL_BINDING_TYPES:
            raise ValueError("Unsupported channel binding type")
        if cb_type != "tls-unique":
            raise NotImplementedError(
                            "{0} channel binding type not implemented"
                            .format(cb_type))
        if self._sslobj is None:
            return None
        return self._sslobj.tls_unique_cb()
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True,
                ciphers=None):
    """Convenience wrapper: build an SSLSocket around *sock* using the
    legacy per-call configuration arguments (see SSLSocket.__init__)."""
    return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
                     server_side=server_side, cert_reqs=cert_reqs,
                     ssl_version=ssl_version, ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs,
                     ciphers=ciphers)
# some utility functions
def cert_time_to_seconds(cert_time):
    """Takes a date-time string in standard ASN1_print form
    ("MON DAY 24HOUR:MINUTE:SEC YEAR GMT") and return
    a Python time value in seconds past the epoch.

    BUGFIX: the original used time.mktime(), which interprets the GMT
    timestamp in *local* time, and relied on the locale-dependent %b
    month directive.  The month is now parsed locale-independently and
    the result computed with calendar.timegm() (UTC), matching the
    behaviour of the upstream ssl.cert_time_to_seconds().
    Raises ValueError when cert_time does not match the expected format.
    """
    from calendar import timegm
    from time import strptime, struct_time

    months = (
        "Jan", "Feb", "Mar", "Apr", "May", "Jun",
        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
    )
    time_format = ' %d %H:%M:%S %Y GMT'  # NOTE: no %b, locale-independent
    try:
        month_number = months.index(cert_time[:3].title()) + 1
    except ValueError:
        raise ValueError('time data %r does not match '
                         'format "%%b%s"' % (cert_time, time_format))
    else:
        # Parse everything after the month name, then splice the month in.
        tt = strptime(cert_time[3:], time_format)
        ss = struct_time(tt[:1] + (month_number,) + tt[2:])
        return timegm(ss)
# PEM armor lines used by the DER<->PEM conversion helpers below.
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
    """Takes a certificate in binary DER format and returns the
    PEM version of it as a string."""
    b64 = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict')
    # Base64 payload is wrapped at 64 columns, as PEM requires.
    body = textwrap.fill(b64, 64)
    return '\n'.join([PEM_HEADER, body, PEM_FOOTER]) + '\n'
def PEM_cert_to_DER_cert(pem_cert_string):
    """Takes a certificate in ASCII PEM format and returns the
    DER-encoded version of it as a byte sequence"""
    if not pem_cert_string.startswith(PEM_HEADER):
        raise ValueError("Invalid PEM encoding; must start with %s"
                         % PEM_HEADER)
    stripped = pem_cert_string.strip()
    if not stripped.endswith(PEM_FOOTER):
        raise ValueError("Invalid PEM encoding; must end with %s"
                         % PEM_FOOTER)
    # Drop the armor lines, keep the base64 payload in between.
    payload = stripped[len(PEM_HEADER):-len(PEM_FOOTER)]
    return base64.decodebytes(payload.encode('ASCII', 'strict'))
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt.

    NOTE(review): the PROTOCOL_SSLv3 default is obsolete and insecure;
    modern servers will refuse it, so callers should pass an explicit TLS
    protocol.  Left unchanged here to preserve the call contract.
    """
    host, port = addr
    # Only verify the peer when the caller supplied a CA bundle.
    if ca_certs is not None:
        cert_reqs = CERT_REQUIRED
    else:
        cert_reqs = CERT_NONE
    context = _create_stdlib_context(ssl_version,
                                     cert_reqs=cert_reqs,
                                     cafile=ca_certs)
    with create_connection(addr) as sock:
        with context.wrap_socket(sock) as sslsock:
            dercert = sslsock.getpeercert(True)
            return DER_cert_to_PEM_cert(dercert)
def get_protocol_name(protocol_code):
    """Return the display name for *protocol_code*, or '<unknown>'."""
    try:
        return _PROTOCOL_NAMES[protocol_code]
    except KeyError:
        return '<unknown>'
| [
"liushuaigeq@163.com"
] | liushuaigeq@163.com |
113d33a1c0aa203a0c29d4de06bc864d0cef03be | c725fc58d217f6730687a565fbf85fcf174e8009 | /txt_figs/pre_pkoffset_correction/test_miscen_nfw_Delt_L.py | 97d45749662802d0049715673d2352a0b96024dd | [] | no_license | Kein-Cary/Intracluster-Light | 6faca2bd0413244765474beeffd53cfaa401eef2 | ffcb2d6ea10be45422c7e73408fc6ff6cadf3a85 | refs/heads/master | 2023-03-18T04:51:06.539453 | 2023-03-12T02:48:01 | 2023-03-12T02:48:01 | 160,816,520 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,203 | py | import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import h5py
import numpy as np
import pandas as pds
import emcee
import corner
import astropy.constants as C
import astropy.units as U
from astropy import cosmology as apcy
from scipy import signal
from scipy import interpolate as interp
from scipy import optimize
from scipy import integrate as integ
from fig_out_module import arr_jack_func
from light_measure import cov_MX_func
from surface_mass_density import sigmam, sigmac, input_cosm_model, cosmos_param
# cosmology model: flat LCDM, Planck15 with H0 = 67.74, Om0 = 0.311
Test_model = apcy.Planck15.clone(H0 = 67.74, Om0 = 0.311)
H0 = Test_model.H0.value
h = H0/100  # dimensionless Hubble parameter
Omega_m = Test_model.Om0
Omega_lambda = 1.-Omega_m
Omega_k = 1.- (Omega_lambda + Omega_m)
Omega_b = Test_model.Ob0
# physical constants and unit-conversion factors
M_sun = C.M_sun.value # in unit of kg
kpc2m = U.kpc.to(U.m)
Mpc2cm = U.Mpc.to(U.cm)
Msun2kg = U.M_sun.to(U.kg)
rad2arcsec = U.rad.to(U.arcsec)
Lsun = C.L_sun.value*10**7 # (erg/s/cm^2)
Jy = 10**(-23) # (erg/s)/cm^2/Hz
F0 = 3.631 * 10**(-6) * Jy  # AB zero-point flux density
L_speed = C.c.value # m/s
pixel = 0.396  # arcsec per pixel -- presumably the SDSS pixel scale; confirm
band = ['r', 'g', 'i']
L_wave = np.array([ 6166, 4686, 7480 ])  # filter wavelengths (Angstrom), same order as `band` -- TODO confirm
## solar Magnitude corresponding to SDSS filter
Mag_sun = [ 4.65, 5.11, 4.53 ]
### === ### initialize surface_mass_density.py with the cosmology above
input_cosm_model( get_model = Test_model )
cosmos_param()
### === ### halo profile model
from colossus.cosmology import cosmology
from colossus.halo import profile_einasto
from colossus.halo import profile_nfw
from colossus.halo import profile_hernquist
# NOTE(review): this assignment rebinds `cosmos_param`, shadowing the
# function of the same name imported from surface_mass_density (which is
# called once above).  Consider renaming the dict in a follow-up.
cosmos_param = {'flat': True, 'H0': H0, 'Om0': Omega_m, 'Ob0': Omega_b, 'sigma8' : 0.811, 'ns': 0.965}
cosmology.addCosmology('myCosmo', cosmos_param )
cosmo = cosmology.setCosmology( 'myCosmo' )
### === ### miscentering nfw profile (Zu et al. 2020, section 3.)
def mis_p_func( r_off, sigma_off):
    """
    Probability density of the BCG miscentering offset,
    p(r_off) = (r_off / sigma_off^2) * exp(-r_off / sigma_off).
    r_off : the offset between cluster center and BCGs
    sigma_off : characteristic offset
    """
    scaled_r = r_off / sigma_off**2
    damping = np.exp( -1 * r_off / sigma_off )
    return scaled_r * damping
def off_sigma_func( rp, sigma_off, z, c_mass, lgM, v_m):
    """Miscentered surface-density profile: sigmam() azimuthally averaged
    around an offset center, then averaged over the offset distribution
    mis_p_func().  Simpson-rule variant; see cc_off_sigma_func for the
    trapezoid-based variant used by obs_sigma_func.

    rp : projected radii; sigma_off : characteristic offset scale;
    z, c_mass, lgM : redshift, concentration, log halo mass for sigmam().
    NOTE(review): `v_m` is accepted but never used in this function.
    """
    theta = np.linspace( 0, 2 * np.pi, 100)
    d_theta = np.diff( theta )  # NOTE(review): unused in this variant
    # accept scalar rp by promoting it to a 1-element array
    try:
        NR = len( rp )
    except:
        rp = np.array( [rp] )
        NR = len( rp )
    # sample the offset distribution out to 15 * sigma_off
    r_off = np.arange( 0, 15 * sigma_off, 0.02 * sigma_off )
    off_pdf = mis_p_func( r_off, sigma_off )
    NR_off = len( r_off )
    surf_dens_off = np.zeros( NR, dtype = np.float32 )
    for ii in range( NR ):
        surf_dens_arr = np.zeros( NR_off, dtype = np.float32 )
        for jj in range( NR_off ):
            # law-of-cosines: radius of points on the circle of radius
            # rp[ii] as seen from a center displaced by r_off[jj]
            r_cir = np.sqrt( rp[ii]**2 + 2 * rp[ii] * r_off[jj] * np.cos( theta ) + r_off[jj]**2 )
            surf_dens_of_theta = sigmam( r_cir, lgM, z, c_mass )
            ## integration on theta (azimuthal average)
            surf_dens_arr[jj] = integ.simps( surf_dens_of_theta, theta) / ( 2 * np.pi )
        ## integration on r_off (average over the offset distribution)
        integ_f = surf_dens_arr * off_pdf
        surf_dens_ii = integ.simps( integ_f, r_off )
        surf_dens_off[ ii ] = surf_dens_ii
    off_sigma = surf_dens_off
    return off_sigma
def cc_off_sigma_func( rp, sigma_off, z, c_mass, lgM, v_m):
    """Miscentered surface-density profile (trapezoid-rule variant of
    off_sigma_func); this is the version called by obs_sigma_func.

    rp : projected radii; sigma_off : characteristic offset scale;
    z, c_mass, lgM : redshift, concentration, log halo mass for sigmam().
    NOTE(review): `v_m` is accepted but never used in this function.
    """
    theta = np.linspace( 0, 2 * np.pi, 100)
    d_theta = np.diff( theta )
    N_theta = len( theta )
    # accept scalar rp by promoting it to a 1-element array
    try:
        NR = len( rp )
    except:
        rp = np.array( [rp] )
        NR = len( rp )
    # sample the offset distribution out to 15 * sigma_off
    r_off = np.arange( 0, 15 * sigma_off, 0.02 * sigma_off )
    off_pdf = mis_p_func( r_off, sigma_off )
    dr_off = np.diff( r_off )
    NR_off = len( r_off )
    surf_dens_off = np.zeros( NR, dtype = np.float32 )
    for ii in range( NR ):
        # sigmam evaluated on the full (r_off, theta) grid for this radius
        surf_dens_arr = np.zeros( (NR_off, N_theta), dtype = np.float32 )
        for jj in range( NR_off ):
            # law-of-cosines radius around the displaced center
            r_cir = np.sqrt( rp[ii]**2 + 2 * rp[ii] * r_off[jj] * np.cos( theta ) + r_off[jj]**2 )
            surf_dens_arr[jj,:] = sigmam( r_cir, lgM, z, c_mass,)
        ## integration on theta (trapezoid rule, azimuthal average)
        medi_surf_dens = ( surf_dens_arr[:,1:] + surf_dens_arr[:,:-1] ) / 2
        sum_theta_fdens = np.sum( medi_surf_dens * d_theta, axis = 1) / ( 2 * np.pi )
        ## integration on r_off (trapezoid rule, offset-distribution average)
        integ_f = sum_theta_fdens * off_pdf
        medi_integ_f = ( integ_f[1:] + integ_f[:-1] ) / 2
        surf_dens_ii = np.sum( medi_integ_f * dr_off )
        surf_dens_off[ ii ] = surf_dens_ii
    off_sigma = surf_dens_off
    return off_sigma
def aveg_sigma_func(rp, sigma_arr, N_grid = 100):
    """Mean surface density within each radius:
    Sigma_bar(<R) = (2 / R^2) * int_0^R r * Sigma(r) dr,
    evaluated on a log-spaced grid from r = 1e-3 to R with a
    cubic-spline interpolation of the tabulated profile.

    rp : radii where sigma_arr is tabulated (same units assumed for both).
    N_grid : number of quadrature nodes per radius.
    """
    # `simps` was removed from modern SciPy (>= 1.14); prefer `simpson`
    # and fall back for old releases where only `simps` exists.
    _simpson = getattr(integ, 'simpson', None)
    if _simpson is None:
        _simpson = integ.simps
    NR = len( rp )
    aveg_sigma = np.zeros( NR, dtype = np.float32 )
    intep_sigma_F = interp.interp1d( rp, sigma_arr, kind = 'cubic', fill_value = 'extrapolate',)
    for ii in range( NR ):
        new_rp = np.logspace( -3, np.log10( rp[ii] ), N_grid )
        new_sigma = intep_sigma_F( new_rp )
        cumu_sigma = _simpson( new_rp * new_sigma, x = new_rp )
        aveg_sigma[ii] = 2 * cumu_sigma / rp[ii]**2
    return aveg_sigma
def obs_sigma_func( rp, f_off, sigma_off, z, c_mass, lgM, v_m):
    """Observed Sigma(R): mixture of the miscentered profile
    (cc_off_sigma_func) and the centered one (sigmam), weighted by the
    miscentered fraction f_off."""
    miscen_part = cc_off_sigma_func( rp, sigma_off, z, c_mass, lgM, v_m )
    center_part = sigmam( rp, lgM, z, c_mass )
    return f_off * miscen_part + ( 1 - f_off ) * center_part
def delta_sigma_func(rp, f_off, sigma_off, z, c_mass, lgM, v_m, N_grid = 100):
    """Excess surface density of the miscentering-corrected profile:
    DeltaSigma(R) = Sigma_bar(<R) - Sigma(R).

    rp : projected radii; f_off, sigma_off : miscentered fraction and
    characteristic offset; z, c_mass, lgM : passed through to
    obs_sigma_func (where v_m is currently unused).
    """
    sigma_arr = obs_sigma_func( rp, f_off, sigma_off, z, c_mass, lgM, v_m )
    aveg_sigma = aveg_sigma_func( rp, sigma_arr, N_grid = N_grid )
    # BUGFIX: the original returned `aveg_sigma - sigma_off`, subtracting
    # the scalar miscentering scale instead of the Sigma(R) profile.
    delta_sigma = aveg_sigma - sigma_arr
    return delta_sigma
### === ### SB profile to delta SB profile
def SB_to_Lumi_func(sb_arr, obs_z, band_str):
    """
    Convert surface brightness to luminosity surface density.
    sb_arr : need in terms of absolute magnitude, in AB system
    obs_z : redshift used for the (1+z) dimming correction
    band_str : one of 'r', 'g', 'i' (selects the solar magnitude)
    Returns luminosity in units of L_sun / kpc^2.
    Raises ValueError for an unrecognized band.
    """
    band_index = {'r': 0, 'g': 1, 'i': 2}
    if band_str not in band_index:
        # BUGFIX: the original if/if/if chain had no else, so an unknown
        # band fell through with Mag_dot undefined (NameError).
        raise ValueError("band_str must be one of 'r', 'g', 'i'")
    Mag_dot = Mag_sun[ band_index[ band_str ] ]
    # luminosity, in unit of L_sun / pc^2
    lumi = 10**( -0.4 * (sb_arr - Mag_dot + 21.572 - 10 * np.log10( obs_z + 1 ) ) )
    Lumi = lumi * 1e6 # in unit of L_sun / kpc^2
    return Lumi
def aveg_lumi_func(rp, lumi_arr, N_grid = 100):
    """Mean luminosity surface density within each radius:
    L_bar(<R) = (2 / R^2) * int_0^R r * L(r) dr,
    using linear interpolation of the tabulated profile on a log-spaced
    quadrature grid from r = 1e-3 to R.
    """
    # `simps` was removed from modern SciPy (>= 1.14); prefer `simpson`
    # and fall back for old releases where only `simps` exists.
    _simpson = getattr(integ, 'simpson', None)
    if _simpson is None:
        _simpson = integ.simps
    NR = len( rp )
    aveg_lumi = np.zeros( NR, dtype = np.float32 )
    intep_lumi_F = interp.interp1d( rp, lumi_arr, kind = 'linear', fill_value = 'extrapolate',)
    for ii in range( NR ):
        new_rp = np.logspace( -3, np.log10( rp[ii] ), N_grid )
        new_lumi = intep_lumi_F( new_rp )
        cumu_lumi = _simpson( new_rp * new_lumi, x = new_rp )
        aveg_lumi[ii] = 2 * cumu_lumi / rp[ii]**2
    return aveg_lumi
def delta_SB_func( rp, lumi_arr, N_grid = 100):
    """Mean luminosity within R minus the local luminosity at R."""
    mean_lumi = aveg_lumi_func( rp, lumi_arr, N_grid = N_grid )
    return mean_lumi - lumi_arr
def sersic_func(r, Ie, re, ndex):
    """Sersic profile I(r) = Ie * exp( b_n * (1 - (r/re)^(1/ndex)) ),
    with b_n approximated as 2*ndex - 0.324; I(re) == Ie by construction."""
    b_n = 2 * ndex - 0.324
    exponent = b_n * ( 1 - ( r / re )**(1 / ndex) )
    return Ie * np.exp( exponent )
### === ### fitting function
def prior_p_func( p ):
    """Flat (top-hat) log-prior for the MCMC fit: 0 when every parameter
    lies inside its allowed range, -inf otherwise.
    p : (M0, c_mass, m2l, f_off, sigma_off, Ie, Re, n)
    """
    M0, c_mass, m2l, f_off, sigma_off, Ie, Re, n = p[:]
    sersic_ok = ( 1e-2 < Ie < 1e1 ) and ( 5 < Re < 50 ) and ( 1 < n < 9 )
    halo_ok = ( 1 < c_mass < 50 ) and ( 2e2 < m2l < 5e3 )
    miscen_ok = ( 13.5 <= M0 <= 15 ) and ( 0 < f_off < 1 ) and ( 10 < sigma_off < 400 )
    if sersic_ok and halo_ok and miscen_ok:
        return 0
    return -np.inf
def ln_p_func(p, x, y, params, yerr):
    """Log-posterior for the emcee fit: flat prior (prior_p_func) plus a
    Gaussian log-likelihood with full covariance.

    p : (M0, c_mass, m2l, f_off, sigma_off, Ie, Re, n)
    x : radii (physical kpc, converted to kpc/h below); y : observed profile
    params : (z0, cov_mx, v_m); yerr : unused (the covariance matrix is used)
    """
    pre_p = prior_p_func( p )
    if not np.isfinite( pre_p ):
        return -np.inf
    else:
        M0, c_mass, m2l, f_off, sigma_off, Ie, Re, n = p[:]
        z0, cov_mx, v_m = params[:]
        ## sersic component of the model (BCG light), as a delta profile
        I_r = sersic_func( x, Ie, Re, n )
        aveg_I = aveg_sigma_func( x, I_r )
        delta_I = aveg_I - I_r
        ## miscen-nfw component; radii passed to sigmam in kpc/h
        off_sigma = obs_sigma_func( x * h, f_off, sigma_off, z0, c_mass, M0, v_m) # unit M_sun * h / kpc^2
        mean_off_sigma = aveg_sigma_func( x * h, off_sigma )
        off_delta_sigma = mean_off_sigma - off_sigma
        off_D_sigma = off_delta_sigma * h * 1e-6 # unit M_sun / pc^2
        ## model SB: stellar delta-profile plus the halo term scaled by 1/m2l
        mode_mu = delta_I + off_D_sigma / m2l
        cov_inv = np.linalg.pinv( cov_mx )
        delta = mode_mu - y
        # Gaussian log-likelihood: -0.5 * d^T C^-1 d
        chi2 = -0.5 * delta.T.dot( cov_inv ).dot(delta)
        return pre_p + chi2
def err_fit_func(p, x, y, params, yerr):
    """Chi-square for fitting a single mass-to-light ratio `m2l`:
    model = input_sigma / m2l, compared with y under the full covariance.
    p : the scalar m2l; params : (cov_mx, input_sigma); x and yerr unused.
    """
    m2l = p
    cov_mx, input_sigma = params[:]
    model = input_sigma / m2l
    residual = model - y
    inv_cov = np.linalg.pinv( cov_mx )
    return residual.T.dot( inv_cov ).dot( residual )
## configuration of the two BCG stellar-mass subsamples used below
cat_lis = ['low_BCG_star-Mass', 'high_BCG_star-Mass']
fig_name = ['low $M_{\\ast}$', 'high $M_{\\ast}$'] ## or line name
color_s = ['r', 'g', 'b']  # per-band plot colors (r, g, i)
line_c = ['b', 'r']        # per-subsample line colors
line_s = ['--', '-']       # per-subsample line styles
# reference redshift of the stacked profiles and its luminosity distance
z_ref = 0.25
Dl_ref = Test_model.luminosity_distance( z_ref ).value
### === ### take sub-sample profile to estimate error on delta_mu (the surface brightness )
def cov_arr():
    """Build per-subsample luminosity profiles, their jackknife means, and the
    covariance / correlation matrices used later by the MCMC fit.

    For every (mass bin, band) pair this reads the N_samples jackknife
    BG-subtracted SB profiles, converts SB to luminosity density, writes the
    per-subsample and jackknife-averaged profiles to csv, stores the
    cov/cor matrices in hdf5, and saves diagnostic figures.
    All paths are hard-coded to this analysis machine.
    """
    BG_path = '/home/xkchen/mywork/ICL/code/rig_common_cat/mass_bin_BG/'
    Lpro_path = '/home/xkchen/mywork/ICL/code/rig_common_cat/SB_in_Lsun/'

    N_samples = 30  # number of jackknife subsamples

    for mm in range( 2 ):
        for kk in range( 3 ):
            nt_r, nt_l, nt_ml, nt_Dl = [], [], [], []
            for nn in range( N_samples ):
                n_dat = pds.read_csv( BG_path + '%s_%s-band_jack-sub-%d_BG-sub_SB.csv' % (cat_lis[mm], band[kk], nn),)
                nn_r, nn_sb, nn_err = np.array( n_dat['R']), np.array( n_dat['BG_sub_SB']), np.array( n_dat['sb_err'])

                # keep 10 kpc <= R <= 1 Mpc only
                idvx = ( nn_r >= 10 ) & ( nn_r <= 1e3)

                # SB (nanomaggy-like) -> apparent mag -> absolute mag at Dl_ref -> L_sun/kpc^2
                nn_mag = 22.5 - 2.5 * np.log10( nn_sb[idvx] )
                nn_Mag = nn_mag - 5 * np.log10( Dl_ref * 10**6 / 10)
                nn_Lumi = SB_to_Lumi_func( nn_Mag, z_ref, band[kk] )

                aveg_Lumi = aveg_lumi_func( nn_r[idvx], nn_Lumi )
                delt_Lumi = aveg_Lumi - nn_Lumi

                keys = [ 'R', 'Lumi', 'mean_Lumi', 'delta_Lumi' ]
                values = [ nn_r[idvx], nn_Lumi, aveg_Lumi, delt_Lumi ]
                fill = dict(zip( keys, values) )
                out_data = pds.DataFrame( fill )
                out_data.to_csv( Lpro_path + '%s_%s-band_jack-sub-%d_Lumi-pros.csv' % (cat_lis[mm], band[kk], nn),)

                nt_r.append( nn_r[idvx] )
                nt_l.append( nn_Lumi )
                nt_ml.append( aveg_Lumi )
                nt_Dl.append( delt_Lumi )

            ### jack-mean pf mass and lumi profile
            aveg_R, aveg_L, aveg_L_err = arr_jack_func( nt_l, nt_r, N_samples)[:3]
            aveg_R, aveg_mL, aveg_mL_err = arr_jack_func( nt_ml, nt_r, N_samples)[:3]
            aveg_R, aveg_DL, aveg_DL_err = arr_jack_func( nt_Dl, nt_r, N_samples)[:3]

            keys = ['R', 'Lumi', 'Lumi_err', 'm_Lumi', 'm_Lumi_err', 'd_Lumi', 'd_Lumi_err']
            values = [ aveg_R, aveg_L, aveg_L_err, aveg_mL, aveg_mL_err, aveg_DL, aveg_DL_err ]
            fill = dict(zip( keys, values) )
            out_data = pds.DataFrame( fill )
            out_data.to_csv( Lpro_path + '%s_%s-band_aveg-jack_Lumi-pros.csv' % (cat_lis[mm], band[kk]),)

    ## cov-matrix
    for mm in range( 2 ):
        for kk in range( 3 ):
            nt_r, nt_l, nt_ml, nt_Dl = [], [], [], []
            for nn in range( N_samples ):
                d_dat = pds.read_csv( Lpro_path + '%s_%s-band_jack-sub-%d_Lumi-pros.csv' % (cat_lis[mm], band[kk], nn) )
                dd_r, dd_L, dd_mL, dd_DL = np.array( d_dat['R'] ), np.array( d_dat['Lumi'] ), np.array( d_dat['mean_Lumi'] ), np.array( d_dat['delta_Lumi'] )

                ## cov-matrix calculate with unit L_sun / pc**2
                nt_r.append( dd_r )
                nt_l.append( dd_L * 1e-6 )
                nt_ml.append( dd_mL * 1e-6 )
                nt_Dl.append( dd_DL * 1e-6 )

            # covariance of mu(r) and of Delta-mu(r) across the jackknife set
            R_m_0, cov_MX_0, cor_MX_0 = cov_MX_func( nt_r, nt_l, id_jack = True)
            R_m_1, cov_MX_1, cor_MX_1 = cov_MX_func( nt_r, nt_Dl, id_jack = True)

            with h5py.File( Lpro_path + '%s_%s-band_Lumi-pros_cov-cor.h5' % (cat_lis[mm], band[kk]), 'w') as f:
                f['cov_Mx'] = np.array( cov_MX_0 )
                f['cor_Mx'] = np.array( cor_MX_0 )
                f['R_kpc'] = np.array( R_m_0 )

            with h5py.File( Lpro_path + '%s_%s-band_Delta-Lumi-pros_cov-cor.h5' % (cat_lis[mm], band[kk]), 'w') as f:
                f['cov_Mx'] = np.array( cov_MX_1 )
                f['cor_Mx'] = np.array( cor_MX_1 )
                f['R_kpc'] = np.array( R_m_1 )

            # diagnostic figure: cov / cor of mu(r)
            fig = plt.figure( figsize = (13.12, 4.8) )
            ax0 = fig.add_axes([0.05, 0.10, 0.45, 0.80])
            ax1 = fig.add_axes([0.50, 0.10, 0.45, 0.80])

            ax0.set_title( fig_name[ mm ] + ',%s band, coV_arr' % band[kk] )
            tf = ax0.imshow(cov_MX_0, origin = 'lower', cmap = 'rainbow', norm = mpl.colors.LogNorm(),)
            ax0.set_ylim(-0.5, len(R_m_0) - 0.5 )

            ax1.set_title( fig_name[ mm ] + ', %s band, coR_arr' % band[kk] )
            tf = ax1.imshow(cor_MX_0, origin = 'lower', cmap = 'bwr', vmin = -1, vmax = 1,)
            ax1.set_ylim(-0.5, len(R_m_0) - 0.5 )

            plt.savefig('/home/xkchen/figs/%s_%s-band_Lumi-pros_coV-coR_arr.jpg' % (cat_lis[mm], band[kk]), dpi = 300)
            plt.close()

            # diagnostic figure: cov / cor of Delta-mu(r)
            fig = plt.figure( figsize = (13.12, 4.8) )
            ax0 = fig.add_axes([0.05, 0.10, 0.45, 0.80])
            ax1 = fig.add_axes([0.50, 0.10, 0.45, 0.80])

            ax0.set_title( fig_name[ mm ] + ',%s band, coV_arr' % band[kk] )
            tf = ax0.imshow(cov_MX_1, origin = 'lower', cmap = 'rainbow', norm = mpl.colors.LogNorm(),)
            ax0.set_ylim(-0.5, len(R_m_1) - 0.5 )

            ax1.set_title( fig_name[ mm ] + ', %s band, coR_arr' % band[kk] )
            tf = ax1.imshow(cor_MX_1, origin = 'lower', cmap = 'bwr', vmin = -1, vmax = 1,)
            ax1.set_ylim(-0.5, len(R_m_1) - 0.5 )

            plt.savefig('/home/xkchen/figs/%s_%s-band_Delta-Lumi-pros_coV-coR_arr.jpg' % (cat_lis[mm], band[kk]), dpi = 300)
            plt.close()

    # summary comparison of mu, mean mu, and Delta mu across bins / bands
    fig = plt.figure( figsize = (19.84, 4.8) )
    ax0 = fig.add_axes([0.05, 0.10, 0.27, 0.83])
    ax1 = fig.add_axes([0.37, 0.10, 0.25, 0.83])
    ax2 = fig.add_axes([0.67, 0.10, 0.25, 0.83])

    ax0.set_title( '$\\mu(r)$' )
    ax1.set_title( '$\\bar{\\mu}(<r)$' )
    ax2.set_title( '$\\Delta \\mu$ = $\\bar{\\mu}(<r)$ - $\\mu(r)$')

    for mm in range( 2 ):
        for kk in range( 3 ):
            d_dat = pds.read_csv( Lpro_path + '%s_%s-band_aveg-jack_Lumi-pros.csv' % (cat_lis[mm], band[kk]) )
            dd_R, dd_L, dd_L_err = np.array( d_dat['R'] ), np.array( d_dat['Lumi'] ), np.array( d_dat['Lumi_err'] )
            dd_mL, dd_mL_err = np.array( d_dat['m_Lumi'] ), np.array( d_dat['m_Lumi_err'] )
            dd_DL, dd_DL_err = np.array( d_dat['d_Lumi'] ), np.array( d_dat['d_Lumi_err'] )

            # only the first band carries the mass-bin label in the legend
            if kk == 0:
                ax0.plot( dd_R, dd_L, ls = line_s[mm], color = color_s[kk], alpha = 0.5, label = fig_name[mm] + ',%s band' % band[kk],)
                ax0.fill_between( dd_R, y1 = dd_L - dd_L_err, y2 = dd_L + dd_L_err, color = color_s[kk], alpha = 0.12,)

                ax1.plot( dd_R, dd_mL, ls = line_s[mm], color = color_s[kk], alpha = 0.5, label = fig_name[mm] + ',%s band' % band[kk],)
                ax1.fill_between( dd_R, y1 = dd_mL - dd_mL_err, y2 = dd_mL + dd_mL_err, color = color_s[kk], alpha = 0.12,)

                ax2.plot( dd_R, dd_DL, ls = line_s[mm], color = color_s[kk], alpha = 0.5, label = fig_name[mm] + ',%s band' % band[kk],)
                ax2.fill_between( dd_R, y1 = dd_DL - dd_DL_err, y2 = dd_DL + dd_DL_err, color = color_s[kk], alpha = 0.12,)
            else:
                ax0.plot( dd_R, dd_L, ls = line_s[mm], color = color_s[kk], alpha = 0.5, label = '%s band' % band[kk],)
                ax0.fill_between( dd_R, y1 = dd_L - dd_L_err, y2 = dd_L + dd_L_err, color = color_s[kk], alpha = 0.12,)

                ax1.plot( dd_R, dd_mL, ls = line_s[mm], color = color_s[kk], alpha = 0.5, label = '%s band' % band[kk],)
                ax1.fill_between( dd_R, y1 = dd_mL - dd_mL_err, y2 = dd_mL + dd_mL_err, color = color_s[kk], alpha = 0.12,)

                ax2.plot( dd_R, dd_DL, ls = line_s[mm], color = color_s[kk], alpha = 0.5, label = '%s band' % band[kk],)
                ax2.fill_between( dd_R, y1 = dd_DL - dd_DL_err, y2 = dd_DL + dd_DL_err, color = color_s[kk], alpha = 0.12,)

    ax0.legend( loc = 1,)
    ax0.set_xscale('log')
    ax0.set_yscale('log')
    ax0.set_ylabel('$\\mu(r) [L_{\\odot}/ kpc^2]$')
    ax0.set_xlabel('R[kpc]')
    ax0.set_xlim( 1e1, 1e3)
    ax0.set_ylim( 5e2, 1e7)

    ax1.legend( loc = 1,)
    ax1.set_xscale('log')
    ax1.set_yscale('log')
    ax1.set_ylabel('$\\bar{\\mu}(<r) [L_{\\odot}/ kpc^2]$')
    ax1.set_xlabel('R[kpc]')
    ax1.set_xlim( 1e1, 1e3)
    ax1.set_ylim( 5e2, 1e7)

    ax2.legend( loc = 1,)
    ax2.set_xscale('log')
    ax2.set_yscale('log')
    ax2.set_ylabel('$\\Delta\\mu(r) [L_{\\odot}/ kpc^2]$')
    ax2.set_xlabel('R[kpc]')
    ax2.set_xlim( 1e1, 1e3)
    ax2.set_ylim( 5e2, 1e7)

    plt.savefig('/home/xkchen/delta-mu_compare.png', dpi = 300)
    plt.close()
# cov_arr()
### === ### MCMC fitting
# Fiducial halo parameters per mass bin (low, high) used for the comparison.
a_ref = 1 / (1 + z_ref)

v_m = 200 # rho_mean = 200 * rho_c * omega_m
c_mass = [5.87, 6.95]       # concentration
Mh0 = [14.24, 14.24]        # log10 halo mass
sigm_off = [230, 210]       # miscentring width
f_off = [0.37, 0.20]        # miscentred fraction

path = '/home/xkchen/mywork/ICL/code/rig_common_cat/SB_in_Lsun/'
out_path = '/home/xkchen/figs/'

for mm in range( 2 ):
    for kk in range( 3 ):
        d_dat = pds.read_csv( path + '%s_%s-band_aveg-jack_Lumi-pros.csv' % (cat_lis[mm], band[kk]) )
        dd_R, dd_L, dd_L_err = np.array( d_dat['R'] ), np.array( d_dat['Lumi'] ), np.array( d_dat['Lumi_err'] )
        dd_mL, dd_mL_err = np.array( d_dat['m_Lumi'] ), np.array( d_dat['m_Lumi_err'] )
        dd_DL, dd_DL_err = np.array( d_dat['d_Lumi'] ), np.array( d_dat['d_Lumi_err'] )

        ## use delta_lumi for fitting, in unit L_sun / pc^2
        _dd_R = dd_R
        _DL = dd_DL * 1e-6
        _DL_err = dd_DL_err * 1e-6

        ## cov_arr
        with h5py.File( path + '%s_%s-band_Delta-Lumi-pros_cov-cor.h5' % (cat_lis[mm], band[kk]), 'r') as f:
            cov_MX = np.array(f['cov_Mx'])

        ## compare large scale signal only
        idx1 = dd_R >= 10
        cut_dex = np.where( idx1 == True )[0][0]

        com_r = dd_R[idx1]
        com_sb = dd_DL[idx1] * 1e-6
        com_err = dd_DL_err[idx1] * 1e-6
        p_cov_MX = cov_MX[ cut_dex:, cut_dex:]

        ## miscen-nfw: excess surface density of the miscentred halo model
        norm_sigma = obs_sigma_func( _dd_R * h, f_off[mm], sigm_off[mm], z_ref, c_mass[mm], Mh0[mm], v_m) # unit M_sun * h / kpc^2
        mean_norm_sigma = aveg_sigma_func( _dd_R * h, norm_sigma )
        delt_n_sigma = mean_norm_sigma - norm_sigma
        delt_n_sigma = delt_n_sigma * h * 1e-6

        m2l_fit = 1000  # fixed mass-to-light ratio for this comparison

        ## compare
        mode_mu = delt_n_sigma / m2l_fit

        ## other halo profile (Hernquist alternative, via colossus)
        rho_Hern = profile_hernquist.HernquistProfile( M = 10**(Mh0[mm]), c = c_mass[mm], z = z_ref, mdef = '200m')
        delta_Hern = rho_Hern.deltaSigma( _dd_R * h )
        delta_Hern = delta_Hern * h * 1e-6

        plt.figure()
        ax = plt.subplot(111)
        ax.set_title( fig_name[mm] + ',%s band' % band[kk])

        ax.plot( _dd_R, _DL, ls = '-', color = 'k', alpha = 0.45, label = 'signal',)
        ax.fill_between( _dd_R, y1 = _DL - _DL_err, y2 = _DL + _DL_err, color = 'k', alpha = 0.12,)

        ax.plot( _dd_R, mode_mu, 'g-', alpha = 0.45, label = '$NFW_{mis}$')
        ax.plot( _dd_R, _DL - mode_mu, ls = '--', color = 'g', alpha = 0.45, label = 'signal - $NFW_{mis}$')

        ax.plot( _dd_R, delta_Hern / m2l_fit, 'b-', alpha = 0.45, label = '$ Hernquist $')
        ax.plot( _dd_R, _DL - delta_Hern / m2l_fit, ls = '--', color = 'b', alpha = 0.45, label = 'signal - $ Hernquist $')

        ax.set_xlim(1e1, 1e3)
        # ax.set_ylim(1e-2, 1e1)
        ax.set_yscale('log')
        ax.legend( loc = 1)
        ax.set_xscale('log')
        ax.set_xlabel('R[kpc]')
        ax.set_ylabel('$ \\Delta\\mu $ $ [L_{\\odot} / pc^{2}] $')
        ax.grid(which = 'both', axis = 'both', alpha = 0.25,)

        plt.savefig('/home/xkchen/figs/%s_%s-band_sersic+1h-miscen_Delta-L_mcmc_fit.png' % (cat_lis[mm], band[kk]), dpi = 300 )
        plt.close()

        # debugging stop: abort after the first (mm, kk) iteration
        raise
| [
"cxkast@gmail.com"
] | cxkast@gmail.com |
e0ae3f8df1d6dacc769316b76657cf29b105ce4a | eaf2b6edff6d1020cf24028cf1d2f1df42b5a263 | /420-strong-password-checker/strong-password-checker.py | ec3726dbb2c42aaff8d8787e5b69e2cfdbaaf1a6 | [] | no_license | zhangoneone/leetcode | 1ac1a960d7aff5b6b8ddb85a48efc4f6c49c582c | 3f8632e5b436293c304e6df6326adc556be6b842 | refs/heads/master | 2020-04-21T06:57:47.415724 | 2018-12-20T01:33:58 | 2018-12-20T01:33:58 | 169,380,067 | 2 | 0 | null | 2019-02-06T09:17:31 | 2019-02-06T09:17:31 | null | UTF-8 | Python | false | false | 1,950 | py | # -*- coding:utf-8 -*-
# A password is considered strong if below conditions are all met:
#
#
# It has at least 6 characters and at most 20 characters.
# It must contain at least one lowercase letter, at least one uppercase letter, and at least one digit.
# It must NOT contain three repeating characters in a row ("...aaa..." is weak, but "...aa...a..." is strong, assuming other conditions are met).
#
#
# Write a function strongPasswordChecker(s), that takes a string s as input, and return the MINIMUM change required to make s a strong password. If s is already strong, return 0.
#
# Insertion, deletion or replace of any one character are all considered as one change.
class Solution(object):
    def strongPasswordChecker(self, s):
        """
        Return the minimum number of single-character insertions, deletions
        or replacements needed to turn ``s`` into a strong password:
        6..20 characters, at least one lowercase letter, one uppercase
        letter and one digit, and no three identical characters in a row.

        :type s: str
        :rtype: int
        """
        # How many of the three required character classes are absent.
        missing_type = 3
        if any(c.islower() for c in s): missing_type -= 1
        if any(c.isupper() for c in s): missing_type -= 1
        if any(c.isdigit() for c in s): missing_type -= 1

        # Scan runs of >= 3 equal characters.  A run of length L needs
        # L // 3 replacements.  ``one`` counts runs with L % 3 == 0 (one
        # deletion there saves a replacement); ``two`` counts runs with
        # L % 3 == 1 (two deletions save one replacement).
        change = 0
        one = two = 0
        p = 2
        while p < len(s):
            if s[p] == s[p - 1] == s[p - 2]:
                length = 2
                while p < len(s) and s[p] == s[p - 1]:
                    length += 1
                    p += 1
                # NOTE: use floor division so the counters stay integral on
                # Python 3 (the original ``/`` produced floats there).
                change += length // 3
                if length % 3 == 0:
                    one += 1
                elif length % 3 == 1:
                    two += 1
            else:
                p += 1

        if len(s) < 6:
            # Too short: every insertion can also supply a missing class.
            return max(missing_type, 6 - len(s))
        elif len(s) <= 20:
            # In range: each replacement can double as a class fix.
            return max(missing_type, change)
        else:
            # Too long: deletions are mandatory; spend them where they
            # reduce the number of required replacements first.
            delete = len(s) - 20
            change -= min(delete, one)
            change -= min(max(delete - one, 0), two * 2) // 2
            change -= max(delete - one - 2 * two, 0) // 3
            return delete + max(missing_type, change)
| [
"foreverbonfy@163.com"
] | foreverbonfy@163.com |
92b27e668bf00bfed941b76018d92906c856691a | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/boosted_trees/estimator_batch/custom_loss_head.pyi | 9cfa47bc65db82426ee6bf56237e1355c15c69f7 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | pyi | # Stubs for tensorflow.contrib.boosted_trees.estimator_batch.custom_loss_head (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import dtypes as dtypes
from tensorflow.python.ops import array_ops as array_ops, math_ops as math_ops
from typing import Any as Any, Optional as Optional
class CustomLossHead(head_lib._RegressionHead):
    # Type stub (.pyi) only: declares the signature of a regression head
    # driven by a user-supplied loss function and link function; the
    # implementation lives in the real tensorflow module.
    def __init__(self, loss_fn: Any, link_fn: Any, logit_dimension: Any, head_name: Optional[Any] = ..., weight_column_name: Optional[Any] = ..., metrics_fn: Optional[Any] = ...) -> None: ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
27de0f4f9000039cfce3979c024fc9b34b2c29a8 | 0859a864b1270164fe44a878ab12cfb3302c36bf | /abc161/c.py | aeae10de769322ffec461b483cc07e0c0f9ec2d3 | [] | no_license | wataoka/atcoder | f359d49ab6e0db39c019d9f6d2e8b92d35f723c4 | b91465dd5f655d05b89485fc7ad222283c5958f5 | refs/heads/master | 2021-04-15T06:02:59.593965 | 2020-05-11T04:38:23 | 2020-05-11T04:38:23 | 126,754,342 | 0 | 0 | null | 2020-02-28T02:31:03 | 2018-03-26T00:51:12 | Python | UTF-8 | Python | false | false | 82 | py | n, k = map(int, input().split())
# ABC161 C: starting from n we may repeatedly replace x with |x - k|;
# every reachable non-negative value reduces to either n % k or k - n % k.
can = n % k
# abs(can - k) == abs(k - can), so the duplicate argument in the original
# min() was redundant; min(can, k - can) is the answer.
print(min(can, k - can))
"wataoka@stu.kobe-u.ac.jp"
] | wataoka@stu.kobe-u.ac.jp |
d29e818b1a22fbe66308e12cac57776e2206777c | d94b78aeb8f7ea79545397f15b8b6d5a4b3e1635 | /test5.py | 916f2676d49ac4604269084f64e1708bedd82ce1 | [] | no_license | levylll/leetcode | 6a32c547c6a0d5aa07126a2ddc46b03b998ae5ad | 319f73e931b081fbf817769da5596c8eefd830a3 | refs/heads/master | 2021-06-19T01:30:03.113632 | 2021-06-11T07:46:02 | 2021-06-11T07:46:02 | 174,638,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | class Solution(object):
def gen_p1(self, idx, s):
tmp_max = s[idx]
move = 1
while True:
if idx - move >= 0 and idx + move < len(s) and s[idx-move] == s[idx+move]:
tmp_max = s[idx-move] + tmp_max + s[idx+move]
move += 1
else:
return tmp_max
def gen_p2(self, idx, s):
if s[idx] == s[idx + 1]:
tmp_max = s[idx] + s[idx + 1]
else:
return ''
move = 1
while True:
if idx - move >= 0 and idx + move + 1 < len(s) and s[idx-move] == s[idx+move+1]:
tmp_max = s[idx-move] + tmp_max + s[idx+move+1]
move += 1
else:
return tmp_max
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if not s:
return ''
max_s = ''
for idx, letter in enumerate(s):
tmp_max = self.gen_p1(idx, s)
if len(tmp_max) >= len(max_s):
max_s = tmp_max
if idx == len(s) - 1:
continue
tmp_max = self.gen_p2(idx, s)
if len(tmp_max) >= len(max_s):
max_s = tmp_max
return max_s
# Manual smoke test: the longest palindrome in "arra" is the whole string.
s = Solution()
a = "arra"
res = s.longestPalindrome(a)
print(res)  # expected output: arra
| [
"levylll@163.com"
] | levylll@163.com |
17c670a09a49ec13333d28a5078af3403e9357e3 | 72765a898e97d308c50a8b309b049d568fbbb622 | /examples/bio2bioes.py | 184bb58b88326fc54511acdb064c7ad1b0c3e6b6 | [] | no_license | gswyhq/bert4keras | 95a2fbdfbe49edb55ff56e8b1d75fd0432c440fb | e71a3acdc89b76f90e4d45527d228cc423f7cf1e | refs/heads/master | 2020-08-14T00:50:16.613380 | 2020-04-13T12:57:48 | 2020-04-13T12:57:48 | 215,066,934 | 0 | 0 | null | 2019-10-14T14:34:37 | 2019-10-14T14:34:37 | null | UTF-8 | Python | false | false | 2,114 | py | #!/usr/bin/python3
# coding: utf-8
import os
import sys
import unicodedata
def generator_load_data(data_path):
    """Yield ``[pairs, label]`` records from a BIO-annotated text file.

    Each useful input line looks like ``c/TAG c/TAG ...\\tLABEL``; lines
    without a '/' are skipped.  Every line is NFKD-normalized first.
    ``pairs`` is a list of ``[char, tag]`` lists, ``label`` the trailing
    tab-separated field.
    """
    with open(data_path, "r") as fh:
        for raw in fh:
            line = unicodedata.normalize('NFKD', raw).strip()
            if '/' not in line:
                continue
            line = line.strip()
            body = line.rsplit('\t', maxsplit=1)[0]
            label = line.rsplit('\t', maxsplit=1)[-1]
            # keep only single-character tokens of the form "c/TAG"
            pairs = [tok.rsplit('/', maxsplit=1) for tok in body.split() if tok[1] == '/']
            yield [pairs, label]
def bio2bioes(word_flag):
    """Convert a BIO tag sequence to the BIOES scheme.

    :param word_flag: list of ``[char, tag]`` pairs, e.g.
        ``[['A', 'B-Shiyi'], ['B', 'I-Shiyi'], ['C', 'O']]``
    :return: a new list of ``[char, tag]`` pairs with B->S for
        single-token chunks and I->E for chunk-final tokens.
    """
    converted = []
    n_tokens = len(word_flag)
    for pos, (word, flag) in enumerate(word_flag, 1):
        # A chunk continues only when the *next* tag is an I-tag; both 'O'
        # and a fresh 'B' terminate the current chunk.  (The original code
        # only treated 'O' as a terminator, so back-to-back entities were
        # mis-tagged: B,B stayed B,B instead of S,S and B,I,B produced
        # B,E,B instead of B,E,S.)
        chunk_ends = pos == n_tokens or word_flag[pos][1][0] != 'I'
        if flag[0] == 'B' and chunk_ends:
            converted.append([word, 'S' + flag[1:]])
        elif flag[0] == 'I' and chunk_ends:
            converted.append([word, 'E' + flag[1:]])
        else:
            # 'O', chunk-internal B/I, and any unexpected tag pass through
            # unchanged (the original silently dropped non-B/I/O tags).
            converted.append([word, flag])
    return converted
def main():
    """Convert the train/dev/test BIO-annotated files to BIOES format,
    writing each result next to its source with a ``_BIOES.txt`` suffix."""
    TRAIN_DATA_PATH = "../data/ner_rel_train.txt"
    DEV_DATA_PATH = "../data/ner_rel_dev.txt"
    TEST_DATA_PATH = "../data/ner_rel_test.txt"
    for bio_file_name in [TRAIN_DATA_PATH, DEV_DATA_PATH, TEST_DATA_PATH]:
        # replace the '.txt' suffix: ner_rel_train.txt -> ner_rel_train_BIOES.txt
        bioes_file_name = bio_file_name[:-4] + '_BIOES.txt'
        with open(bioes_file_name, 'w')as f2:
            for word_flag, label in generator_load_data(bio_file_name):
                word_flag = bio2bioes(word_flag)
                # one line per sentence: "c/TAG c/TAG ...\tLABEL"
                f2.write('{}\t{}\n'.format(' '.join('/'.join(w) for w in word_flag), label))


if __name__ == '__main__':
    main()
"gswyhq@126.com"
] | gswyhq@126.com |
f9d1ff55ba1de3c5bd0d2a7619b945862ecc9e1f | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-hilens/huaweicloudsdkhilens/v3/model/create_task_request.py | 363ab85585e3ef85232d5c1955b7bd770a145264 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,181 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateTaskRequest:
    """Request model for the HiLens CreateTask API (auto-generated SDK code).

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'deployment_id': 'str',
        'body': 'TaskRequest'
    }

    attribute_map = {
        'deployment_id': 'deployment_id',
        'body': 'body'
    }

    def __init__(self, deployment_id=None, body=None):
        """CreateTaskRequest

        The model defined in huaweicloud sdk

        :param deployment_id: Deployment ID, obtained from the deployment
            management page of the professional HiLens console
            [list deployments](getDeploymentListUsingGET.xml)
        :type deployment_id: str
        :param body: Body of the CreateTaskRequest
        :type body: :class:`huaweicloudsdkhilens.v3.TaskRequest`
        """

        self._deployment_id = None
        self._body = None
        self.discriminator = None

        self.deployment_id = deployment_id
        # body is optional; leave the private attribute None when omitted
        if body is not None:
            self.body = body

    @property
    def deployment_id(self):
        """Gets the deployment_id of this CreateTaskRequest.

        Deployment ID, obtained from the deployment management page of the
        professional HiLens console
        [list deployments](getDeploymentListUsingGET.xml)

        :return: The deployment_id of this CreateTaskRequest.
        :rtype: str
        """
        return self._deployment_id

    @deployment_id.setter
    def deployment_id(self, deployment_id):
        """Sets the deployment_id of this CreateTaskRequest.

        Deployment ID, obtained from the deployment management page of the
        professional HiLens console
        [list deployments](getDeploymentListUsingGET.xml)

        :param deployment_id: The deployment_id of this CreateTaskRequest.
        :type deployment_id: str
        """
        self._deployment_id = deployment_id

    @property
    def body(self):
        """Gets the body of this CreateTaskRequest.

        :return: The body of this CreateTaskRequest.
        :rtype: :class:`huaweicloudsdkhilens.v3.TaskRequest`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this CreateTaskRequest.

        :param body: The body of this CreateTaskRequest.
        :type body: :class:`huaweicloudsdkhilens.v3.TaskRequest`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively serialising nested
        # models (anything exposing to_dict) inside lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values declared sensitive; pass everything else through.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateTaskRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
11da6a73feea9cad9d0a412ebabf156cad4d81c4 | 193b35f8acaae37b43fe680bf9a6a3111db3b9c7 | /myPython/class/__hook__.py | d28d28c64b8393a75ad01283dd5c8b737bb64977 | [] | no_license | zerosum99/python_basic | 6726d0d5210fdff1e22f452470b515478f64b7cb | 4b9e2b3478472830d901748bd6a2ac84c3dcc684 | refs/heads/master | 2021-05-23T05:41:01.742846 | 2021-05-16T09:16:39 | 2021-05-16T09:16:39 | 94,850,543 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 06 16:41:23 2016
@author: 06411
"""
import abc
def interface(*attributes):
    """Class decorator: issubclass() against the decorated class succeeds for
    any class exposing all of *attributes* (structural / duck typing).

    NOTE(review): ``__subclasshook__`` is only consulted when the decorated
    class actually uses ``abc.ABCMeta``; and ``isinstance`` looks up
    ``__instancecheck__`` on the *metaclass*, so attaching it to the class
    itself is normally ignored — this looks like Python 2 demo code; confirm
    the intended behaviour before reuse.
    """
    def decorator(Base):
        # structural check: does Other provide every required attribute?
        def checker(Other):
            return all(hasattr(Other, a) for a in attributes)
        # identity check used by __instancecheck__: exact type match only
        def ins_checker(Other):
            if type(Other) == Base :
                return True
            return False
        def __subclasshook__(cls, Other):
            if checker(Other):
                return True
            return NotImplemented
        def __instancecheck__(cls, Other):
            return ins_checker(Other)
        Base.__subclasshook__ = classmethod(__subclasshook__)
        Base.__instancecheck__ = classmethod(__instancecheck__)
        return Base
    return decorator
@interface("x", "y")
class Foo(object):
    # Python 2 style ABC declaration: under Python 3 ``__metaclass__`` is an
    # ordinary attribute with no effect (``class Foo(metaclass=...)`` would
    # be required instead).
    __metaclass__ = abc.ABCMeta
    def x(self): return 5
    def y(self): return 10
class Bar(object):
    """Structurally Foo-like: provides x() and y() without inheriting Foo."""

    def x(self):
        return "blah"

    def y(self):
        return "blah"
class Baz(object):
    """Carries x and y as plain instance attributes rather than methods."""

    def __init__(self):
        self.x = self.y = "blah"
class attrdict(dict):
    """Dict whose keys are readable as attributes (``d.x == d['x']``).

    Missing keys raise AttributeError (not KeyError) so that ``hasattr``
    and ``getattr(..., default)`` behave correctly; the original raised
    KeyError, which escapes ``hasattr`` on Python 3 and breaks the
    hasattr-based structural checks above.
    """
    def __getattr__(self, attr):
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)
# Demo (Python 2 print syntax): exercise the structural subclass hook and
# the (ineffective) instance hook against the four sample classes.
f = Foo()
b = Bar()
z = Baz()
t = attrdict({"x":27.5, "z":37.5})
# Bar/Baz expose x and y -> structurally Foo; attrdict lacks 'y'.
print isinstance(f, Foo)
print isinstance(b, Foo)
print isinstance(z, Foo)
print isinstance(t, Foo)
print "hook ",Foo.__subclasshook__(f),Foo.__subclasshook__(t)
print "instance ",Foo.__instancecheck__(f),Foo.__instancecheck__(b)
"myjlms99@gmail.com"
] | myjlms99@gmail.com |
52e0cb686f4865a2dfb100dd8c5a022744cba825 | 26fc334777ce27d241c67d97adc1761e9d23bdba | /tests/django_tests/tests/auth_tests/test_forms.py | b1d55c9749b35ad4afa79a1747f7e39ee9719130 | [
"BSD-3-Clause"
] | permissive | alihoseiny/djongo | 1434c9e78c77025d7e0b3330c3a40e9ea0029877 | e2edf099e398573faa90e5b28a32c3d7f1c5f1e9 | refs/heads/master | 2020-03-27T23:27:02.530397 | 2018-08-30T14:44:37 | 2018-08-30T14:44:37 | 147,317,771 | 2 | 1 | BSD-3-Clause | 2018-09-04T09:00:53 | 2018-09-04T09:00:53 | null | UTF-8 | Python | false | false | 41,955 | py | import datetime
import re
from importlib import reload
from unittest import mock
import django
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail, signals
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
def reload_auth_forms(sender, setting, value, enter, **kwargs):
    """``setting_changed`` receiver: re-import django.contrib.auth.forms
    whenever AUTH_USER_MODEL changes, so its module-level form classes are
    rebuilt against the overridden user model."""
    if setting != 'AUTH_USER_MODEL':
        return
    reload(django.contrib.auth.forms)
class ReloadFormsMixin:
    """TestCase mixin that (dis)connects the ``setting_changed`` receiver so
    django.contrib.auth.forms is re-imported around AUTH_USER_MODEL
    overrides for the whole class run."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        signals.setting_changed.connect(reload_auth_forms)

    @classmethod
    def tearDownClass(cls):
        signals.setting_changed.disconnect(reload_auth_forms)
        super().tearDownClass()
class TestDataMixin:
    """Creates the shared set of users exercised throughout the form tests."""

    @classmethod
    def setUpTestData(cls):
        cls.u1 = User.objects.create_user(username='testclient', password='password', email='testclient@example.com')
        cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
        cls.u3 = User.objects.create_user(username='staff', password='password')
        # Users with unusable or malformed password hashes.
        cls.u4 = User.objects.create(username='empty_password', password='')
        cls.u5 = User.objects.create(username='unmanageable_password', password='$')
        cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
        cls.u7 = ExtensionUser.objects.create(username='extension_client', date_of_birth='1998-02-24')
class UserCreationFormTest(ReloadFormsMixin, TestDataMixin, TestCase):
    """Validation, password handling and custom-user-model behaviour of
    UserCreationForm."""

    def test_user_already_exists(self):
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [str(User._meta.get_field('username').error_messages['unique'])])

    def test_invalid_data(self):
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [str(validator.message)])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [str(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [str(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)

        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])

    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        # password_changed() only fires on the actual (committed) save.
        self.assertEqual(password_changed.call_count, 0)
        u = form.save()
        self.assertEqual(password_changed.call_count, 1)
        self.assertEqual(repr(u), '<User: jsmith@example.com>')

    def test_unicode_username(self):
        data = {
            'username': '宝',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(u.username, '宝')

    def test_normalize_username(self):
        # The normalization happens in AbstractBaseUser.clean() and ModelForm
        # validation calls Model.clean().
        ohm_username = 'testΩ'  # U+2126 OHM SIGN
        data = {
            'username': ohm_username,
            'password1': 'pwd2',
            'password2': 'pwd2',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        user = form.save()
        self.assertNotEqual(user.username, ohm_username)
        self.assertEqual(user.username, 'testΩ')  # U+03A9 GREEK CAPITAL LETTER OMEGA

    def test_duplicate_normalized_unicode(self):
        """
        To prevent almost identical usernames, visually identical but differing
        by their unicode code points only, Unicode NFKC normalization should
        make appear them equal to Django.
        """
        omega_username = 'iamtheΩ'  # U+03A9 GREEK CAPITAL LETTER OMEGA
        ohm_username = 'iamtheΩ'  # U+2126 OHM SIGN
        self.assertNotEqual(omega_username, ohm_username)
        User.objects.create_user(username=omega_username, password='pwd')
        data = {
            'username': ohm_username,
            'password1': 'pwd2',
            'password2': 'pwd2',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['username'], ["A user with that username already exists."]
        )

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
            'min_length': 12,
        }},
    ])
    def test_validates_password(self):
        data = {
            'username': 'testclient',
            'password1': 'testclient',
            'password2': 'testclient',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        # Both validators must report: similarity and minimum length.
        self.assertEqual(len(form['password2'].errors), 2)
        self.assertIn('The password is too similar to the username.', form['password2'].errors)
        self.assertIn(
            'This password is too short. It must contain at least 12 characters.',
            form['password2'].errors
        )

    def test_custom_form(self):
        with override_settings(AUTH_USER_MODEL='auth_tests.ExtensionUser'):
            from django.contrib.auth.forms import UserCreationForm
            self.assertEqual(UserCreationForm.Meta.model, ExtensionUser)

            class CustomUserCreationForm(UserCreationForm):
                class Meta(UserCreationForm.Meta):
                    fields = UserCreationForm.Meta.fields + ('date_of_birth',)

            data = {
                'username': 'testclient',
                'password1': 'testclient',
                'password2': 'testclient',
                'date_of_birth': '1988-02-24',
            }
            form = CustomUserCreationForm(data)
            self.assertTrue(form.is_valid())
        # reload_auth_forms() reloads the form.
        from django.contrib.auth.forms import UserCreationForm
        self.assertEqual(UserCreationForm.Meta.model, User)

    def test_custom_form_with_different_username_field(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = CustomUser
                fields = ('email', 'date_of_birth')

        data = {
            'email': 'test@client222.com',
            'password1': 'testclient',
            'password2': 'testclient',
            'date_of_birth': '1988-02-24',
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_custom_form_hidden_username_field(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = CustomUserWithoutIsActiveField
                fields = ('email',)  # without USERNAME_FIELD

        data = {
            'email': 'testclient@example.com',
            'password1': 'testclient',
            'password2': 'testclient',
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_password_whitespace_not_stripped(self):
        data = {
            'username': 'testuser',
            'password1': ' testpassword ',
            'password2': ' testpassword ',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password1'], data['password1'])
        self.assertEqual(form.cleaned_data['password2'], data['password2'])

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    ])
    def test_password_help_text(self):
        form = UserCreationForm()
        # help_text is HTML-escaped, hence &#39; for the apostrophe.
        self.assertEqual(
            form.fields['password1'].help_text,
            '<ul><li>Your password can&#39;t be too similar to your other personal information.</li></ul>'
        )

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    ])
    def test_user_create_form_validates_password_with_all_data(self):
        """UserCreationForm password validation uses all of the form's data."""
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = User
                fields = ('username', 'email', 'first_name', 'last_name')

        form = CustomUserCreationForm({
            'username': 'testuser',
            'password1': 'testpassword',
            'password2': 'testpassword',
            'first_name': 'testpassword',
            'last_name': 'lastname',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['password2'],
            ['The password is too similar to the first name.'],
        )

    def test_with_custom_user_model(self):
        with override_settings(AUTH_USER_MODEL='auth_tests.ExtensionUser'):
            data = {
                'username': 'test_username',
                'password1': 'test_password',
                'password2': 'test_password',
            }
            from django.contrib.auth.forms import UserCreationForm
            self.assertEqual(UserCreationForm.Meta.model, ExtensionUser)
            form = UserCreationForm(data)
            self.assertTrue(form.is_valid())

    def test_customer_user_model_with_different_username_field(self):
        with override_settings(AUTH_USER_MODEL='auth_tests.CustomUser'):
            from django.contrib.auth.forms import UserCreationForm
            self.assertEqual(UserCreationForm.Meta.model, CustomUser)
            data = {
                'email': 'testchange@test.com',
                'password1': 'test_password',
                'password2': 'test_password',
            }
            form = UserCreationForm(data)
            self.assertTrue(form.is_valid())
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that rejects inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.ModelBackend'])
def test_inactive_user_incorrect_password(self):
"""An invalid login doesn't leak the inactive status of a user."""
data = {
'username': 'inactive',
'password': 'incorrect',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
# Use an authentication backend that allows inactive users.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_username_field_max_length_matches_user_model(self):
self.assertEqual(CustomEmailField._meta.get_field('username').max_length, 255)
data = {
'username': 'u' * 255,
'password': 'pwd',
'email': 'test@example.com',
}
CustomEmailField.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 255)
self.assertEqual(form.errors, {})
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_username_field_max_length_defaults_to_254(self):
self.assertIsNone(IntegerUsernameUser._meta.get_field('username').max_length)
data = {
'username': '0123456',
'password': 'password',
}
IntegerUsernameUser.objects.create_user(**data)
form = AuthenticationForm(None, data)
self.assertEqual(form.fields['username'].max_length, 254)
self.assertEqual(form.errors, {})
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
form.is_valid() # Not necessary to have valid credentails for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
def test_get_invalid_login_error(self):
error = AuthenticationForm().get_invalid_login_error()
self.assertIsInstance(error, forms.ValidationError)
self.assertEqual(
error.message,
'Please enter a correct %(username)s and password. Note that both '
'fields may be case-sensitive.',
)
self.assertEqual(error.code, 'invalid_login')
self.assertEqual(error.params, {'username': 'username'})
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
class UserChangeFormTest(ReloadFormsMixin, TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
# A regression test, introduce by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
with override_settings(AUTH_USER_MODEL='auth_tests.ExtensionUser'):
from django.contrib.auth.forms import UserChangeForm
self.assertEqual(UserChangeForm.Meta.model, ExtensionUser)
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
fields = ('username', 'password', 'date_of_birth')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=self.u7)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
# reload_auth_forms() reloads the form.
from django.contrib.auth.forms import UserChangeForm
self.assertEqual(UserChangeForm.Meta.model, User)
def test_with_custom_user_model(self):
with override_settings(AUTH_USER_MODEL='auth_tests.ExtensionUser'):
from django.contrib.auth.forms import UserChangeForm
self.assertEqual(UserChangeForm.Meta.model, ExtensionUser)
data = {
'username': 'testclient',
'date_joined': '1998-02-24',
'date_of_birth': '1998-02-24',
}
form = UserChangeForm(data, instance=self.u7)
self.assertTrue(form.is_valid())
def test_customer_user_model_with_different_username_field(self):
with override_settings(AUTH_USER_MODEL='auth_tests.CustomUser'):
from django.contrib.auth.forms import UserChangeForm
self.assertEqual(UserChangeForm.Meta.model, CustomUser)
user = CustomUser.custom_objects.create(email='test@test.com', date_of_birth='1998-02-24')
data = {
'email': 'testchange@test.com',
'date_of_birth': '1998-02-24',
}
form = UserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
Test the PasswordResetFOrm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = 'test@mail.com'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.PBKDF2PasswordHasher'])
def test_render(self):
widget = ReadOnlyPasswordHashWidget()
value = 'pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5udm0='
self.assertHTMLEqual(
widget.render('name', value, {'id': 'id_password'}),
"""
<div id="id_password">
<strong>algorithm</strong>: pbkdf2_sha256
<strong>iterations</strong>: 100000
<strong>salt</strong>: a6Pucb******
<strong>hash</strong>: WmCkn9**************************************
</div>
"""
)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
| [
"nesdis@gmail.com"
] | nesdis@gmail.com |
86c71b01760e5102eb4d6243cbb87d4644df51c8 | bb88122fc4978b14e8a9b02d8c11f1ce67ea17d0 | /03_ML/m31_smote2_wine_quality.py | 0910ec5b43d9b87466d0691c7f65f194c02a62e3 | [] | no_license | star10919/Keras_ | c2c8a6f3d0e1a7ceba9e81dbc51ecfd12bd5fe78 | f3156b7db6e12feea075b46e94b09157f43a141c | refs/heads/main | 2023-08-17T22:44:54.324315 | 2021-10-24T02:47:00 | 2021-10-24T02:47:00 | 390,066,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | ### 데이터 증폭(smote 사용) - acc보다 F1 score가 높아짐
# y 라벨 개수 가장 큰 거 기준으로 동일하게 맞춰줌
### k_neighbors 의 디폴트 5 : 각 라벨의 개수가 5보다 커야 증폭(smote) 가능해짐! / 아니면 k_neighbors의 값을 낮춰주면 됨
# => k_neighbors 값 줄이면 score 떨어짐(연산수 줄기 때문에)
from imblearn.over_sampling import SMOTE
from sklearn.datasets import load_wine
import pandas as pd
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
import time
import warnings
warnings.filterwarnings('ignore')
datasets = pd.read_csv('../_data/winequality-white.csv', index_col=None, header=0, sep=';') # 비정제 데이터
datasets = datasets.values # 판다스 넘파이로 변환
x = datasets[:, :11]
y = datasets[:, 11]
print(x.shape, y.shape) # (4898, 11) (4898,)
print(pd.Series(y).value_counts()) #value_counts는 판다스 함수임!(넘파이X)
# 6.0 2198
# 5.0 1457
# 7.0 880
# 8.0 175
# 4.0 163
# 3.0 20
# 9.0 5
print(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.75, shuffle=True, random_state=9, stratify=y) # stratify=y_new : y_new 라벨의 비율로 나눠줌!!!
print(pd.Series(y_train).value_counts())
# 6.0 1648
# 5.0 1093
# 7.0 660
# 8.0 131
# 4.0 122
# 3.0 15
# 9.0 4
model = XGBClassifier(n_jobs=-1)
model.fit(x_train, y_train, eval_metric='mlogloss') # xgboost 쓰면 이발메트릭스 사용해야 함!
score = model.score(x_test, y_test)
print("model.score :", score) # model.score : 0.6563265306122449
########################################### smote 적용 ##############################################
print("=============================== smote 적용 ===============================")
smote = SMOTE(random_state=66, k_neighbors=3) #k_neighbors 의 디폴트 5 / 가장 작은 라벨인 9의 라벨 개수가 4이므로 디폴드인 5보다 작은 값으로 에러나니까 k_neighbors의 값을 낮춰줌
x_smote_train, y_smote_train = smote.fit_resample(x_train, y_train) # train만 smote(증폭) 시킴, test는 하지 않음
#####################################################################################################
print(pd.Series(y_smote_train).value_counts())
# 6.0 1648
# 5.0 1648
# 4.0 1648
# 9.0 1648
# 8.0 1648
# 7.0 1648
# 3.0 1648
print(x_smote_train.shape, y_smote_train.shape) #
print("smote 전 :", x_train.shape, y_train.shape)
print("smote 후 :", x_smote_train.shape, y_smote_train.shape)
print("smote전 레이블 값 분포 :\n", pd.Series(y_train).value_counts())
print("smote후 레이블 값 분포 :\n", pd.Series(y_smote_train).value_counts())
model2 = XGBClassifier(n_jobs=-1)
model2.fit(x_smote_train, y_smote_train, eval_metric='mlogloss')
score2 = model2.score(x_test, y_test)
print("model2.score :", score2) # model2.score : 0.6302040816326531 | [
"star10919@naver.com"
] | star10919@naver.com |
4abb72769c2e36fbb784efbff26050fd95f05cd0 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep010/Gather3_W_fix3blk_C_change/train/pyr_1s/L3/step10_a.py | 87b968c1551be20f7e6be09c21dabb3bcba21dce | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,310 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_1side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep010.W_w_M_to_C_pyr.pyr_1s.L3.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep010.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_6__3side_6__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
### 直接按 F5 或打 python step10_b1_exp_obj_load_and_train_and_test.py,後面沒有接東西喔!才不會跑到下面給 step10_b_subprocss.py 用的程式碼~~~
ch032_1side_1.build().run()
# print('no argument')
sys.exit()
### 以下是給 step10_b_subprocess.py 用的,相當於cmd打 python step10_b1_exp_obj_load_and_train_and_test.py 某個exp.build().run()
eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
624b8b66742a21f03a4eaf8cd8f640fc0054d3f3 | f7be1846da14366ca8277dc3edc207766de838f0 | /ICPC Practice/minimum-distances.py | 0fdc9b8546501ad63e61fac9a7b45f125161e1d2 | [] | no_license | IsThatYou/Competitive-Programming | d440f9e78f8a982cd60aa8c81833b9e10208b29e | a924ac7087654402c7f7c4b62374c377178061ad | refs/heads/master | 2020-07-19T16:31:32.492635 | 2019-01-27T02:45:21 | 2019-01-27T02:45:21 | 73,759,128 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | length = int(input().strip())
A = [int(A_temp) for A_temp in input().strip().split(' ')]
lowest = 2000
for i in range(length):
for j in range(i + 1, length):
if A[i] == A[j]:
dis = abs(j - i)
if dis < lowest:
lowest = dis
if lowest != 2000:
print(lowest)
else:
print(-1) | [
"junlinwang18@gmail.com"
] | junlinwang18@gmail.com |
af2ab744148c104fa9b235621f90b30107208a7a | 8c2de4da068ba3ed3ce1adf0a113877385b7783c | /hyperion/bin/pack-wav-rirs.py | 0017798894270af755bafa05e4a9b4fd9b86d2b2 | [
"Apache-2.0"
] | permissive | hyperion-ml/hyperion | a024c718c4552ba3a03aae2c2ca1b8674eaebc76 | c4c9eee0acab1ba572843373245da12d00dfffaa | refs/heads/master | 2023-08-28T22:28:37.624139 | 2022-03-25T16:28:08 | 2022-03-25T16:28:08 | 175,275,679 | 55 | 20 | Apache-2.0 | 2023-09-13T15:35:46 | 2019-03-12T18:40:19 | Python | UTF-8 | Python | false | false | 1,865 | py | #!/usr/bin/env python
"""
Copyright 2020 Jesus Villalba (Johns Hopkins University)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import sys
import os
from jsonargparse import (
ArgumentParser,
ActionConfigFile,
ActionParser,
namespace_to_dict,
)
import time
import logging
import math
import numpy as np
from hyperion.hyp_defs import config_logger
from hyperion.io import SequentialAudioReader as AR
from hyperion.io import DataWriterFactory as DWF
def pack_wav_rirs(input_path, output_spec, **kwargs):
writer = DWF.create(output_spec, scp_sep=" ", compress=False)
t1 = time.time()
with AR(input_path, wav_scale=1) as reader:
for data in reader:
key, h, fs = data
if h.ndim == 2:
h = h[:, 0]
h_delay = np.argmax(np.abs(h))
h_max = h[h_delay]
h /= h_max
h[h < 1e-3] = 0
h = np.trim_zeros(h)
logging.info(
"Packing rir %s h_max=%f h_delay=%d h-length=%d"
% (key, h_max, h_delay, len(h))
)
writer.write([key], [h])
logging.info("Packed RIRS elapsed-time=%.f" % (time.time() - t1))
if __name__ == "__main__":
parser = ArgumentParser(description="Packs RIRs in wave format to h5/ark files")
parser.add_argument("--cfg", action=ActionConfigFile)
parser.add_argument("--input", dest="input_path", required=True)
parser.add_argument("--output", dest="output_spec", required=True)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
default=1,
choices=[0, 1, 2, 3],
type=int,
help="Verbose level",
)
args = parser.parse_args()
config_logger(args.verbose)
del args.verbose
logging.debug(args)
pack_wav_rirs(**namespace_to_dict(args))
| [
"jesus.antonio.villalba@gmail.com"
] | jesus.antonio.villalba@gmail.com |
4bacc7bb356d16649339adbe3f7ae44936cef97b | b1cf54e4d6f969d9084160fccd20fabc12c361c2 | /leetcode/longest_substring.py | e189bb5bcce062b5ff99c66cba029c56af0f6dd4 | [] | no_license | zarkle/code_challenges | 88a53477d6f9ee9dd71577678739e745b9e8a694 | 85b7111263d4125b362184df08e8a2265cf228d5 | refs/heads/master | 2021-06-10T11:05:03.048703 | 2020-01-23T06:16:41 | 2020-01-23T06:16:41 | 136,668,643 | 0 | 1 | null | 2019-02-07T23:35:59 | 2018-06-08T21:44:26 | JavaScript | UTF-8 | Python | false | false | 1,746 | py | # https://leetcode.com/problems/longest-substring-without-repeating-characters/
# https://leetcode.com/problems/longest-substring-without-repeating-characters/solution/
# runtime 640 ms, 12.5%; memory 13.1 MB, 5.5%
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if not s:
return 0
i = longest = 0
chars = {}
while i <= len(s) - 1:
if s[i] not in chars:
chars[s[i]] = i
else:
if len(chars) > longest:
longest = len(chars)
temp = chars[s[i]] + 1
chars = {}
chars[s[temp]] = temp
i = temp
i += 1
if len(chars) > longest:
longest = len(chars)
return longest
# runtime 56 ms, 99.5%; memory 13.3 MB, 5%
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if not s:
return 0
ls = ''
longest = 0
for char in s:
if char in ls:
if len(ls) > longest:
longest = len(ls)
ls = ls[ls.index(char) + 1:]
ls += char
if len(ls) > longest:
longest = len(ls)
return longest
# runtime 48 ms, 100%; memory 13.3 MB, 5%
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
d = {}
start = -1
max = 0
for i in range(len(s)):
if s[i] in d and d[s[i]] > start:
start = d[s[i]]
d[s[i]] = i
else:
d[s[i]] = i
if i - start > max:
max = i - start
return max
# test cases: "au", "dvdf"
| [
"beverly.pham@gmail.com"
] | beverly.pham@gmail.com |
082a6d27401dee260c30ad1efb445313ff63bd21 | c9d81b5d0b258b57a06a99f43a79dc1ecd219488 | /Test_1.py | 6b0a1b0fdce78cf79cfec3ab37a0739d156c579e | [] | no_license | babiswas/Python-Design-Patterns | 4f4851cc55ae1bee8828f099f2c2610a36f4e8d5 | ea6e417880bab26bded60c67188a12623f74639f | refs/heads/master | 2020-07-30T05:28:10.030541 | 2019-09-26T17:54:21 | 2019-09-26T17:54:21 | 210,102,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from abc import ABC,abstractmethod
class Pet:
@abstractmethod
def speak(self):
pass
@abstractmethod
def having_food(self):
pass
class Cat(Pet):
def __init__(self,name):
self.name=name
def speak(self):
print(f"{self.name} Mew Mew")
def having_food(self):
print(f"{self.name} having milk")
class Dog(Pet):
def __init__(self,name):
self.name=name
def speak(self):
print(f"{self.name} bow bow")
def having_food(self):
print(f"{self.name} having meat")
def get_pet(pet="cat"):
pets=dict(cat=Cat("Meww"),dog=Dog("Bow Bow"))
return pets[pet]
if __name__=="__main__":
while True:
try:
key=input("Enter key")
obj=get_pet(key)
obj.speak()
obj.having_food()
except Exception as e:
print("Invalid key entered")
| [
"noreply@github.com"
] | babiswas.noreply@github.com |
78b2e5ad6eda0389d08f76cb7f95ff272be8fc13 | 152b74ed7d60d75a9d70f6637c107fff9b064ff9 | /Chapter03/MalGan/MalGAN_gen_adv_examples.py | 0289a0a9a76912a65061cdd41fbacd0dab425a1f | [
"MIT"
] | permissive | PacktPublishing/Machine-Learning-for-Cybersecurity-Cookbook | 1d7a50fb79b5da8c411eda9dc9cface4d0f78125 | 19b9757020cbcb09d9bb4249605fbb9c7322d92b | refs/heads/master | 2023-05-12T08:29:13.569598 | 2023-01-18T10:19:07 | 2023-01-18T10:19:07 | 222,411,828 | 250 | 164 | MIT | 2023-05-01T20:11:44 | 2019-11-18T09:33:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,075 | py | from sklearn.neighbors import NearestNeighbors
from keras import backend as K
import MalGAN_utils
from MalGAN_preprocess import preprocess
import numpy as np
def gen_adv_samples(model, fn_list, pad_percent=0.1, step_size=0.001, thres=0.5):
### search for nearest neighbor in embedding space ###
def emb_search(org, adv, pad_idx, pad_len, neigh):
out = org.copy()
for idx in range(pad_idx, pad_idx+pad_len):
target = adv[idx].reshape(1, -1)
best_idx = neigh.kneighbors(target, 1, False)[0][0]
out[0][idx] = best_idx
return out
max_len = int(model.input.shape[1])
emb_layer = model.layers[1]
emb_weight = emb_layer.get_weights()[0]
inp2emb = K.function([model.input]+ [K.learning_phase()], [emb_layer.output]) # [function] Map sequence to embedding
# Build neighbor searches
neigh = NearestNeighbors(1)
neigh.fit(emb_weight)
log = MalGAN_utils.logger()
adv_samples = []
for e, fn in enumerate(fn_list):
### run one file at a time due to different padding length, [slow]
inp, len_list = preprocess([fn], max_len)
inp_emb = np.squeeze(np.array(inp2emb([inp, False])), 0)
pad_idx = len_list[0]
pad_len = max(min(int(len_list[0]*pad_percent), max_len-pad_idx), 0)
org_score = model.predict(inp)[0][0] ### origianl score, 0 -> malicious, 1 -> benign
loss, pred = float('nan'), float('nan')
if pad_len > 0:
if org_score < thres:
adv_emb, gradient, loss = fgsm(model, inp_emb, pad_idx, pad_len, e, step_size)
adv = emb_search(inp, adv_emb[0], pad_idx, pad_len, neigh)
pred = model.predict(adv)[0][0]
final_adv = adv[0][:pad_idx+pad_len]
else: # use origin file
final_adv = inp[0][:pad_idx]
log.write(fn, org_score, pad_idx, pad_len, loss, pred)
# sequence to bytes
bin_adv = bytes(list(final_adv))
adv_samples.append(bin_adv)
return adv_samples, log | [
"dineshchaudhary@packtpub.com"
] | dineshchaudhary@packtpub.com |
cc45fdbbddc41768ecf333b33f0e6b2c01dcbd95 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v3/services/ad_schedule_view_service_client_config.py | 90efc7090c35efbde8f0c9a47c102453d887ece9 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 815 | py | config = {
"interfaces": {
"google.ads.googleads.v3.services.AdScheduleViewService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetAdScheduleView": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
4464dc2f4c00b362193e2739b6b2a68359b658c1 | 3f3156337011c4b6cdc56f4eb5122b5132f510bd | /object-oriented.py | e4a7c468b8ebad2bd8a89f39f3ca4b86bf9067e9 | [] | no_license | KEHANG/Programming-Style-Examples | 620022ba73eb04e189a517d70330ccc9884e190b | f02833d72c5859db450a43f9724f66cdb0d81012 | refs/heads/master | 2020-12-03T06:45:49.922488 | 2014-11-14T00:57:05 | 2014-11-14T00:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # Print out all the state names from the csv
# Coded in the "object-oriented" style
from electiondata import ElectionResults
filename = '2012_US_election_state.csv'
results = ElectionResults(filename)
results.load()
print "Opened file:"
state_names = results.states()
for state in state_names:
print " "+state
print "done ("+str(results.state_count())+" lines)"
results.addCandidates()
results.addTotalVotes()
candidates = results.candidates
for cand in candidates:
print cand + ": " + str(results.totalVotes[cand]) | [
"kehanghan@gmail.com"
] | kehanghan@gmail.com |
7da40b77085e8a9aff24f6e4855a7b4fee264fef | 373c0cc659e0c77739ff87f01b159ab3969bce72 | /pca/pca5.py | a80b3a04c841b8406b5da0159da05a9db468a7bd | [] | no_license | c1a1o1/cancer | 75da16ab43d4925f849fc0c84a98d69d46e3aea1 | 268f8e1553f1237fc2d0d0d3d7c13f664792aa92 | refs/heads/master | 2021-01-20T04:17:09.858006 | 2017-02-24T16:53:35 | 2017-02-24T16:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | #from numpy import mean, cov, cumsum, dot, linalg, size, flipud, argsort
#from pylab import imread, subplot, imshow, title, gray, figure, show, NullLocator, imsave
import numpy as np
import matplotlib.pyplot as plt
def princomp(A, numpc=0):
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - np.mean(A.T, axis=1)).T # subtract the mean (along columns)
[eigenvalues, eigenvectors] = np.linalg.eig(np.cov(M))
p = np.size(eigenvectors, axis=1)
idx = np.argsort(eigenvalues) # sorting the eigenvalues
idx = idx[::-1] # in ascending order
# sorting eigenvectors according to the sorted eigenvalues
eigenvectors = eigenvectors[:, idx]
eigenvalues = eigenvalues[idx] # sorting eigenvalues
projection = 0
if numpc < p and numpc >= 0:
eigenvectors = eigenvectors[:, range(numpc)] # cutting some PCs if needed
projection = np.dot(eigenvectors.T, M) # projection of the data in the new space
return eigenvectors, eigenvalues, projection
A = plt.imread('../shakira.jpg') # load an image
A = np.mean(A, 2) # to get a 2-D array
full_pc = np.size(A, axis=1) # numbers of all the principal components
i = 1 # subplots
dist = []
for numpc in range(0, full_pc+10, 10): # 0 10 20 ... full_pc
# showing the pics reconstructed with less than 50 PCs
if numpc <= 50:
eigenvectors, eigenvalues, projection = princomp(A, numpc)
Ar = np.dot(eigenvectors, projection).T + np.mean(A, axis=0) # image reconstruction
# difference in Frobenius norm
dist.append(np.linalg.norm(A-Ar, 'fro'))
ax = plt.subplot(2, 3, i, frame_on=False)
ax.xaxis.set_major_locator(plt.NullLocator()) # remove ticks
ax.yaxis.set_major_locator(plt.NullLocator())
i += 1
plt.imshow(Ar)
plt.title('PCs # ' + str(numpc))
plt.gray()
if numpc == 50:
A50 = Ar
plt.figure()
plt.imshow(A)
plt.title('numpc FULL')
plt.gray()
#imsave("shakira40.jpg", A50)
plt.figure()
plt.imshow(A50)
plt.title('numpc 50')
plt.gray()
plt.figure()
perc = np.cumsum(eigenvalues) / sum(eigenvalues)
dist = dist / np.max(dist)
plt.plot(range(len(perc)), perc, 'b')
#plt.plot(range(len(dist)), dist, 'r')
plt.axis([0, full_pc, 0, 1.1])
plt.show()
| [
"senenbotello@gmail.com"
] | senenbotello@gmail.com |
053874357b8792fc6cb4e9a3cb44ce28e06e37f3 | 8d6d19a97370a331eb60cb63423ca580a2ff821c | /cross_link/__init__.py | 152551c4fbbdc2fd8142e8ff378c026b677e7504 | [
"BSD-3-Clause"
] | permissive | brucewxh/IntraArchiveDeduplicator | 6e61b124beea8d4b57849a30c47ac6b7262d8045 | 7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb | refs/heads/master | 2020-06-13T16:57:41.285069 | 2019-07-01T18:22:02 | 2019-07-01T18:22:02 | 194,722,731 | 0 | 0 | BSD-3-Clause | 2019-07-01T18:19:00 | 2019-07-01T18:18:59 | null | UTF-8 | Python | false | false | 12 | py | # Tests init | [
"something@fake-url.com"
] | something@fake-url.com |
822d0722fc6c480872f5f0a209984e64e0d29091 | 597f847f0bd2112e45ca50979acec403a5f969cf | /python/day06/exercise/test2.py | f2ef95420fe5500c69f3b2906f06ca1a0d91a442 | [] | no_license | wangyuhui12/AID1804 | e5d26aa6d505655cd885784cc0645c9ea8323df5 | 7291fc9411b541d17e540dd829166e64887fd9f7 | refs/heads/master | 2022-09-11T23:10:47.631426 | 2022-08-03T09:01:13 | 2022-08-03T09:01:13 | 136,152,631 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py |
# 2、有一些数存在于列表L中,如:
# L = [1, 3, 2, 1, 6, 4, ..., 98, 82]
# (此数据自己定义)
# 将列表L中的数存入于另一个列表L2中(要求,重复出现多次的数字只在L2列表中保留一份)
L = []
while True:
n = input("请输入一个整数:")
if not n:
break
n = int(n)
L.append(n)
# for i in L:
# if L.count(i) > 1:
# j = L.count(i)
# for x in range(j-1):
# L.remove(i)
# 判断i是否在L2中,是则添加,否则continue
L2 =[]
for i in L:
if i in L2:
continue
L2.append(i)
print(L2)
| [
"noreply@github.com"
] | wangyuhui12.noreply@github.com |
c1d153362cd6ae47be10bb825b009ce4fa2a7ef9 | 204ec78fcebcea9e1e1da4905cf3fad0a514b01f | /pyocd/target/builtin/target_MPS3_AN522.py | c8d9dd2265ca1de7624831c0ec1156619eb87e21 | [
"Apache-2.0"
] | permissive | ARMmbed/pyOCD | 659340bf8753aa8e15a72890b8bea64dff2c2f42 | d4cdcf7e532cae17caad866839287bbe1e0d952b | refs/heads/master | 2023-05-31T13:45:15.797588 | 2020-10-12T13:55:47 | 2020-10-12T13:55:47 | 190,203,829 | 3 | 1 | Apache-2.0 | 2019-07-05T11:05:40 | 2019-06-04T13:09:56 | Python | UTF-8 | Python | false | false | 1,492 | py | # pyOCD debugger
# Copyright (c) 2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (RamRegion, MemoryMap)
class AN522(CoreSightTarget):
VENDOR = "Arm"
MEMORY_MAP = MemoryMap(
RamRegion( name='itcm', start=0x00000000, length=0x00080000, access='rwx'),
RamRegion( name='sram', start=0x30000000, length=0x00100000, access='rwx'),
RamRegion( name='dram6_ns', start=0x60000000, length=0x10000000, access='rwx'),
RamRegion( name='dram7_s', start=0x70000000, length=0x10000000, access='rwxs'),
RamRegion( name='dram8_ns', start=0x80000000, length=0x10000000, access='rwx'),
RamRegion( name='dram9_s', start=0x90000000, length=0x10000000, access='rwxs'),
)
def __init__(self, session):
super(AN522, self).__init__(session, self.MEMORY_MAP)
| [
"flit@me.com"
] | flit@me.com |
737661d6a037d5d33ce44173209f72af25276344 | efa2d7ad4ad9da185d3b168793d92d8b02d8e1cc | /doc_src/conf.py | b48bd49c4ae28cc2bb8f2036b07d76019381ad8c | [
"BSD-3-Clause"
] | permissive | justquick/google-chartwrapper | 5f003de140769ea288b0ac7cf60c1ae5f5033a7f | 25d0f88bba4cf30da5b40ba07d647e703453d9bd | refs/heads/master | 2016-09-05T14:13:29.210254 | 2014-06-28T07:36:54 | 2014-06-28T07:36:54 | 1,617,083 | 7 | 3 | null | 2014-06-28T07:36:54 | 2011-04-15T01:37:45 | Python | UTF-8 | Python | false | false | 6,307 | py | # -*- coding: utf-8 -*-
#
# app documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 21 13:18:22 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
import gchart
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GChartWrapper'
copyright = u'2009-2011, Justin Quick <justquick@gmail.com>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = gchart.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'docs'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'app.tex', u'GChartWrapper Documentation',
u'Justin Quick', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| [
"justquick@gmail.com"
] | justquick@gmail.com |
b141ab2597ba820e82b8091ee4334bd5218daa59 | d2fdd6b10b0467913971d1408a9a4053f0be9ffb | /datahub/metadata/migrations/0015_add_iso_to_country.py | 891f31f2965cb861506230efd7bb9f663ed86b23 | [] | no_license | jakub-kozlowski/data-hub-leeloo | fc5ecebb5e4d885c824fc7c85acad8837fcc5c76 | 7f033fcbcfb2f7c1c0e10bec51620742d3d929df | refs/heads/master | 2020-05-18T13:29:14.145251 | 2019-04-30T12:12:50 | 2019-04-30T12:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Generated by Django 2.1.2 on 2018-10-30 17:24
from pathlib import PurePath
from django.core.management import call_command
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metadata', '0014_investmentprojectstage_exclude_from_investment_flow'),
]
operations = [
migrations.AddField(
model_name='country',
name='iso_alpha2_code',
field=models.CharField(blank=True, max_length=2),
),
]
| [
"info@marcofucci.com"
] | info@marcofucci.com |
ddbc45e6681b12728108ffcd2a027d6639b8f7a3 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/eventhub/azure-eventhub/azure/eventhub/_buffered_producer/_buffered_producer_dispatcher.py | 7bad79bead6409a6e962b5d0e80ddaf2063c179e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 7,290 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
from threading import Lock
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Optional, List, Callable, Union, TYPE_CHECKING
from ._partition_resolver import PartitionResolver
from ._buffered_producer import BufferedProducer
from .._producer import EventHubProducer
from ..exceptions import EventDataSendError, ConnectError, EventHubError
if TYPE_CHECKING:
from .._transport._base import AmqpTransport
from .._producer_client import SendEventTypes
_LOGGER = logging.getLogger(__name__)
class BufferedProducerDispatcher:
    # pylint: disable=too-many-instance-attributes
    """Routes events to one lazily-created BufferedProducer per partition.

    Partition assignment is delegated to a PartitionResolver (explicit id,
    hash of the partition key, or round-robin).  ``flush``/``close`` fan out
    to every partition's producer through a shared thread pool.
    """
    def __init__(
        self,
        partitions: List[str],
        on_success: Callable[["SendEventTypes", Optional[str]], None],
        on_error: Callable[["SendEventTypes", Optional[str], Exception], None],
        create_producer: Callable[..., EventHubProducer],
        eventhub_name: str,
        max_message_size_on_link: int,
        *,
        amqp_transport: AmqpTransport,
        max_buffer_length: int = 1500,
        max_wait_time: float = 1,
        executor: Optional[Union[ThreadPoolExecutor, int]] = None
    ):
        """
        :param partitions: All partition ids of the target Event Hub.
        :param on_success: Callback invoked after a batch is sent successfully.
        :param on_error: Callback invoked when sending a batch fails.
        :param create_producer: Factory producing an EventHubProducer for a
            given partition id.
        :param eventhub_name: Name of the Event Hub (used in error messages).
        :param max_message_size_on_link: AMQP link size limit for batching.
        :param amqp_transport: Transport implementation (uAMQP / pyAMQP).
        :param max_buffer_length: Per-partition queued-event threshold that
            triggers a flush.
        :param max_wait_time: Seconds to wait before flushing a partial batch.
        :param executor: Optional caller-owned ThreadPoolExecutor, or a worker
            count used to build one; by default a private pool is created.
        """
        self._buffered_producers: Dict[str, BufferedProducer] = {}
        self._partition_ids: List[str] = partitions
        # Guards _buffered_producers against concurrent enqueue/flush/close.
        self._lock = Lock()
        self._on_success = on_success
        self._on_error = on_error
        self._create_producer = create_producer
        self._eventhub_name = eventhub_name
        self._max_message_size_on_link = max_message_size_on_link
        self._partition_resolver = PartitionResolver(self._partition_ids)
        self._max_wait_time = max_wait_time
        self._max_buffer_length = max_buffer_length
        # True when the executor is caller-owned, so close() must not shut it down.
        self._existing_executor = False
        self._amqp_transport = amqp_transport
        # NOTE(review): an ``executor`` of any other type silently leaves
        # self._executor unset -- presumably validated upstream; confirm.
        if not executor:
            self._executor = ThreadPoolExecutor()
        elif isinstance(executor, ThreadPoolExecutor):
            self._existing_executor = True
            self._executor = executor
        elif isinstance(executor, int):
            self._executor = ThreadPoolExecutor(executor)
    def _get_partition_id(self, partition_id, partition_key):
        """Resolve the target partition id for an enqueue call.

        Priority: explicit ``partition_id`` (validated against the known
        partitions), then hash of ``partition_key``, then round-robin.

        :raises ConnectError: if ``partition_id`` is not a known partition.
        """
        if partition_id:
            if partition_id not in self._partition_ids:
                raise ConnectError(
                    "Invalid partition {} for the event hub {}".format(
                        partition_id, self._eventhub_name
                    )
                )
            return partition_id
        if isinstance(partition_key, str):
            return self._partition_resolver.get_partition_id_by_partition_key(
                partition_key
            )
        return self._partition_resolver.get_next_partition_id()
    def enqueue_events(
        self, events, *, partition_id=None, partition_key=None, timeout_time=None
    ):
        """Buffer ``events`` on the resolved partition's producer.

        The per-partition BufferedProducer is created and started on first
        use (EAFP: try the existing producer, build one on KeyError).
        """
        pid = self._get_partition_id(partition_id, partition_key)
        with self._lock:
            try:
                self._buffered_producers[pid].put_events(events, timeout_time)
            except KeyError:
                buffered_producer = BufferedProducer(
                    self._create_producer(pid),
                    pid,
                    self._on_success,
                    self._on_error,
                    self._max_message_size_on_link,
                    executor=self._executor,
                    max_wait_time=self._max_wait_time,
                    max_buffer_length=self._max_buffer_length,
                    amqp_transport = self._amqp_transport,
                )
                buffered_producer.start()
                self._buffered_producers[pid] = buffered_producer
                buffered_producer.put_events(events, timeout_time)
    def flush(self, timeout_time=None):
        # flush all the buffered producer, the method will block until finishes or times out
        """Flush every partition's buffer in parallel; block until done.

        :raises EventDataSendError: if flushing failed for any partition
            (failures are collected per-partition, not fail-fast).
        """
        with self._lock:
            futures = []
            for pid, producer in self._buffered_producers.items():
                # call each producer's flush method
                futures.append(
                    (
                        pid,
                        self._executor.submit(
                            producer.flush, timeout_time=timeout_time
                        ),
                    )
                )
            # gather results
            exc_results = {}
            for pid, future in futures:
                try:
                    future.result()
                except Exception as exc:  # pylint: disable=broad-except
                    exc_results[pid] = exc
            if not exc_results:
                _LOGGER.info("Flushing all partitions succeeded")
                return
            _LOGGER.warning(
                "Flushing all partitions partially failed with result %r.", exc_results
            )
            raise EventDataSendError(
                message="Flushing all partitions partially failed, failed partitions are {!r}"
                " Exception details are {!r}".format(exc_results.keys(), exc_results)
            )
    def close(self, *, flush=True, timeout_time=None, raise_error=False):
        """Stop all buffered producers in parallel, optionally flushing first.

        :param flush: Whether each producer should flush before stopping.
        :param raise_error: When True, re-raise aggregated failures as
            EventHubError; otherwise they are only logged.
        """
        with self._lock:
            futures = []
            # stop all buffered producers
            for pid, producer in self._buffered_producers.items():
                futures.append(
                    (
                        pid,
                        self._executor.submit(
                            producer.stop,
                            flush=flush,
                            timeout_time=timeout_time,
                            raise_error=raise_error,
                        ),
                    )
                )
            exc_results = {}
            # gather results
            for pid, future in futures:
                try:
                    future.result()
                except Exception as exc:  # pylint: disable=broad-except
                    exc_results[pid] = exc
            if exc_results:
                _LOGGER.warning(
                    "Stopping all partitions partially failed with result %r.",
                    exc_results,
                )
                if raise_error:
                    raise EventHubError(
                        message="Stopping all partitions partially failed, failed partitions are {!r}"
                        " Exception details are {!r}".format(
                            exc_results.keys(), exc_results
                        )
                    )
            # Only tear down the pool if we created it ourselves.
            if not self._existing_executor:
                self._executor.shutdown()
    def get_buffered_event_count(self, pid):
        """Return the queued-event count for partition ``pid`` (0 if none)."""
        try:
            return self._buffered_producers[pid].buffered_event_count
        except KeyError:
            return 0
    @property
    def total_buffered_event_count(self):
        """Total queued events across every active partition producer."""
        return sum(
            [self.get_buffered_event_count(pid) for pid in self._buffered_producers]
        )
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
3eb5675cb98630a1417a7a99e8ead5bfe8caf461 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/show_member_detail_request.py | b7d02e8b454b32351aa7f4f4af4a67e39d3cac14 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,456 | py | # coding: utf-8
import pprint
import re
import six
class ShowMemberDetailRequest:
    """Request model for querying the details of one backup share member.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked with "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'backup_id': 'str',
        'member_id': 'str'
    }

    attribute_map = {
        'backup_id': 'backup_id',
        'member_id': 'member_id'
    }

    def __init__(self, backup_id=None, member_id=None):
        """ShowMemberDetailRequest - a model defined in huaweicloud sdk

        :param backup_id: ID of the backup copy being shared.
        :param member_id: ID of the share member.
        """
        self._backup_id = None
        self._member_id = None
        self.discriminator = None
        self.backup_id = backup_id
        self.member_id = member_id

    @property
    def backup_id(self):
        """Gets the backup_id of this ShowMemberDetailRequest.

        ID of the backup copy.

        :return: The backup_id of this ShowMemberDetailRequest.
        :rtype: str
        """
        return self._backup_id

    @backup_id.setter
    def backup_id(self, backup_id):
        """Sets the backup_id of this ShowMemberDetailRequest.

        ID of the backup copy.

        :param backup_id: The backup_id of this ShowMemberDetailRequest.
        :type: str
        """
        self._backup_id = backup_id

    @property
    def member_id(self):
        """Gets the member_id of this ShowMemberDetailRequest.

        ID of the share member.

        :return: The member_id of this ShowMemberDetailRequest.
        :rtype: str
        """
        return self._member_id

    @member_id.setter
    def member_id(self, member_id):
        """Sets the member_id of this ShowMemberDetailRequest.

        ID of the share member.

        :param member_id: The member_id of this ShowMemberDetailRequest.
        :type: str
        """
        self._member_id = member_id

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models are serialized recursively via their own ``to_dict``;
        attributes listed in ``sensitive_list`` are masked.
        """
        result = {}
        # Iterate natively instead of six.iteritems: this SDK runs on
        # Python 3, so the ``six`` py2-compat shim is unnecessary here.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, ShowMemberDetailRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
f2e6beef9ab524d5325b345c9e2d1affa0b1a47d | 022228249a7892e1efeaa11f0d445ba7cb115891 | /product/api/serializers_site.py | e33b258e12bf2a82306d4ebbae1db3f0c1ac77f0 | [] | no_license | angaradev/django-vue-interface | bacef985b9f9cbd4379992133168c9278b4f707b | 2e55d217b12bcda3c54acbb90cd3466bca955c5a | refs/heads/master | 2022-12-01T09:24:29.788368 | 2022-11-22T07:09:44 | 2022-11-22T07:09:44 | 241,351,571 | 0 | 2 | null | 2022-11-22T07:09:45 | 2020-02-18T12:08:14 | JavaScript | UTF-8 | Python | false | false | 3,167 | py | # -*- coding: utf-8 -*-
# from rest_framework_recursive.fields import RecursiveField
from product.models import Cross
from rest_framework import serializers
from product.models import (
Product,
ProductImage,
Category,
Units,
CarModel,
CarMake,
CarEngine,
Country,
BrandsDict,
ProductVideos,
Category,
ProductDescription,
ProductAttribute,
ProductAttributeName,
)
class CategorySerializer(serializers.ModelSerializer):
    """Flat (non-nested) serializer for Category rows; ``parent`` is exposed
    as a plain id so the client can rebuild the tree itself."""
    class Meta:
        model = Category
        fields = ("id", "name", "slug", "cat_image", "level", "parent")
class RecursiveField(serializers.Serializer):
    """Field that re-applies the enclosing serializer class to each child,
    enabling arbitrarily deep tree serialization."""
    def to_representation(self, value):
        # self.parent is the ListSerializer (many=True); its parent is the
        # tree serializer whose class we re-instantiate for the child node.
        serializer = self.parent.parent.__class__(value, context=self.context)  # type: ignore
        return serializer.data
class CategoryTreeSerializer(serializers.ModelSerializer):
    """
    Serializes categories as a nested tree (children recursively embedded)
    for the front-end menu/JSON view.
    """
    children = RecursiveField(many=True)  # recurses via this same class
    class Meta:
        model = Category
        fields = ("id", "name", "parent", "children", "slug")
class CategoryFirstLevelSerializer(serializers.ModelSerializer):
    """
    First-level category serializer: ``depth = 1`` expands direct children
    one level deep instead of recursing.
    """
    class Meta:
        model = Category
        fields = ("id", "name", "parent", "children", "slug")
        depth = 1
class MpttTestSerializer(serializers.ModelSerializer):
    """
    Experimental MPTT category serializer (same shape as
    CategoryFirstLevelSerializer, kept for testing).
    """
    # some_count = serializers.SerializerMethodField()
    class Meta:
        model = Category
        fields = ("id", "name", "parent", "children", "slug")
        depth = 1
    def get_some_count(self, obj):
        # NOTE(review): only used if the ``some_count`` field above is
        # re-enabled; presumably ``obj.some_count`` comes from an annotated
        # queryset -- confirm before enabling.
        return obj.some_count
class ProductCrossSerializer(serializers.ModelSerializer):
    """
    Serializer for a product's cross-reference numbers (only the ``cross``
    value itself is exposed).
    """
    class Meta:
        model = Cross
        fields = ["cross"]
        depth = 0
class GetSingleProductSerializer(serializers.ModelSerializer):
    """
    Serializer for getting a single product for the site; no authentication
    required.  Embeds all related data (images, videos, descriptions,
    attributes, crosses, fitment) in one response.
    """
    product_cross = ProductCrossSerializer(many=True, read_only=True)
    class Meta:
        model = Product
        fields = [
            "id",
            "name",
            "name2",
            "cat_number",
            "slug",
            "brand",
            "unit",
            "car_model",
            "category",
            "related",
            "engine",
            "product_image",
            "product_video",
            "product_description",
            "product_cross",
            "product_attribute",
            "one_c_id",
        ]
        depth = 2  # Don't change: nested related objects rely on depth=2 (UI may break otherwise)
class GetCarModelSerializer(serializers.ModelSerializer):
    """
    Car-model serializer for the UI; ``depth = 1`` embeds related objects
    (e.g. the make) one level deep.
    """
    class Meta:
        model = CarModel
        fields = "__all__"
        depth = 1
class GetCarMakesSerializer(serializers.ModelSerializer):
    """
    Serializer listing all car makes for the API.
    """
    class Meta:
        model = CarMake
        fields = "__all__"
        depth = 1
"angara99@gmail.com"
] | angara99@gmail.com |
a270f8f89e241c78f56752aad9c4849cf7d66f8c | 15c016140f03bb476549fa4bf20d4f52077783a6 | /ecl/module_loader.py | faf271acdef9b3503ee8c94a5b92d083abcd53f0 | [
"Apache-2.0"
] | permissive | nttcom/eclsdk | 5cbb3f0067d260d257f7366d18e0f554d8f17cb3 | c2dafba850c4e6fb55b5e10de79257bbc9a01af3 | refs/heads/master | 2023-08-09T06:24:53.466570 | 2022-12-01T02:44:48 | 2022-12-01T02:44:48 | 86,663,654 | 5 | 15 | Apache-2.0 | 2023-09-06T02:39:01 | 2017-03-30T05:45:19 | Python | UTF-8 | Python | false | false | 989 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Load various modules for authorization and eventually services.
"""
from stevedore import extension
def load_service_plugins(namespace):
    """Discover, instantiate and index service plugins.

    Every entry point registered under *namespace* is loaded and invoked
    (``invoke_on_load=True``), its ``interface`` attribute is reset to
    ``None``, and the resulting objects are keyed by ``service_type``.

    :param namespace: stevedore entry-point namespace to scan.
    :return: dict mapping ``service_type`` -> plugin instance.
    """
    manager = extension.ExtensionManager(
        namespace=namespace,
        invoke_on_load=True,
    )
    plugins = {}
    for ext in manager:
        plugin = ext.obj
        plugin.interface = None
        plugins[plugin.service_type] = plugin
    return plugins
| [
"h.ohta@ntt.com"
] | h.ohta@ntt.com |
872bb43358fa79c7700cf6626f0e22339467ce80 | 43f78a4d51f441b4dbbc1a84537804123201a246 | /dataaccess/dataexporter.py | b764c20af14dd42baafd8605f4af24b491277d9b | [] | no_license | shmyhero/data-process | 28d5bfb27999cb0d462453d4663f01f43a649386 | 63dac65b41333d6e81bf32ecaa6533b28975c985 | refs/heads/master | 2021-03-22T00:11:51.373193 | 2019-10-22T06:27:28 | 2019-10-22T06:27:28 | 98,510,392 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | import os
from utils.logger import Logger
from common.configmgr import ConfigMgr
from common.pathmgr import PathMgr
from optionskewdao import OptionSkewDAO
from vixdao import VIXDAO
class DataExporter(object):
    """Exports SKEW and VIX data from the database to CSV files.

    Output directory and file names come from the ``output`` section of the
    application config.
    """
    def __init__(self, daily_raw_path=None):
        # NOTE(review): ``daily_raw_path`` is unused here -- confirm whether
        # any caller still passes it before removing the parameter.
        self.logger = Logger(__name__, PathMgr.get_log_path())
        self.config = ConfigMgr.get_output_config()
        self.output_dir = self.config['output_dir']
    def export_skew(self):
        """Write option-skew history to the configured CSV file."""
        self.logger.info('Export skew data to csv...')
        csv_file = os.path.join(self.output_dir, self.config['skew_file'])
        OptionSkewDAO().export_data_to_csv(csv_file)
        self.logger.info('Export skew completed.')
    def export_vix(self):
        """Generate the full VIX series and write it to the configured CSV file."""
        self.logger.info('Export vix data to csv...')
        vix_file = os.path.join(self.output_dir, self.config['vix_file'])
        df = VIXDAO().gen_all_vix()
        df.to_csv(vix_file)
        self.logger.info('Export vix completed.')
if __name__ == '__main__':
    # Skew export is disabled by default; uncomment to regenerate it.
    #DataExporter().export_skew()
    DataExporter().export_vix()
"elwin.luo@tradehero.mobi"
] | elwin.luo@tradehero.mobi |
e959b8a741f902b0414e1f40c0253495b77e7b9d | f6bba50fccc6fb0dae2f046193434cfb4b9d32d5 | /54/A.py | ebcd9d660e8171e1ea4e24847f3e19f299254c40 | [] | no_license | seven320/AtCoder | 4c26723d20004fe46ce118b882faabc05066841c | 45e301e330e817f1ace4be4088d3babe18588170 | refs/heads/master | 2021-11-22T22:57:32.290504 | 2021-10-24T09:15:12 | 2021-10-24T09:15:12 | 162,827,473 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # encoding:utf-8
import copy
import random
import bisect #bisect_left これで二部探索の大小検索が行える
import fractions #最小公倍数などはこっち
import math
import sys
mod = 10**9+7
sys.setrecursionlimit(mod) # 再帰回数上限はでdefault1000
A,B = map(int,input().split())
if A==B:
ans = "Draw"
else:
if A == 1:
ans = "Alice"
elif B == 1:
ans = "Bob"
elif A>B:
ans = "Alice"
else:
ans = "Bob"
print(ans)
| [
"yosyuaomenw@yahoo.co.jp"
] | yosyuaomenw@yahoo.co.jp |
fcc98bedd3a0869fa161e82145679045adc49903 | 5fd4707876cac0a4ca3b14af9a936301c45b5599 | /10_序列的修改、散列和切片/fp_04_了解__getitem__和切片的行为.py | 49d7c6d581010d434f3c5d183befc6c6beade33c | [] | no_license | xuelang201201/FluentPython | 5b0d89bfc6ee1238ad77db9955ec7e8417b418b8 | 7cbedf7c780c2a9e0edac60484f2ad4c385e1dbd | refs/heads/master | 2022-04-26T21:49:16.923214 | 2020-04-27T01:27:50 | 2020-04-27T01:27:50 | 258,290,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class MySeq:
def __getitem__(self, index):
return index # 在这个示例中,__getitem__直接返回传给它的值。
s = MySeq()
print(s[1]) # 单个索引,没什么新奇的。
print(s[1:4]) # 1:4 表示法变成了 slice(1, 4, None)。
print(s[1:4:2]) # slice(1, 4, 2) 的意思是从 1 开始,到 4 结束,步幅为 2。
print(s[1:4:2, 9]) # 神奇的事发生了:如果 [] 中有逗号,那么 __getitem__ 收到的是元组。
print(s[1:4:2, 7:9]) # 元组中甚至可以有多个切片对象。
| [
"xuelang201201@gmail.com"
] | xuelang201201@gmail.com |
c78a2ef8cec79fb675dc42ec24e873d4b11a4064 | 0bc777a57e39c466a9482af9a6eda698ab3c1437 | /HeavyIonsAnalysis/JetAnalysis/python/jets/ak4PFJetSequence_pPb_mb_cff.py | 81e576a2ba7b5558d9b7a70a41dde21cfc4cb589 | [] | no_license | stahlleiton/cmssw | 3c78d80b9372fdf2a37f424372504b23c9dc4f78 | fcfda663dc8c315b505eb6bcc7e936401c01c4d1 | refs/heads/EWQAnalysis2017_8030 | 2023-08-23T13:50:40.837198 | 2017-11-09T17:45:31 | 2017-11-09T17:45:31 | 45,795,305 | 0 | 3 | null | 2021-04-30T07:36:28 | 2015-11-08T19:28:54 | C++ | UTF-8 | Python | false | false | 14,296 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
# --- Gen-jet / parton matching and jet energy corrections for ak4PF jets ---
# Match reco ak4PF jets to cleaned HI gen jets within dR < 0.4.
ak4PFmatch = patJetGenJetMatch.clone(
    src = cms.InputTag("ak4PFJets"),
    matched = cms.InputTag("ak4HiCleanedGenJets"),
    resolveByMatchQuality = cms.bool(False),
    maxDeltaR = 0.4
    )

# Same matching, but gen jets against their cleaned counterparts (groomed path).
ak4PFmatchGroomed = patJetGenJetMatch.clone(
    src = cms.InputTag("ak4HiGenJets"),
    matched = cms.InputTag("ak4HiCleanedGenJets"),
    resolveByMatchQuality = cms.bool(False),
    maxDeltaR = 0.4
    )

ak4PFparton = patJetPartonMatch.clone(src = cms.InputTag("ak4PFJets")
                                                        )

# L2Relative + L3Absolute jet energy corrections, AK4PF offline payload.
ak4PFcorr = patJetCorrFactors.clone(
    useNPV = cms.bool(False),
    useRho = cms.bool(False),
#    primaryVertices = cms.InputTag("hiSelectedVertex"),
    levels   = cms.vstring('L2Relative','L3Absolute'),
    src = cms.InputTag("ak4PFJets"),
    payload = "AK4PF_offline"
    )

# Jet ID is computed from the matching calo collection.
ak4PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('ak4CaloJets'))

#ak4PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak4HiCleanedGenJets'))

# Factory holding all b-tagger modules configured for the "ak4PF" label, R = 0.4.
ak4PFbTagger = bTaggers("ak4PF",0.4)

#create objects locally since they dont load properly otherwise
#ak4PFmatch = ak4PFbTagger.match
ak4PFparton = patJetPartonMatch.clone(src = cms.InputTag("ak4PFJets"), matched = cms.InputTag("genParticles"))
# --- Local aliases for every b-tagging module produced by the bTaggers
# factory (some assignments are repeated in the original; harmless). ---
ak4PFPatJetFlavourAssociationLegacy = ak4PFbTagger.PatJetFlavourAssociationLegacy
ak4PFPatJetPartons = ak4PFbTagger.PatJetPartons
ak4PFJetTracksAssociatorAtVertex = ak4PFbTagger.JetTracksAssociatorAtVertex
ak4PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
ak4PFSimpleSecondaryVertexHighEffBJetTags = ak4PFbTagger.SimpleSecondaryVertexHighEffBJetTags
ak4PFSimpleSecondaryVertexHighPurBJetTags = ak4PFbTagger.SimpleSecondaryVertexHighPurBJetTags
ak4PFCombinedSecondaryVertexBJetTags = ak4PFbTagger.CombinedSecondaryVertexBJetTags
ak4PFCombinedSecondaryVertexV2BJetTags = ak4PFbTagger.CombinedSecondaryVertexV2BJetTags
ak4PFJetBProbabilityBJetTags = ak4PFbTagger.JetBProbabilityBJetTags
ak4PFSoftPFMuonByPtBJetTags = ak4PFbTagger.SoftPFMuonByPtBJetTags
ak4PFSoftPFMuonByIP3dBJetTags = ak4PFbTagger.SoftPFMuonByIP3dBJetTags
ak4PFTrackCountingHighEffBJetTags = ak4PFbTagger.TrackCountingHighEffBJetTags
ak4PFTrackCountingHighPurBJetTags = ak4PFbTagger.TrackCountingHighPurBJetTags
ak4PFPatJetPartonAssociationLegacy = ak4PFbTagger.PatJetPartonAssociationLegacy
# Impact-parameter based taggers use the offline primary vertex.
ak4PFImpactParameterTagInfos = ak4PFbTagger.ImpactParameterTagInfos
ak4PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
ak4PFJetProbabilityBJetTags = ak4PFbTagger.JetProbabilityBJetTags
# Secondary-vertex taggers (positive and negative-tag variants for mistag studies).
ak4PFSecondaryVertexTagInfos = ak4PFbTagger.SecondaryVertexTagInfos
ak4PFSimpleSecondaryVertexHighEffBJetTags = ak4PFbTagger.SimpleSecondaryVertexHighEffBJetTags
ak4PFSimpleSecondaryVertexHighPurBJetTags = ak4PFbTagger.SimpleSecondaryVertexHighPurBJetTags
ak4PFCombinedSecondaryVertexBJetTags = ak4PFbTagger.CombinedSecondaryVertexBJetTags
ak4PFCombinedSecondaryVertexV2BJetTags = ak4PFbTagger.CombinedSecondaryVertexV2BJetTags
ak4PFSecondaryVertexNegativeTagInfos = ak4PFbTagger.SecondaryVertexNegativeTagInfos
ak4PFNegativeSimpleSecondaryVertexHighEffBJetTags = ak4PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
ak4PFNegativeSimpleSecondaryVertexHighPurBJetTags = ak4PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
ak4PFNegativeCombinedSecondaryVertexBJetTags = ak4PFbTagger.NegativeCombinedSecondaryVertexBJetTags
ak4PFPositiveCombinedSecondaryVertexBJetTags = ak4PFbTagger.PositiveCombinedSecondaryVertexBJetTags
ak4PFNegativeCombinedSecondaryVertexV2BJetTags = ak4PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
ak4PFPositiveCombinedSecondaryVertexV2BJetTags = ak4PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
# Soft-muon taggers.
ak4PFSoftPFMuonsTagInfos = ak4PFbTagger.SoftPFMuonsTagInfos
ak4PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
ak4PFSoftPFMuonBJetTags = ak4PFbTagger.SoftPFMuonBJetTags
ak4PFSoftPFMuonByIP3dBJetTags = ak4PFbTagger.SoftPFMuonByIP3dBJetTags
ak4PFSoftPFMuonByPtBJetTags = ak4PFbTagger.SoftPFMuonByPtBJetTags
ak4PFNegativeSoftPFMuonByPtBJetTags = ak4PFbTagger.NegativeSoftPFMuonByPtBJetTags
ak4PFPositiveSoftPFMuonByPtBJetTags = ak4PFbTagger.PositiveSoftPFMuonByPtBJetTags
ak4PFPatJetFlavourIdLegacy = cms.Sequence(ak4PFPatJetPartonAssociationLegacy*ak4PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#ak4PFPatJetFlavourAssociation = ak4PFbTagger.PatJetFlavourAssociation
#ak4PFPatJetFlavourId = cms.Sequence(ak4PFPatJetPartons*ak4PFPatJetFlavourAssociation)
# --- b-tagging sub-sequences: IP-based, secondary-vertex, negative-SV and
# soft-muon taggers, combined into the full ak4PFJetBtagging sequence. ---
ak4PFJetBtaggingIP = cms.Sequence(ak4PFImpactParameterTagInfos *
            (ak4PFTrackCountingHighEffBJetTags +
             ak4PFTrackCountingHighPurBJetTags +
             ak4PFJetProbabilityBJetTags +
             ak4PFJetBProbabilityBJetTags
             )
            )

ak4PFJetBtaggingSV = cms.Sequence(ak4PFImpactParameterTagInfos
            *
            ak4PFSecondaryVertexTagInfos
            * (ak4PFSimpleSecondaryVertexHighEffBJetTags+
                ak4PFSimpleSecondaryVertexHighPurBJetTags+
                ak4PFCombinedSecondaryVertexBJetTags+
                ak4PFCombinedSecondaryVertexV2BJetTags
              )
            )

# Negative/positive-tag variants used for mistag-rate estimation.
ak4PFJetBtaggingNegSV = cms.Sequence(ak4PFImpactParameterTagInfos
            *
            ak4PFSecondaryVertexNegativeTagInfos
            * (ak4PFNegativeSimpleSecondaryVertexHighEffBJetTags+
                ak4PFNegativeSimpleSecondaryVertexHighPurBJetTags+
                ak4PFNegativeCombinedSecondaryVertexBJetTags+
                ak4PFPositiveCombinedSecondaryVertexBJetTags+
                ak4PFNegativeCombinedSecondaryVertexV2BJetTags+
                ak4PFPositiveCombinedSecondaryVertexV2BJetTags
              )
            )

ak4PFJetBtaggingMu = cms.Sequence(ak4PFSoftPFMuonsTagInfos * (ak4PFSoftPFMuonBJetTags
                +
                ak4PFSoftPFMuonByIP3dBJetTags
                +
                ak4PFSoftPFMuonByPtBJetTags
                +
                ak4PFNegativeSoftPFMuonByPtBJetTags
                +
                ak4PFPositiveSoftPFMuonByPtBJetTags
              )
            )

# Soft-muon tagging is currently disabled in the combined sequence.
ak4PFJetBtagging = cms.Sequence(ak4PFJetBtaggingIP
            *ak4PFJetBtaggingSV
            *ak4PFJetBtaggingNegSV
#            *ak4PFJetBtaggingMu
            )
# --- PAT jets carrying gen matches, JECs and all b-tag discriminators ---
ak4PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("ak4PFJets"),
        genJetMatch          = cms.InputTag("ak4PFmatch"),
        genPartonMatch       = cms.InputTag("ak4PFparton"),
        jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak4PFcorr")),
        JetPartonMapSource   = cms.InputTag("ak4PFPatJetFlavourAssociationLegacy"),
        JetFlavourInfoSource   = cms.InputTag("ak4PFPatJetFlavourAssociation"),
        trackAssociationSource = cms.InputTag("ak4PFJetTracksAssociatorAtVertex"),
        useLegacyJetMCFlavour = True,
        discriminatorSources = cms.VInputTag(cms.InputTag("ak4PFSimpleSecondaryVertexHighEffBJetTags"),
            cms.InputTag("ak4PFSimpleSecondaryVertexHighPurBJetTags"),
            cms.InputTag("ak4PFCombinedSecondaryVertexBJetTags"),
            cms.InputTag("ak4PFCombinedSecondaryVertexV2BJetTags"),
            cms.InputTag("ak4PFJetBProbabilityBJetTags"),
            cms.InputTag("ak4PFJetProbabilityBJetTags"),
            #cms.InputTag("ak4PFSoftPFMuonByPtBJetTags"),
            #cms.InputTag("ak4PFSoftPFMuonByIP3dBJetTags"),
            cms.InputTag("ak4PFTrackCountingHighEffBJetTags"),
            cms.InputTag("ak4PFTrackCountingHighPurBJetTags"),
            ),
        jetIDMap = cms.InputTag("ak4PFJetID"),
        addBTagInfo = True,
        addTagInfos = True,
        addDiscriminators = True,
        addAssociatedTracks = True,
        addJetCharge = False,
        addJetID = False,
        getJetMCFlavour = True,
        addGenPartonMatch = True,
        addGenJetMatch = True,
        embedGenJetMatch = True,
        embedGenPartonMatch = True,
        # embedCaloTowers = False,
        # embedPFCandidates = True
        )

# N-subjettiness (tau1..tau3) computed on the same jets, R0 = 0.4,
# and attached to the PAT jets as userFloats.
ak4PFNjettiness = Njettiness.clone(
		    src = cms.InputTag("ak4PFJets"),
           R0  = cms.double( 0.4)
)
ak4PFpatJetsWithBtagging.userData.userFloats.src += ['ak4PFNjettiness:tau1','ak4PFNjettiness:tau2','ak4PFNjettiness:tau3']
# --- Ntuple producer: fills the jet tree (MC mode, gen jets, b-tag info) ---
ak4PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak4PFpatJetsWithBtagging"),
                                                             genjetTag = 'ak4HiGenJets',
                                                             rParam = 0.4,
                                                             matchJets = cms.untracked.bool(False),
                                                             matchTag = 'patJetsWithBtagging',
                                                             pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
                                                             trackTag = cms.InputTag("generalTracks"),
                                                             fillGenJets = True,
                                                             isMC = True,
                                                             doSubEvent = True,
                                                             useHepMC = cms.untracked.bool(False),
                                                             genParticles = cms.untracked.InputTag("genParticles"),
                                                             eventInfoTag = cms.InputTag("generator"),
                                                             doLifeTimeTagging = cms.untracked.bool(True),
                                                             doLifeTimeTaggingExtras = cms.untracked.bool(False),
                                                             bTagJetName = cms.untracked.string("ak4PF"),
                                                             jetName = cms.untracked.string("ak4PF"),
                                                             genPtMin = cms.untracked.double(5),
                                                             hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
                                                             doTower = cms.untracked.bool(True),
                                                             doSubJets = cms.untracked.bool(False),
                                                             doGenSubJets = cms.untracked.bool(False),
                                                             subjetGenTag = cms.untracked.InputTag("ak4GenJets"),
                                                             doGenTaus = cms.untracked.bool(False),
                                                             genTau1 = cms.InputTag("ak4GenNjettiness","tau1"),
                                                             genTau2 = cms.InputTag("ak4GenNjettiness","tau2"),
                                                             genTau3 = cms.InputTag("ak4GenNjettiness","tau3"),
                                                             doGenSym = cms.untracked.bool(False),
                                                             genSym = cms.InputTag("ak4GenJets","sym"),
                                                             genDroppedBranches = cms.InputTag("ak4GenJets","droppedBranches")
                                                             )
# --- Top-level sequences: full MC chain, reduced data chain, and the
# jec/mb aliases; the file-level default runs the minimum-bias (mb) chain. ---
ak4PFJetSequence_mc = cms.Sequence(
                                                  #ak4PFclean
                                                  #*
                                                  ak4PFmatch
                                                  #*
                                                  #ak4PFmatchGroomed
                                                  *
                                                  ak4PFparton
                                                  *
                                                  ak4PFcorr
                                                  *
                                                  #ak4PFJetID
                                                  #*
                                                  ak4PFPatJetFlavourIdLegacy
                                                  #*
                                                  #ak4PFPatJetFlavourId # Use legacy algo till PU implemented
                                                  *
                                                  ak4PFJetTracksAssociatorAtVertex
                                                  *
                                                  ak4PFJetBtagging
                                                  *
                                                  ak4PFNjettiness #No constituents for calo jets in pp. Must be removed for pp calo jets but I'm not sure how to do this transparently (Marta)
                                                  *
                                                  ak4PFpatJetsWithBtagging
                                                  *
                                                  ak4PFJetAnalyzer
                                                  )

# Data chain: no gen matching / flavour id, otherwise identical.
ak4PFJetSequence_data = cms.Sequence(ak4PFcorr
                                                    *
                                                    #ak4PFJetID
                                                    #*
                                                    ak4PFJetTracksAssociatorAtVertex
                                                    *
                                                    ak4PFJetBtagging
                                                    *
                                                    ak4PFNjettiness
                                                    *
                                                    ak4PFpatJetsWithBtagging
                                                    *
                                                    ak4PFJetAnalyzer
                                                    )

ak4PFJetSequence_jec = cms.Sequence(ak4PFJetSequence_mc)
ak4PFJetSequence_mb = cms.Sequence(ak4PFJetSequence_mc)

ak4PFJetSequence = cms.Sequence(ak4PFJetSequence_mb)
| [
"marta.verweij@cern.ch"
] | marta.verweij@cern.ch |
bbd4dc7142151d3c378e11906357393635ccc0eb | 01a682ab349df2690fd7ae6e918cb8e68b7aca44 | /train.py | b71c77dc926e16074db73763b62597aa42f95d5e | [
"MIT"
] | permissive | misads/torch_image_template | 4ecbeaa8c28764cab90b73101fb0309ae2856c8d | db55be6fcebdb6b0c5c739e505b8a7a2eb81c3c1 | refs/heads/master | 2020-09-22T09:18:48.737332 | 2020-01-07T13:36:47 | 2020-01-07T13:36:47 | 225,135,127 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,387 | py | """
PyTorch Image Template
Author: xuhaoyu@tju.edu.cn
File Structure:
.
├── train.py :Train and evaluation loop, errors and outputs visualization (Powered by TensorBoard)
├── test.py :Test
│
├── network
│ ├── Model.py :Define models, losses and parameter updating
│ └── *.py :Define networks
├── options
│ └── options.py :Define options
│
├── dataloader/ :Define Dataloaders
├── model_zoo :Commonly used models
├── utils
│ ├── misc_utils.py :System utils
│ └── torch_utils.py :PyTorch utils
│
├── checkpoints/<tag> :Trained checkpoints
├── logs/<tag> :Logs and TensorBoard event files
└── results/<tag> :Test results
Usage:
#### Train
python3 train.py --tag network_1 --epochs 800 --batch_size 16 --gpu_ids 1
#### Resume or Fine Tune
python3 train.py --load checkpoints/network_1 --which-epoch 500
#### test
python3 test.py --tag test_1 --dataset RESIDE
License: MIT
Last modified 12.24
"""
import os
import time
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from dataloader import dual_residual_dataset
from dataloader.image_folder import get_data_loader_folder
from eval import evaluate
from network import models
import pdb
from options import opt, logger
from utils.torch_utils import create_summary_writer, write_image, write_meters_loss, LR_Scheduler, tensor2im
import utils.misc_utils as utils
import torch
# ---------------------------------------------------------------------------
# Dataset locations: RESIDE indoor training set plus SOTS / real-world
# validation sets.
# ---------------------------------------------------------------------------
data_name = 'RESIDE'
data_root = './datasets/' + data_name + '/ITS/'
imlist_pth = './datasets/' + data_name + '/indoor_train_list.txt'
valroot = "./datasets/" + data_name + "/SOTS/nyuhaze500/"
val_list_pth = './datasets/' + data_name + '/sots_test_list.txt'
realroot = "./datasets/" + data_name + "/REAL/"
real_list_pth = './datasets/' + data_name + '/real.txt'

# dstroot for saving models.
# logroot for writting some log(s), if is needed.
save_root = os.path.join(opt.checkpoint_dir, opt.tag)
log_root = os.path.join(opt.log_dir, opt.tag)

utils.try_make_dir(save_root)
utils.try_make_dir(log_root)

# Debug mode: save/eval/log every epoch and shrink the dataset below.
if opt.debug:
    opt.save_freq = 1
    opt.eval_freq = 1
    opt.log_freq = 1

# Transform
transform = transforms.ToTensor()

# Dataloader
max_size = 9999999
if opt.debug:
    max_size = opt.batch_size * 10

train_dataset = dual_residual_dataset.ImageSet(data_root, imlist_pth,
                                               transform=transform, is_train=True,
                                               with_aug=opt.data_aug, crop_size=opt.crop, max_size=max_size)
dataloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=False, num_workers=5)

######################
# Val dataset
######################
val_dataset = dual_residual_dataset.ImageSet(valroot, val_list_pth,
                                             transform=transform)
val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)

######################
# Real (val) dataset
######################
real_dataloader = get_data_loader_folder(realroot, 1, train=False, num_workers=1, crop=False)

# Pick the model class by name, falling back to the default implementation.
if opt.model in models:
    Model = models[opt.model]
else:
    Model = models['default']

model = Model(opt)
# if len(opt.gpu_ids):
#     model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model = model.cuda(device=opt.device)
# optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D
# optimizer_G = model.g_optimizer

start_epoch = opt.which_epoch if opt.which_epoch else 0
model.train()

# Start training
print('Start training...')
start_step = start_epoch * len(dataloader)
global_step = start_step
total_steps = opt.epochs * len(dataloader)
start = time.time()

writer = create_summary_writer(log_root)

scheduler = None
if opt.lr_schedular is not None:
    scheduler = LR_Scheduler(opt.lr_schedular, opt.lr, opt.epochs, len(dataloader), warmup_epochs=opt.warmup_epochs)

for epoch in range(start_epoch, opt.epochs):
    for iteration, data in enumerate(dataloader):
        # NOTE: a leftover debug ``break`` used to sit here as the first
        # statement of the loop body, skipping every training batch; removed.
        global_step += 1

        ######################
        # lr_schedular
        ######################
        if opt.lr_schedular is not None:
            scheduler(model.g_optimizer, iteration, epoch)

        # Throughput-based ETA estimate for the progress bar.
        rate = (global_step - start_step) / (time.time() - start)
        remaining = (total_steps - global_step) / rate

        img, label, trans, _ = data
        img_var = Variable(img, requires_grad=False).cuda(device=opt.device)
        label_var = Variable(label, requires_grad=False).cuda(device=opt.device)
        trans_var = Variable(trans, requires_grad=False).cuda(device=opt.device)

        # Cleaning noisy images
        # cleaned, A, t = model.cleaner(img_var)
        fine, coarse_1, coarse_2, trans_1, trans_2 = model.update_G(img_var, label_var, trans_var)
        # Jt = torch.clamp(cleaned * t, min=.01, max=.99)
        # airlight = torch.clamp(A * (1-t), min=.01, max=.99)

        # Log the first few batches as images every `log_freq` epochs.
        if epoch % opt.log_freq == opt.log_freq - 1 and iteration < 5:
            write_image(writer, 'train/%d' % iteration, '0_input', tensor2im(img), epoch)
            write_image(writer, 'train/%d' % iteration, '1_fine', tensor2im(fine), epoch)
            write_image(writer, 'train/%d' % iteration, '2_coarse_1', tensor2im(coarse_1), epoch)
            write_image(writer, 'train/%d' % iteration, '3_coarse_2', tensor2im(coarse_2), epoch)
            # write_image(writer, 'train/%d' % iteration, '4_trans_1', tensor2im(coarse_2), epoch)
            # write_image(writer, 'train/%d' % iteration, '5_trans_2', tensor2im(coarse_2), epoch)
            write_image(writer, 'train/%d' % iteration, '8_target', tensor2im(label_var), epoch)
            write_image(writer, 'train/%d' % iteration, '9_trans', tensor2im(trans_var), epoch)

        # update
        pre_msg = 'Epoch:%d' % epoch
        msg = '(loss) %s ETA: %s' % (str(model.avg_meters), utils.format_time(remaining))
        utils.progress_bar(iteration, len(dataloader), pre_msg, msg)
        # print(pre_msg, msg)
        # print('Epoch(' + str(epoch + 1) + '), iteration(' + str(iteration + 1) + '): ' +'%.4f, %.4f' % (-ssim_loss.item(),
        # l1_loss.item()))

    # write_loss(writer, 'train', 'F1', 0.78, epoch)
    write_meters_loss(writer, 'train', model.avg_meters, epoch)
    logger.info('Train epoch %d, (loss) ' % epoch + str(model.avg_meters))

    # Save a checkpoint every `save_freq` epochs and on the final epoch.
    if epoch % opt.save_freq == opt.save_freq - 1 or epoch == opt.epochs-1:
        model.save(epoch)

    if epoch % opt.eval_freq == (opt.eval_freq - 1):
        model.eval()
        # evaluate(model.cleaner, val_dataloader, epoch + 1, writer)
        evaluate(model.cleaner, real_dataloader, epoch + 1, writer, 'SINGLE')
        model.train()

    # NOTE: removed a leftover ``pdb.set_trace()`` here -- it suspended the
    # process at the end of every epoch, making unattended training impossible.

    # if epoch in [700, 1400]:
    #     for param_group in model.g_optimizer.param_groups:
    #         param_group['lr'] *= 0.1
"523131316@qq.com"
] | 523131316@qq.com |
86310d2d1dae7fc2062f1ed8485374c8c5c31c55 | 744c3b66611b08782fcdd9d66261c4d55b00d426 | /examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/torque_stance_leg_controller.py | 87ebed4587aec4165966fa11f806621f140c7eea | [
"Zlib"
] | permissive | erwincoumans/bullet3 | 4ff9e0aa64b641c65b57b26f415dd69dbfb12256 | 6d181d78a5c7be8714c74055cddcf63d5ccef70a | refs/heads/master | 2023-03-10T14:58:18.072562 | 2023-02-24T18:32:53 | 2023-02-24T18:32:53 | 31,621,748 | 103 | 29 | NOASSERTION | 2019-02-25T17:31:00 | 2015-03-03T21:15:54 | C++ | UTF-8 | Python | false | false | 8,595 | py | # Lint as: python3
"""A torque based stance controller framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import logging
from typing import Any, Sequence, Tuple
import gin
import numpy as np
from pybullet_envs.minitaur.agents.baseline_controller import gait_generator as gait_generator_lib
from pybullet_envs.minitaur.agents.baseline_controller import leg_controller
#from pybullet_envs.minitaur.agents.baseline_controller.convex_mpc.python import convex_mpc
#from google3.util.task.python import error
# mpc_osqp is shipped by the external `motion_imitation` project; exit with
# install instructions if it is missing, since the controller cannot run
# without the compiled MPC solver.
try:
  import mpc_osqp as convex_mpc # pytype: disable=import-error
except: #pylint: disable=W0702
  print("You need to install motion_imitation")
  print("or use pip3 install motion_imitation --user")
  print("see also https://github.com/google-research/motion_imitation")
  import sys
  sys.exit()
# Each foot contact force is a 3-vector (fx, fy, fz).
_FORCE_DIMENSION = 3
# The QP weights in the convex MPC formulation. See the MIT paper for details:
# https://ieeexplore.ieee.org/document/8594448/
# Intuitively, this is the weights of each state dimension when tracking a
# desired CoM trajectory. The full CoM state is represented by
# (roll_pitch_yaw, position, angular_velocity, velocity, gravity_place_holder).
_MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0.5, 0.5, 0.2, 0.2, 0.2, 0.1, 0)
# Rollout length of the MPC (steps) and spacing between steps (seconds).
_PLANNING_HORIZON_STEPS = 10
_PLANNING_TIMESTEP = 0.025
#_MPC_CONSTRUCTOR = functools.partial(
#    convex_mpc.ConvexMpc, qp_solver_name=convex_mpc.QPOASES)
@gin.configurable
class TorqueStanceLegController(leg_controller.LegController):
  """A torque based stance leg controller framework.
  Takes in high level parameters like walking speed and turning speed, and
  generates necessary the torques for stance legs.
  """
  def __init__(
      self,
      robot: Any,
      gait_generator: Any,
      state_estimator: Any,
      desired_speed: Tuple[float] = (0, 0),
      desired_twisting_speed: float = 0,
      desired_roll_pitch: Tuple[float] = (0, 0),
      desired_body_height: float = 0.45,
      body_mass: float = 220 / 9.8,
      body_inertia: Tuple[float] = (0.183375, 0, 0, 0, 0.6267, 0, 0, 0,
                                    0.636175),
      num_legs: int = 4,
      friction_coeffs: Sequence[float] = (0.5, 0.5, 0.5, 0.5),
      qp_weights: Sequence[float] = _MPC_WEIGHTS,
      planning_horizon: int = _PLANNING_HORIZON_STEPS,
      # Annotation corrected from `int`: the default (0.025) is seconds, a float.
      planning_timestep: float = _PLANNING_TIMESTEP,
  ):
    """Initializes the class.
    Tracks the desired position/velocity of the robot by computing proper joint
    torques using MPC module.
    Args:
      robot: A robot instance.
      gait_generator: Used to query the locomotion phase and leg states.
      state_estimator: Estimate the robot states (e.g. CoM velocity).
      desired_speed: desired CoM speed in x-y plane.
      desired_twisting_speed: desired CoM rotating speed in z direction.
      desired_roll_pitch: desired CoM roll and pitch.
      desired_body_height: The standing height of the robot.
      body_mass: The total mass of the robot.
      body_inertia: The inertia matrix in the body principle frame. We assume
        the body principle coordinate frame has x-forward and z-up.
      num_legs: The number of legs used for force planning.
      friction_coeffs: The friction coeffs on the contact surfaces.
      qp_weights: The weights used in solving the QP problem.
      planning_horizon: Number of steps to roll-out in the QP formulation.
      planning_timestep: Timestep between each step in the QP formulation.
    """
    self._robot = robot
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self._desired_speed = desired_speed
    self._desired_twisting_speed = desired_twisting_speed
    self._desired_roll_pitch = desired_roll_pitch
    self._desired_body_height = desired_body_height
    self._body_mass = body_mass
    self._num_legs = num_legs
    self._friction_coeffs = np.array(friction_coeffs)
    # Latched to True when the solver raises in get_action(); exposed through
    # the qp_solver_fail property so callers can detect a failed solve.
    self._qp_solver_fail = False
    self._com_estimate_leg_indices = None
    qp_solver = convex_mpc.QPOASES #convex_mpc.OSQP #
    # Converted to plain Python lists before handing off to the compiled
    # ConvexMpc binding (presumably its expected argument type — see mpc_osqp).
    body_inertia_list = list(body_inertia)
    weights_list = list(qp_weights)
    self._mpc = convex_mpc.ConvexMpc(
        body_mass,
        body_inertia_list,
        self._num_legs,
        planning_horizon,
        planning_timestep,
        weights_list,
        1e-6,  # NOTE(review): presumably a solver regularization/tolerance — confirm against mpc_osqp.
        qp_solver
    )
  def reset(self, current_time):
    """Clears the solver-failure flag and cached leg indices; time is unused."""
    del current_time
    self._qp_solver_fail = False
    self._com_estimate_leg_indices = None
  def update(self, current_time):
    """No-op: this controller keeps no per-step internal state."""
    del current_time
  def get_action(self):
    """Computes the torque for stance legs.
    Builds the desired CoM targets from the current setpoints, runs one MPC
    solve, and maps the resulting per-leg contact forces to joint torques.
    Returns:
      A dict mapping joint id to a 5-tuple motor command whose 4th entry is
      the damping gain from the motor model and 5th entry the feedforward
      torque (assumed hybrid command layout — confirm against the motor
      model); the other entries are zero. On solver failure the commanded
      forces (hence torques) are zero and qp_solver_fail is latched.
    """
    desired_com_position = np.array((0., 0., self._desired_body_height),
                                    dtype=np.float64)
    desired_com_velocity = np.array(
        (self.desired_speed[0], self.desired_speed[1], 0.), dtype=np.float64)
    desired_com_roll_pitch_yaw = np.array(
        (self.desired_roll_pitch[0], self.desired_roll_pitch[1], 0.),
        dtype=np.float64)
    desired_com_angular_velocity = np.array(
        (0., 0., self.desired_twisting_speed), dtype=np.float64)
    # 1 for legs the gait generator wants on the ground (stance or early
    # contact), 0 for swing legs.
    foot_contact_state = np.array(
        [(leg_state == gait_generator_lib.LegState.STANCE or
          leg_state == gait_generator_lib.LegState.EARLY_CONTACT)
         for leg_state in self._gait_generator.desired_leg_state],
        dtype=np.int32)
    # We use the body yaw aligned world frame for MPC computation.
    com_roll_pitch_yaw = np.array(
        self._robot.base_roll_pitch_yaw, dtype=np.float64)
    com_roll_pitch_yaw[2] = 0
    #try:
    # Empty array signals "no external height estimate" to the solver;
    # filled in only when the state estimator provides one.
    estimated_com_position = np.array(())
    if hasattr(self._state_estimator, "estimated_com_height"):
      estimated_com_position = np.array(
          (0, 0, self._state_estimator.estimated_com_height))
    try:
      predicted_contact_forces = self._mpc.compute_contact_forces(
          estimated_com_position, #com_position
          np.asarray(self._state_estimator.com_velocity_body_yaw_aligned_frame,
                     dtype=np.float64), #com_velocity
          np.array(com_roll_pitch_yaw, dtype=np.float64), #com_roll_pitch_yaw
          # Angular velocity in the yaw aligned world frame is actually different
          # from rpy rate. We use it here as a simple approximation.
          np.asarray(self._robot.base_roll_pitch_yaw_rate,
                     dtype=np.float64), #com_angular_velocity
          foot_contact_state, #foot_contact_states
          np.array(self._robot.foot_positions(
              position_in_world_frame=False).flatten(),
                   dtype=np.float64), #foot_positions_base_frame
          self._friction_coeffs, #foot_friction_coeffs
          desired_com_position, #desired_com_position
          desired_com_velocity, #desired_com_velocity
          desired_com_roll_pitch_yaw, #desired_com_roll_pitch_yaw
          desired_com_angular_velocity #desired_com_angular_velocity
      )
    except:# error.StatusNotOk as e:
      # Fall back to zero forces so the caller still gets a valid (if inert)
      # action; the failure is observable via qp_solver_fail.
      logging.error("Error in Torque Stance Leg")#e.message)
      self._qp_solver_fail = True
      predicted_contact_forces = np.zeros(self._num_legs * _FORCE_DIMENSION)
    # Split the stacked force vector into one 3-D force per leg.
    contact_forces = {}
    for i in range(self._num_legs):
      contact_forces[i] = np.array(
          predicted_contact_forces[i * _FORCE_DIMENSION:(i + 1) *
                                   _FORCE_DIMENSION])
    _, kds = self._robot.motor_model.get_motor_gains()
    action = {}
    for leg_id, force in contact_forces.items():
      # Convert each contact force to joint torques for that leg's motors.
      motor_torques = self._robot.map_contact_force_to_joint_torques(
          leg_id, force)
      for joint_id, torque in motor_torques.items():
        action[joint_id] = (0, 0, 0, kds[joint_id], torque)
    return action
  # High-level command setpoints below are plain read/write properties so the
  # locomotion layer can retarget the controller between get_action() calls.
  @property
  def qp_solver_fail(self):
    return self._qp_solver_fail
  @property
  def desired_speed(self):
    return self._desired_speed
  @desired_speed.setter
  def desired_speed(self, speed):
    self._desired_speed = speed
  @property
  def desired_twisting_speed(self):
    return self._desired_twisting_speed
  @desired_twisting_speed.setter
  def desired_twisting_speed(self, twisting_speed):
    self._desired_twisting_speed = twisting_speed
  @property
  def desired_roll_pitch(self):
    return self._desired_roll_pitch
  @desired_roll_pitch.setter
  def desired_roll_pitch(self, roll_pitch):
    self._desired_roll_pitch = roll_pitch
  @property
  def desired_body_height(self):
    return self._desired_body_height
  @desired_body_height.setter
  def desired_body_height(self, body_height):
    self._desired_body_height = body_height
| [
"erwin.coumans@gmail.com"
] | erwin.coumans@gmail.com |
24eca094d917207229a1b23a1435763f5d8d962d | a84e1ed67ef2592cf22f7d19cdddaf16700d6a8e | /graveyard/web/VNET/branches/vnf/content/actors/obsolete/vaspPhon.odb | 0b915dda2a74f0472b61de90044a53085518ebfd | [] | no_license | danse-inelastic/inelastic-svn | dda998d7b9f1249149821d1bd3c23c71859971cc | 807f16aa9510d45a45360d8f59f34f75bb74414f | refs/heads/master | 2016-08-11T13:40:16.607694 | 2016-02-25T17:58:35 | 2016-02-25T17:58:35 | 52,544,337 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 558 | odb | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from vnf.components.VaspPhon import VaspPhon
def actor():
    """Return a new VaspPhon component instance.

    Presumably the odb factory entry point looked up by the pyre framework —
    confirm against the framework's component-loading convention.
    """
    return VaspPhon()
# version
__id__ = "$Id: greet.odb,v 1.1.1.1 2006-11-27 00:09:15 aivazis Exp $"
# End of file
| [
"jbrkeith@gmail.com"
] | jbrkeith@gmail.com |
a5678355479cffd9ab84a011fd49e713ecd02ad5 | 177c090fffc3baba54db88fd51f4f21c74f6acb3 | /manage.py | 334c2decfb81708d1bd7a6599be5ddd095d697d7 | [] | no_license | Deep-sea-boy/iHome | d68dcd196c204d63766fc75bdc214fbd8fa6177c | 0cb9a23e864e9b8b82126db6e6bedf62a52b73d7 | refs/heads/master | 2020-03-11T02:52:39.851372 | 2018-04-23T02:31:10 | 2018-04-23T02:31:10 | 130,633,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #coding:utf-8
from flask_migrate import Migrate,MigrateCommand
from flask_script import Manager
from iHome import get_app,db
# Build the app with the development config and bind it to the SQLAlchemy db
# so Flask-Migrate can generate/apply schema migrations.
app = get_app('dev')
Migrate(app, db)
manager = Manager(app)
# Expose the migration commands as the `db` sub-command of this script.
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
    # Parenthesized print() is valid in both Python 2 (this file's dialect,
    # single argument) and Python 3; the original `print app.url_map`
    # statement was a syntax error under Python 3.
    print(app.url_map)
    manager.run()
| [
"youremail@example.com"
] | youremail@example.com |
b29ec90a8c342083bb1c9315e24d1a38f21f3c5d | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/twilio/build/lib/twilio/rest/conversations/v1/configuration/__init__.py | 662c9c24e4e32fe46eca6dd20efe791d972d6ff9 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:462dd494db244faa218d38ab9f086e0defd945bc165b5d99f3410b331d22010e
size 11015
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
d1dd441e212f6b5630bf450ef5d17cd930f65e8f | 0e94b21a64e01b992cdc0fff274af8d77b2ae430 | /function/最大逆向匹配.py | 7bbeb95f5295f34cf11bb6d1a625805a4c92e954 | [] | no_license | yangnaGitHub/LearningProcess | 1aed2da306fd98f027dcca61309082f42b860975 | 250a8b791f7deda1e716f361a2f847f4d12846d3 | refs/heads/master | 2020-04-15T16:49:38.053846 | 2019-09-05T05:52:04 | 2019-09-05T05:52:04 | 164,852,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 11:37:30 2019
@author: natasha_yang
@e-mail: ityangna0402@163.com
"""
# Segmentation method: reverse maximum matching (逆向最大匹配)
class RMM():
    """Reverse maximum matching (RMM) word segmenter.

    Greedily matches the longest dictionary word that ends at the current
    position, scanning the input from right to left.  Tokens are appended in
    the order they are found, i.e. from the END of the string towards the
    start — callers wanting reading order must reverse the result.
    """

    def __init__(self, dict_path):
        """Load one word per line (UTF-8) and record the longest word length."""
        self.dictionary = set()
        self.maximum = 0
        with open(dict_path, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                word = raw_line.strip()
                if not word:
                    continue  # skip blank lines
                self.dictionary.add(word)
                if len(word) > self.maximum:
                    self.maximum = len(word)

    def cut(self, cut_str):
        """Segment ``cut_str``; characters not in the dictionary become
        single-character tokens."""
        pieces = []
        end = len(cut_str)
        while end > 0:
            matched = None
            # Try the longest window first, shrinking one character at a time;
            # the window never extends past the start of the string.
            for size in range(min(self.maximum, end), 0, -1):
                candidate = cut_str[end - size:end]
                if candidate in self.dictionary:
                    matched = candidate
                    break
            if matched is None:
                # No dictionary hit: emit the single trailing character.
                matched = cut_str[end - 1]
            pieces.append(matched)
            end -= len(matched)
        return pieces
"ityangna0402@163.com"
] | ityangna0402@163.com |
0c42f661e88b5e9e2c9be1f70ac0b4dd7b1fbd0d | 08296e4f0139bd23ec73836996e3637eda666a68 | /modelsFormsDemo/modelforms/forms.py | ad6e17a28c64283db488cff139dd2c04020dd4f9 | [] | no_license | 146789/projects | 4589efd8f2f11a1beb487ef06a7556d49ed95c72 | a288e39cd088ea2717017285fd68d8b42cf4d493 | refs/heads/main | 2023-02-02T13:54:34.360222 | 2020-12-19T08:16:11 | 2020-12-19T08:16:11 | 322,798,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django import forms
from modelforms.models import Project
class projectForm(forms.ModelForm):
    """ModelForm automatically built from the Project model (all fields)."""
    class Meta:
        # Derive the form fields directly from the Project model definition.
        model = Project
        fields = '__all__'
| [
"34913079+146789@users.noreply.github.com"
] | 34913079+146789@users.noreply.github.com |
d78b64e291c878941f42ce614f2374cf3d1e5db0 | 060967fa3e6e390ac0504172e6dea8421ffb9d98 | /2022/python2022/tests/test_day05.py | 6e3cdc045d3b37efe23b317fb4ef6d05472cd6dd | [] | no_license | mreishus/aoc | 677afd18521b62c9fd141a45fec4b7bc844be259 | e89db235837d2d05848210a18c9c2a4456085570 | refs/heads/master | 2023-02-22T12:00:52.508701 | 2023-02-09T04:37:50 | 2023-02-09T04:39:44 | 159,991,022 | 16 | 3 | null | 2023-01-05T10:00:46 | 2018-12-01T22:00:22 | Python | UTF-8 | Python | false | false | 609 | py | #!/usr/bin/env python3
"""
Test Day05.
"""
import unittest
from aoc.day05 import Day05
class TestDay05(unittest.TestCase):
    """Regression tests pinning the known Advent of Code day 5 answers."""
    def test_part1(self):
        """Part 1 answers for the sample and the full puzzle input.

        Input paths are relative to the working directory — run from the
        python2022 package root so the ../inputs layout resolves.
        """
        self.assertEqual(Day05.part1("../inputs/05/input_small.txt"), "CMZ")
        self.assertEqual(Day05.part1("../inputs/05/input.txt"), "RNZLFZSJH")
    def test_part2(self):
        """Part 2 answers for the sample and the full puzzle input."""
        self.assertEqual(Day05.part2("../inputs/05/input_small.txt"), "MCD")
        self.assertEqual(Day05.part2("../inputs/05/input.txt"), "CNSFCGJSM")
if __name__ == "__main__":
    # Allow running this file directly: `python test_day05.py`.
    unittest.main()
| [
"mreishus@users.noreply.github.com"
] | mreishus@users.noreply.github.com |
254f2622c359329e542c446d230344005908e2cf | 0bd7c1f7bf6da5ef92b9013e1d913140f0249dfa | /cecilia-python/company-title/alibaba/2020/LongestStringNote.py | bb33251577054be3950acdf1f824040773ccc3af | [] | no_license | Cecilia520/algorithmic-learning-leetcode | f1fec1fae71c4cf7410122f5ce969e829f451308 | 32941ee052d0985a9569441d314378700ff4d225 | refs/heads/master | 2022-05-02T03:00:57.505672 | 2022-03-19T09:51:28 | 2022-03-19T09:51:28 | 229,673,810 | 7 | 1 | null | 2022-03-19T09:34:57 | 2019-12-23T04:04:04 | Python | UTF-8 | Python | false | false | 2,909 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : LongestStringNote.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/3/30 22:09 cecilia 1.0 最长字符串音符统计
问题描述:
小强最近喜欢弹钢琴,一段旋律中的每个音符都可以用一个小写英文字母表示。
当组成一段段旋律的字符ASCII码是非递减的,旋律被称为是高昂的,例如aaa,bcd。
现在小强已经学会了n段高昂的旋律,他想利用他们拼接出一个尽可能长的高昂的旋律,问最长长度是多少?
输入描述:n行每行一个字符串,保证每个字符串中的字符的ASCII是非递减的。n在[1, 1000,000],
保证所有字符串长度之和不超过1000,000且仅由小写字母构成。
示例1:
> Input:
4
aaa
bcd
zzz
bcdef
> OutPut: 11。将1,4,3段字符拼接在一起,长度为11。
示例2:
输入:
4
abghkl
behklmmm
hopqsttz
yzzz
输出:
12
"""
class Solution:
    """Solver for the "longest concatenated ascending melody" problem."""

    def getMaxLongestNote(self, n, notes):
        """Return the maximum total length of a chained melody.

        Each melody in ``notes`` is internally non-decreasing.  Melody B may
        follow melody A when B's first character is >= A's last character, so
        the concatenation stays non-decreasing.  We want the longest possible
        concatenation (each melody used at most once).

        Bug fix: the previous DP unconditionally overwrote ``dp[i]`` with
        ``maxL + len(notes[i])`` where ``maxL`` stayed 0, so it effectively
        returned the longest single melody (e.g. 5 instead of 11 for the
        sample).  It also crashed with IndexError for ``n == 0``.

        Args:
            n: number of melodies (kept for interface compatibility).
            notes: list of non-decreasing lowercase strings.

        Returns:
            The maximum total length, or 0 for empty input.

        Complexity: O(n log n) for the sort plus O(26 n) for the scan, which
        meets the stated bound of n up to 1e6.
        """
        if n <= 0 or not notes:
            return 0
        # Sort lexicographically: if B can follow A (B[0] >= A[-1] >= A[0])
        # then A[0] <= B[0], so A sorts no later than B except in the
        # all-same-character case, where chaining order does not change the
        # total.  Hence every optimal chain can be scanned left-to-right.
        ordered = sorted(notes)
        # best[c] = longest total length of a chain whose last character is
        # chr(ord('a') + c).
        best = [0] * 26
        base = ord('a')
        for note in ordered:
            first = ord(note[0]) - base
            last = ord(note[-1]) - base
            # This melody may extend any chain ending in a char <= its first.
            extend = max(best[:first + 1])
            best[last] = max(best[last], extend + len(note))
        return max(best)
if __name__ == '__main__':
    # Read the number of melodies, then one melody string per line from stdin.
    n = int(input())
    notes = []
    for i in range(n):
        notes.append(input())
    # print(notes)
    s = Solution()
    res = s.getMaxLongestNote(n, notes)
    # Print the maximum total length of a valid concatenation.
    print(res)
| [
"cc15572018516@163.com"
] | cc15572018516@163.com |
1448d856879ce5cf3724a82a93410eb12c35ce47 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_abominating.py | cf50c6ad0533dcdabbfe02cae9118aac20902ad1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
# class header
class _ABOMINATING():
def __init__(self,):
self.name = "ABOMINATING"
self.definitions = abominate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['abominate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aca024609ff1a5d1f0412312402ccbc8796917f4 | c191c82ca5b67d2caf447e16e6d2368404fb6730 | /collegeproject/faculty/forms.py | 03a3c44e6c8587059cf6254f5dcdbfbf8acccb71 | [] | no_license | arunthankachan07/DjangoProjects | 070457fe0afeaea0633ab674b311f220fa6bec83 | 08bd1925ff2d882876b79bc0d8820f033dde3bb3 | refs/heads/master | 2023-04-22T07:26:19.444894 | 2021-05-10T06:36:38 | 2021-05-10T06:36:38 | 361,839,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from django import forms
class FacultyRegistrationForm(forms.Form):
    """Plain (non-model) form collecting faculty sign-up details."""
    # Display name of the faculty member.
    firstname=forms.CharField(max_length=120)
    # Login identifier; uniqueness is not enforced here — TODO confirm the view checks it.
    username=forms.CharField(max_length=120)
    # NOTE(review): renders as a plain text input; consider widget=forms.PasswordInput.
    password=forms.CharField(max_length=100)
class FacultyLoginForm(forms.Form):
    """Credentials form for faculty login."""
    username=forms.CharField(max_length=120)
    # NOTE(review): renders as a plain text input; consider widget=forms.PasswordInput.
    password=forms.CharField(max_length=100)
| [
"arunkaripuzha07@gmail.com"
] | arunkaripuzha07@gmail.com |
f145fc7349b8ccd698ec345a9ddca5d0ea75c128 | d8fd66452f17be82b964f9a93577dbaa2fa23451 | /movie/loader.py | 7cfb7a39dc2d8145dffa36be1475b34d9e244423 | [] | no_license | Dawinia/gp_DA_movie | 8b7575a54502896f5658538563f3f1f8cfe38772 | e0253cc8bc16daf1d32b9c861f7fcb03510937f6 | refs/heads/master | 2023-05-25T11:49:39.422657 | 2021-12-14T03:26:35 | 2021-12-14T03:26:35 | 233,504,205 | 3 | 0 | null | 2023-05-22T22:44:11 | 2020-01-13T03:33:48 | Python | UTF-8 | Python | false | false | 106 | py | # encoding: utf-8
from scrapy.loader import ItemLoader
class BoxOfficeItemLoader(ItemLoader):
pass
| [
"dawinialo@163.com"
] | dawinialo@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.