blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a8f79e485a62acd559aacca26284916a6d7816cc | 30575ea2c63b40f5c578acc3dd3aa306c539f0b8 | /dialog/schema/factories/conditions/results_count.py | e13a9bbe9dd13e2689486c3cb6a17fd639f0ae58 | [] | no_license | robdefeo/dialog | 87b8cff9b48fd6980ab7ad167823de38a40c91bb | 1db84ca439d226e9d9222d76d825b6c92f56edd8 | refs/heads/master | 2021-05-30T11:39:58.053586 | 2016-02-07T22:22:55 | 2016-02-07T22:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from dialog.elements import Condition
from dialog.schema.factories.variables import NAME_RESULTS_COUNT
class ResultsCountConditions:
    """Factory helpers that build Condition objects over the results-count variable."""

    @staticmethod
    def equals_zero():
        """Condition that holds when the results count is exactly zero."""
        kwargs = dict(name=NAME_RESULTS_COUNT, operator="EQUALS", root_text="0")
        return Condition(**kwargs)

    @staticmethod
    def less_than(value):
        """Condition that holds when the results count is below *value*."""
        # NOTE(review): the operator string "LESS_THEN" looks like a typo for
        # "LESS_THAN" — kept byte-identical; confirm against the Condition schema.
        kwargs = dict(name=NAME_RESULTS_COUNT, operator="LESS_THEN", root_text=value)
        return Condition(**kwargs)

    # @staticmethod
    # def has_value():
    #     return Condition(name="Color_Preference", operator="HAS_VALUE")
| [
"robertodefeo@hotmail.com"
] | robertodefeo@hotmail.com |
63bfbaf1cc2e5f3abf32344a9ebfe404aa6104e6 | 5bf4a43469b8f8ddeb924a1a7a1073a804151fef | /InputGUI/VideoPlayer.py | 8b4c11b2c13e174d2e23ae68bf7e7e89f2d82eb9 | [] | no_license | MSauerM/CVFouldetection | b52eee085a12630a8f32452edf6f6d86ec48b7d3 | 15d4fcbb1826d7fb9bb640b10981599fef627903 | refs/heads/master | 2023-07-25T14:41:00.179637 | 2021-09-01T20:53:56 | 2021-09-01T20:53:56 | 357,150,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | import sys
from PyQt5.QtCore import QDir, Qt, QUrl
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QMainWindow, QStyle, QPushButton, QSlider, QWidget, QHBoxLayout, QVBoxLayout
'''angelehnt an https://pythonprogramminglanguage.com/pyqt5-video-widget/'''
class VideoPlayer(QMainWindow):
    """Minimal video player window: a QVideoWidget plus a play/pause button
    and a seek slider.

    Based on https://pythonprogramminglanguage.com/pyqt5-video-widget/
    """

    def __init__(self, parent=None):
        super(VideoPlayer, self).__init__(parent)
        self.setWindowTitle("Video Player")

        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        videoWidget = QVideoWidget()

        # Play/pause button; disabled until a media file is loaded.
        self.playButton = QPushButton()
        self.playButton.setEnabled(False)
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.play)

        # Seek slider; its range is set once the media duration is known.
        self.positionSlider = QSlider(Qt.Horizontal)
        self.positionSlider.setRange(0, 0)
        self.positionSlider.sliderMoved.connect(self.setPosition)

        mainWidget = QWidget(self)
        self.setCentralWidget(mainWidget)

        controlLayout = QHBoxLayout()
        controlLayout.setContentsMargins(0, 0, 0, 0)
        controlLayout.addWidget(self.playButton)
        controlLayout.addWidget(self.positionSlider)

        layout = QVBoxLayout()
        layout.addWidget(videoWidget)
        layout.addLayout(controlLayout)
        mainWidget.setLayout(layout)

        self.mediaPlayer.setVideoOutput(videoWidget)
        self.mediaPlayer.stateChanged.connect(self.mediaStateChanged)
        self.mediaPlayer.positionChanged.connect(self.positionChanged)
        # BUGFIX: without this connection the slider range stayed at (0, 0)
        # forever, so seeking was impossible.
        self.mediaPlayer.durationChanged.connect(self.durationChanged)

    def play(self):
        """Toggle between playing and paused."""
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.mediaPlayer.pause()
        else:
            self.mediaPlayer.play()

    def positionChanged(self, position):
        """Keep the slider in sync with the current playback position (ms)."""
        self.positionSlider.setValue(position)

    def durationChanged(self, duration):
        """Resize the seek slider once the media duration (ms) is known."""
        self.positionSlider.setRange(0, duration)

    def mediaStateChanged(self, state):
        """Swap the play/pause icon to reflect the new player state."""
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.playButton.setIcon(
                self.style().standardIcon(QStyle.SP_MediaPause))
        else:
            self.playButton.setIcon(
                self.style().standardIcon(QStyle.SP_MediaPlay))

    def setPosition(self, position):
        """Seek to *position* (ms); driven by the slider."""
        self.mediaPlayer.setPosition(position)

    def loadFile(self, fileName):
        """Load a local media file and enable the play button."""
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(fileName)))
        # BUGFIX: the play button was created disabled and never re-enabled,
        # so playback could never be started.
        self.playButton.setEnabled(True)
| [
"matthias.sauer97@gmx.net"
] | matthias.sauer97@gmx.net |
12484dd307d23b9f54f7be7010d4772395a2d8c2 | 97118a484b20e188e6469fa8652f2a20f159745d | /haizhicommon/hzlib/api_zhidao_0627.py | cb9f6bf13b44a8fd9bfac2d4cc2e0b67124c4da8 | [] | no_license | Justinyj/ruyiwebcrawl | 7cf7805d2d1b4a6c3fc7341ab47a2aba096a5323 | 6f7205b00f1a105f4505cf4ee571f2c53762dc3e | refs/heads/master | 2020-07-13T07:24:23.531659 | 2016-11-16T02:18:02 | 2016-11-16T02:18:02 | 73,888,981 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 26,576 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import collections
import codecs
import datetime
import json
import re
import time
import random
import urllib
import difflib
#import distance
import libfile
from parsers.zhidao_parser import parse_search_json_v0615
def getTheFile(filename):
    """Return the path of *filename* relative to this module's directory."""
    here = os.path.abspath(os.path.dirname(__file__))
    return here + "/" + filename
class ZhidaoNlp():
    """Text utilities for filtering and classifying Baidu Zhidao questions/answers.

    Loads curated word lists (sensitive "skip" words and baike white/black
    lists) from ``model/*.human.txt`` files, registers each word with jieba so
    it segments as a single token, and exposes helpers to detect skip words,
    classify "baike" (encyclopedic) questions, and filter/rank Q&A pairs.
    Written for Python 2 (print statements, ``unicode``/``ur""`` literals).
    """

    def __init__(self, debug=False):
        # debug: when truthy, helpers print intermediate segmentation results.
        self.debug = debug
        #print "<<<<<<<<<<<", debug
        import jieba
        #load text
        # Each name below is both a model-file stem and the attribute name the
        # resulting word set is stored under (via setattr below).
        words_lists =[
            "skip_words_all",
            "skip_words_zhidao",
            "baike_words_white",
            "baike_words_black",
        ]
        for words in words_lists:
            lines = set()
            filename = getTheFile("model/{}.human.txt".format(words))
            print filename
            lines = libfile.file2set(filename)
            # An optional "<name>_no" file lists entries to remove from the list.
            filename_no = getTheFile("model/{}_no.human.txt".format(words))
            if os.path.exists(filename_no):
                lines.difference_update( libfile.file2set(filename_no) )
            temp = set()
            for line in lines:
                # Only the first whitespace-separated token of each line is the word.
                temp.add( line.split(" ")[0] )
            print words, len(temp)
            setattr(self, words, temp)
            # Register each curated word so jieba keeps it as one token.
            for word in temp:
                jieba.add_word( word )
        #update skip_word
        # Extra skip words that extend (but are not persisted with) skip_words_all.
        skip_words_ext = libfile.file2set(getTheFile("model/skip_words_all_ext.human.txt"))
        self.skip_words_all.update(skip_words_ext)
        print "Number of skip words ", len(self.skip_words_all)
        self.jieba = jieba
        import jieba.posseg as pseg
        self.pseg = pseg

    def cut_text(self, text):
        """Segment *text* with jieba, decoding UTF-8 bytes to unicode first."""
        if not isinstance(text, unicode):
            text = text.decode("utf-8")
        return self.jieba.cut(text)

    def clean_sentence(self, sentence):
        """Normalize a half-width period between Chinese characters to a full-width one.

        NOTE(review): in this ``ur""`` raw string ``\\s`` matches a literal
        backslash followed by "s" (not whitespace), and in the non-raw
        replacement string "\1"/"\3" are octal escapes rather than group
        references — this substitution likely never fires as intended;
        confirm before relying on it.
        """
        temp = sentence
        #map_punc ={".":"。", "?":"?", "!":"!", ",":",", ":":":"}
        temp = re.sub(ur"([\u4E00-\u9FA5])\\s?(\.)\\s{0,5}([\u4E00-\u9FA5])","\1。\3",temp)
        return temp

    def detect_skip_words(self, text, skip_words=None, check_list=["skip_words_all"]):
        """Return the list of matched skip words in *text*, or [] when clean.

        Delegates to detect_skip_words_0624 and unwraps its first match group.
        NOTE(review): ``check_list`` is a shared mutable default; it is never
        mutated here, so this is safe but fragile.
        """
        ret = self.detect_skip_words_0624(text, skip_words=skip_words, check_list=check_list)
        #print ret
        if ret and ( ret[0]["group"] in ["skip_phrase" ] or ret[0]["group"] in check_list ):
            return ret[0]["match"]
        return []

    def detect_skip_words_0618(self, text, skip_words=None):
        """Older (v0618) variant: return the skip words found in *text* (set/list)."""
        # Hard-coded blocked phrase checked before any segmentation.
        m = re.search(u"啪啪啪", text)
        if m:
            return [m.group(0)]
        words = set(self.cut_text(text))
        if self.debug:
            print "detect_skip_words words", json.dumps(list(words), ensure_ascii=False)
        if skip_words is None:
            skip_words = self.skip_words_all
        ret = words.intersection(skip_words)
        if self.debug:
            print "detect_skip_words skip_words",json.dumps(list(ret), ensure_ascii=False)
        return ret

    def detect_skip_words_0624(self, text, skip_words=None, check_list=["skip_words_all"]):
        """Return ``[{"group": ..., "match": [...]}]`` for the first matching
        word group in *text*, or [] when nothing matches.

        A caller-supplied *skip_words* set takes precedence over the built-in
        groups named in *check_list* (attribute names on self).
        """
        # Hard-coded blocked phrase checked before any segmentation.
        m = re.search(u"啪啪啪", text)
        if m:
            return [{
                "group": "skip_phrase",
                "match":[m.group(0)]
            }]
        #print "<<<<<<<<", self.debug
        words = set(self.cut_text(text))
        if self.debug:
            print "detect_skip_words words", json.dumps(list(words), ensure_ascii=False)
        #print json.dumps(list(words), ensure_ascii=False)
        ret = []
        if skip_words is not None:
            item = {
                "group": "skip_words_user",
                "match": words.intersection(skip_words)
            }
            ret.append(item)
        else:
            # Check built-in groups in order; stop at the first group that matches.
            for group in check_list:
                item = {
                    "group": group,
                    "match": list(words.intersection( getattr(self, group) ))
                }
                if item["match"]:
                    ret.append(item)
                    break
        if self.debug:
            print "detect_skip_words_0624 ",json.dumps(ret, ensure_ascii=False)
        if ret and ret[0]["match"]:
            return ret
        else:
            return []

    def is_answer_bad(self, answer):
        """True when *answer* looks low-quality (contains '?', '。。。', '...' or '?')."""
        if not isinstance(answer, unicode):
            answer = answer.decode("utf-8")
        if re.search(ur"?|。。。|\.\.\.|\?", answer):
            return True
        return False

    def is_question_baike(self, question, query_filter=2, debug_item={}):
        """Dispatch to a baike-question classifier selected by *query_filter*.

        query_filter: 1 -> regex-only v0617; 2 -> v0618 with POS tagging;
        3 -> v0618 without POS tagging; any other value -> always True.
        NOTE(review): ``debug_item={}`` is a shared mutable default that
        callees write into.
        """
        if not isinstance(question, unicode):
            question = question.decode("utf-8")
        if query_filter == 1:
            return self.is_question_baike_0617(question)
        elif query_filter == 2:
            #print question, query_filter
            return self.is_question_baike_0618(question, use_pos = True, debug_item=debug_item)
        elif query_filter == 3:
            return self.is_question_baike_0618(question, use_pos = False, debug_item=debug_item)
        else:
            return True

    def is_question_baike_0617(self, question):
        """Regex-only (v0617) heuristic: True if *question* looks encyclopedic.

        Rejects personal/chatty questions ("you/I/what time/dad/mom"), then
        accepts interrogative or encyclopedia-style patterns.
        """
        if not question:
            return False
        if not isinstance(question, unicode):
            question = question.decode("utf-8")
        question_clean = self.clean_question(question)
        # Drop "our country/party/..." so the leading 我 does not reject it below.
        question_clean = re.sub(ur"我[国党们执]","", question_clean)
        if re.search(ur"你|我|几点|爸爸|妈妈", question_clean):
            return False
        elif re.search(ur"什么|最|第一|哪|谁|有没有|几|吗|如何|是|有多|[多最][快慢好坏强高少远长老久]|怎么?样?|啥|?",question):
            return True
        elif re.search(ur"百科|距离|历史|介绍|信息",question):
            return True
        else:
            return False

    def filter_chat(self, q, a):
        """Return a rejection label (u"外文") when the answer contains fewer
        than two Chinese characters, else False (keep the pair)."""
        qa = q+a
        a_zh = a
        # Keep only CJK ideographs; the second sub is redundant since the
        # punctuation it removes is already outside \u4E00-\u9FA5.
        a_zh = re.sub(ur"[^\u4E00-\u9FA5]","", a_zh)
        a_zh = re.sub(ur"[,。?!;:]","", a_zh)
        if len(a_zh) < 2:
            return u"外文"
        return False

    def get_chat_label(self, q, a):
        """Label a Q&A pair with the skip-word group it matches, or u"" if clean."""
        qa = q+a
        # group id -> human-readable label used in the returned string.
        map_data = {"skip_phrase": "词组", "skip_words_all": "敏感", "skip_words_zhidao": "知道" }
        ret = self.detect_skip_words_0624(qa, check_list=["skip_words_zhidao", "skip_words_all"])
        if ret:
            return u"{}:{}".format(map_data.get(ret[0]["group"]),u",".join(ret[0]["match"]))
        #if re.search(ur"[这那谁]一?[个是]+",q):
        #    return u"指代"
        #if re.search(ur"[?!。\?\!][\u4E00-\u9FA5]",q):
        #    return u"断句"
        return u""

    def clean_question(self, question):
        """Strip conversational lead-ins (你知道/告诉我/...) and normalize 为何 -> 为什么."""
        question_clean = question
        question_clean = re.sub(ur"你(知道|了解|听说|说|认为|觉得|见过|认识)","", question_clean)
        question_clean = re.sub(ur"你?(告诉|给|跟)我(讲|说|推荐)?",r"", question_clean)
        question_clean = re.sub(u"为何", u"为什么", question_clean)
        #question_clean = re.sub(ur"[向对]你",r"", question_clean)
        return question_clean

    def is_question_baike_0618(self, question, use_pos=True, debug_item=None):
        """Classify *question* as encyclopedic ("baike") or not (v0618).

        Combines the black/white word lists with (optionally) jieba POS
        tagging. When *debug_item* is a dict it is filled with the decision
        trace: matched words, POS tags, and a human-readable "note".
        """
        if not question:
            if debug_item is not None:
                debug_item['note']= u"[-]问题空"
            return False
        if not isinstance(question, unicode):
            question = question.decode("utf-8")
        #regex filter black
        # Chit-chat pattern: "you/I" followed within 5 chars by can/want/like/etc.
        m = re.search(ur"[你我].{0,5}[有会能可敢喜爱去来给拿要]", question)
        if m:
            if debug_item is not None:
                debug_item['note']= u"[-]闲聊:{}".format( m.group(0) )
            return False
        #rewrite question
        question_clean = self.clean_question(question)
        question_clean = re.sub(ur"我[国党们执]","", question_clean)
        # Surround key interrogatives with spaces so jieba keeps them separate.
        question_clean = re.sub(ur"(第一|多少|为何|哪)", r" \1 ", question_clean)
        words = set(self.cut_text(question_clean))
        if use_pos:
            words_pos = set(self.pseg.cut(question_clean))
        detected_black = self.baike_words_black.intersection(words)
        detected_white = self.baike_words_white.intersection(words)
        if debug_item is not None:
            debug_item["detected_black"] = list(detected_black)
            debug_item["detected_white"] = list(detected_white)
            debug_item["words"] = list(words)
            if use_pos and words_pos:
                temp = []
                for word,flag in words_pos:
                    temp.append(u"{}[{}]".format(word,flag))
                debug_item["words_pos"] = temp
        # Blacklist wins over everything else.
        if len(detected_black) > 0:
            if debug_item is not None:
                debug_item['note']= u"[-]黑名单:{}".format( u"/".join(detected_black) )
            return False
        if len(detected_white) > 0:
            if debug_item is not None:
                debug_item['note']= u"[+]白名单:{}".format( u"/".join(detected_white) )
            # if use_pos and words_pos:
            #     good_words = [word for word, flag in words_pos if flag.startswith("n") ]
            #     #print question_clean, good_words
            #     return len(good_words)>0
            # else:
            return True
        # No list matched: fall back to POS tagging for short questions —
        # reject if any pronoun ("r") appears, otherwise accept.
        if use_pos and words_pos:
            if len(words)<10:
                # all noun
                for word, flag in words_pos:
                    #if not flag.startswith("n") and flag not in ["a","uj","x","y","t","l"]:
                    if flag in ["r"]:
                        # (translated) ends with a verb:
                        #if flag in ['v'] and question_clean.endswith(word):
                        #    return True
                        #print word, flag
                        if debug_item is not None:
                            debug_item['note']= u"[-]词性指代:{}".format( u"/".join( debug_item["words_pos"] ) )
                        return False
                if debug_item is not None:
                    debug_item['note']= u"[+]词性名词:{}".format( u"/".join( debug_item["words_pos"] ) )
                return True
        if debug_item is not None:
            debug_item['note']= u"[-]其他"
        return False

    def select_qapair_0624(self, query, search_result_json, result_limit=3, answer_len_limit=40, question_len_limit=30, question_match_limit=0.3):
        """Filter and rank parsed search results against *query*.

        Keeps items whose question/answer lengths are within the limits, that
        pass filter_chat, and whose question similarity to *query* is at least
        *question_match_limit*; returns at most *result_limit* items sorted by
        descending question similarity. Annotates each kept item with
        match_score, match_score_answers and label in place.
        """
        result_answers = []
        for item in search_result_json:
            if "answers" not in item:
                continue
            #too long question
            if len(item["question"]) > question_len_limit:
                #print "skip question_len_limit", len(item["question"])
                continue
            #skip long answers
            if len(item["answers"]) > answer_len_limit:
                #print "skip answer_len_limit", type(item["answers"]), len(item["answers"]), item["answers"]
                continue
            if self.filter_chat(item["question"], item["answers"]):
                continue
            # SequenceMatcher ratio in [0, 1]; question similarity is the rank key.
            question_match_score = difflib.SequenceMatcher(None, query, item["question"]).ratio()
            answer_match_score = difflib.SequenceMatcher(None, query, item["answers"]).ratio()
            item["match_score"] = question_match_score
            item["match_score_answers"] = answer_match_score
            item["label"] = self.get_chat_label(item["question"], item["answers"])
            #skip not matching questions
            if (question_match_score < question_match_limit):
                #print "skip question_match_limit", question_match_score
                continue
            result_answers.append(item)
        ret = sorted(result_answers, key= lambda x: 0 - x["match_score"] )
        if len(ret) > result_limit:
            ret = ret[:result_limit]
        # if len(ret) == 0:
        #     for item in search_result_json:
        #         print u"{} | {} | {} | {} | {} | {}".format(query, item["question"], item.get("answers"), item.get("label"), item.get("match_score"), item.get("match_score_answers"))
        #         print json.dumps(item, ensure_ascii=False)
        return ret
class ZhidaoFetch():
def __init__(self, config={}):
self.debug = config.get("debug")
self.api_nlp = ZhidaoNlp(self.debug)
self.config = config
if config:
from downloader.downloader_wrapper import DownloadWrapper
print self.config
self.downloader = DownloadWrapper(self.config.get("cache_server"), self.config["crawl_http_headers"])
def parse_query(self,query_unicode, query_parser=0):
if query_parser == 1:
qword = u" ".join(self.api_nlp.cut_text(query_unicode))
else:
qword = query_unicode
return qword
def get_search_url_qword(self,query_unicode, query_parser=0, page_number=0):
qword = self.parse_query(query_unicode, query_parser=query_parser)
if page_number == 0:
query_url = "http://zhidao.baidu.com/search/?word={0}".format( urllib.quote(qword.encode("utf-8")) )
else:
query_url = "http://zhidao.baidu.com/search/?pn={}&word={}".format( page_number*10, urllib.quote(query) )
return query_url, qword
def select_best_qapair_0616(self,search_result_json):
for item in search_result_json:
if item["is_recommend"] == 1:
#Thread(target = post_zhidao_fetch_job, args = (item, ) ).start()
ret ["best_qapair"] = item
return ret
def select_top_n_chat_0621(self, query, search_result_json, num_answers_needed):
good_answers = []
bad_answers = []
result_answers = []
match_score_threshold = 0.6
for item in search_result_json:
#print type(query), type(item["question"])
discount_skip_word = 0
if self.api_nlp.detect_skip_words(item["question"]):
print "did not skip min-gan-ci question"
# continue
if self.api_nlp.detect_skip_words(item["answers"]):
print "did not skip min-gan-ci answers"
# continue
match_score = difflib.SequenceMatcher(None, query, item["question"]).ratio()
item["match_score"] = match_score
if self.api_nlp.is_answer_bad(item["answers"]):
bad_answers.append(item)
else:
good_answers.append(item)
for item in sorted(good_answers, key=lambda elem: 0-elem["match_score"]):
match_score = item["match_score"]
if match_score >= match_score_threshold and len(result_answers) < num_answers_needed:
result_answers.append(item)
else:
break
if len(result_answers) < num_answers_needed:
for item in sorted(bad_answers, key=lambda elem: 0-elem["match_score"]):
match_score = item["match_score"]
if match_score >= match_score_threshold and len(result_answers) < num_answers_needed:
result_answers.append(item)
else:
break
return result_answers
def select_top_n_chat_0622(self, query, search_result_json, result_limit=3, answer_len_limit=30, question_len_limit=20, question_match_limit=0.4):
result_answers = []
for item in search_result_json:
if "answers" not in item:
continue
#skip long answers
if len(item["answers"]) > answer_len_limit:
#print "skip answer_len_limit", type(item["answers"]), len(item["answers"]), item["answers"]
continue
#too long question
if len(item["question"]) > question_len_limit:
#print "skip question_len_limit", len(item["question"])
continue
if self.api_nlp.filter_chat(item["question"], item["answers"]):
continue
question_match_score = difflib.SequenceMatcher(None, query, item["question"]).ratio()
# question_match_score_b = difflib.SequenceMatcher(None, item["question"], query).ratio()
item["match_score"] = question_match_score
item["label"] = self.api_nlp.get_chat_label(item["question"], item["answers"])
#skip not matching questions
if (question_match_score < question_match_limit):
#print "skip question_match_limit", question_match_score
continue
result_answers.append(item)
ret = sorted(result_answers, key= lambda x: 0 - x["match_score"])
if len(ret) > result_limit:
ret = ret[:result_limit]
return ret
def search_chat_top_n(self,query,num_answers_needed=3,query_filter=2, query_parser=0, select_best=True):
result = self.prepare_query(query, query_filter, query_parser, use_skip_words=False)
if not result:
return False
ret = result["ret"]
query_url = result["query_url"]
query_unicode = ret["query"]
#if self.api_nlp.is_question_baike( query_unicode , query_filter= query_filter):
# print "not skip query, baike", query_filter, query_unicode
# return False
#print query
ts_start = time.time()
content = self.download(query_url)
ret ["milliseconds_fetch"] = int( (time.time() - ts_start) * 1000 )
if content:
ret ["content_len"] = len(content)
#print type(content)
#print content
if select_best and content:
ts_start = time.time()
search_result_json = parse_search_json_v0615(content)
ret ["milliseconds_parse"] = int( (time.time() - ts_start) * 1000 )
ret ["item_len"] = len(search_result_json)
answer_items = self.select_top_n_chat_0622(query_unicode, search_result_json, num_answers_needed)
#print "select_best", len(answer_items)
ret ["items"] = answer_items
ret ["items_all"] = search_result_json
# if answer_items:
# index = 0
# for item in answer_items:
# ret ["qapair{}".format(index)] = item
# index += 1
# return ret
#print json.dumps(search_result_json,ensure_ascii=False)
return ret
def select_best_qapair_0617(self,query, search_result_json):
best_item = None
best_score = 0.4
best_cnt_like = -1
for item in search_result_json:
print json.dumps(item, ensure_ascii=False)
print "\n\n--------select_best_qapair_0617 "
#print type(query), type(item["question"])
discount_skip_word = 0
temp = self.api_nlp.detect_skip_words(item["question"])
if temp:
print "skip min-gan-ci question", json.dumps(list(temp), ensure_ascii=False)
item["debug_note"] = u"[-]问答对-问题敏感词:{}".format( u"/".join( temp ))
continue
temp = self.api_nlp.detect_skip_words(item["answers"], check_list=["skip_words_zhidao", "skip_words_all"])
if temp:
print "skip min-gan-ci answers", json.dumps(list(temp), ensure_ascii=False)
item["debug_note"] = u"[-]问答对-答案敏感词:{}".format( u"/".join( temp ))
continue
if self.api_nlp.is_answer_bad(item["answers"]):
print "skip bad answers"
item["debug_note"] = u"[-]问答对-答案有符号"
continue
match_score = difflib.SequenceMatcher(None, query, item["question"]).ratio()
item["match_score"] = match_score
if self.api_nlp.debug:
print match_score, discount_skip_word, item["answers"]
#print query, item["question"] ,match_score, item["cnt_like"]
this_answer_is_better = False
if match_score > best_score * 1.5:
this_answer_is_better = True
elif match_score > best_score * 0.9 and item["cnt_like"] > best_cnt_like:
this_answer_is_better = True
if this_answer_is_better:
best_item = item
best_score = match_score
best_cnt_like = item["cnt_like"]
return best_item
def search_baike_best(self,query, query_filter=2, query_parser=0, debug_item=None):
result = self.prepare_query(query, query_filter, query_parser, debug_item=debug_item)
if not result:
return False
ret = result["ret"]
query_url = result["query_url"]
query_unicode = ret["query"]
if not self.api_nlp.is_question_baike( query_unicode , query_filter= query_filter, debug_item=debug_item):
print "skip query, not baike", query_filter, query_unicode
return False
ts_start = time.time()
content = self.download(query_url)
ret ["milliseconds_fetch"] = int( (time.time() - ts_start) * 1000 )
if content:
ts_start = time.time()
search_result_json = parse_search_json_v0615(content)
ret ["milliseconds_parse"] = int( (time.time() - ts_start) * 1000 )
if self.debug:
ret["items_all"] = search_result_json
best_item = self.select_best_qapair_0617(query_unicode, search_result_json)
if best_item:
ret ["best_qapair"] = best_item
return ret
#print json.dumps(search_result_json,ensure_ascii=False)
#print ">>>>>>", content
return ret
def search_all(self, query, query_filter=0, query_parser=0, limit=10):
max_page_number = (limit-1)/10+1
output = { "items":[], "metadata":[], "query":query, "limit":limit,
"query_filter":query_filter, "query_parser":query_parser }
for page_number in range(max_page_number):
result = self.prepare_query(query, query_filter, query_parser, use_skip_words=False)
if not result:
print query
break
ret = result["ret"]
query_url = result["query_url"]
query_unicode = ret["query"]
ts_start = time.time()
content = self.download(query_url)
ret ["milliseconds_fetch"] = int( (time.time() - ts_start) * 1000 )
if content:
ts_start = time.time()
search_result_json = parse_search_json_v0615(content)
ret ["milliseconds_parse"] = int( (time.time() - ts_start) * 1000 )
output["items"].extend( search_result_json )
output["metadata"].extend( ret )
return output
def prepare_query(self, query, query_filter, query_parser, use_skip_words=True, page_number=0, debug_item=None):
if not query:
print "skip query, empty"
if debug_item is not None:
debug_item["debug_note"] = u"[-]问题空:prepare_query"
return False
query_unicode = query
if not isinstance(query_unicode, unicode):
query_unicode = query_unicode.decode("utf-8")
if use_skip_words:
detected_words = self.api_nlp.detect_skip_words(query_unicode)
if detected_words:
print "skip bad query, empty"
if debug_item is not None:
debug_item["debug_note"] = u"[-]问题敏感词:{}".format( u"/".format( detected_words ) )
return False
query_unicode = re.sub(u"?$","",query_unicode)
query_url, qword = self.get_search_url_qword(query_unicode, query_parser, page_number=page_number)
ret = {
"query":query_unicode,
}
if query_parser == 1:
ret["qword"] = qword
return {"ret":ret, "query_url":query_url}
def search_chat_best(self,query, query_filter=2, query_parser=0):
result = self.prepare_query(query, query_filter, query_parser)
if not result:
return False
ret = result["ret"]
query_url = result["query_url"]
query_unicode = ret["query"]
if not self.api_nlp.is_question_baike( query_unicode , query_filter= query_filter):
print "skip query, not baike", query_filter, query_unicode
return False
ts_start = time.time()
content = self.download(query_url)
ret ["milliseconds_fetch"] = int( (time.time() - ts_start) * 1000 )
if content:
ts_start = time.time()
search_result_json = parse_search_json_v0615(content)
ret ["milliseconds_parse"] = int( (time.time() - ts_start) * 1000 )
#deprecated
best_item = self.select_best_chat_0621(query_unicode, search_result_json)
if best_item:
ret ["best_qapair"] = best_item
return ret
#print json.dumps(search_result_json,ensure_ascii=False)
return False
def download(self, query_url):
if self.config:
return self.downloader.download_with_cache(
query_url,
self.config["batch_id"],
self.config["crawl_gap"],
self.config["crawl_http_method"],
self.config["crawl_timeout"],
encoding='gb18030',
redirect_check=True,
error_check=False,
refresh=False)
else:
return self.download_direct(query_url)
def download_direct(self, query_url):
import requests
#print query_url
encoding='gb18030'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,en-US;q=0.8,en;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': 1,
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36',
}
headers["Host"] = "zhidao.baidu.com"
print query_url
r = requests.get(query_url, timeout=10, headers=headers)
if r:
r.encoding = encoding
return r.text
| [
"lidingpku@gmail.com"
] | lidingpku@gmail.com |
e0fab741660cd79a9bb39662c41807ef1d654d98 | 168978c0d4e33f9a2e614c51e77a75cd6393def8 | /blog/migrations/0002_comment.py | 153650079fc199576b2a749ee91fe89c86e6be33 | [] | no_license | Faisal-Zamir/Online_Course | cbb548a9c98ba24c907be584dd2aa418235bd8a2 | 73a3c5f84af68acafc85d6762f28858900c688ef | refs/heads/master | 2022-12-06T08:32:45.940898 | 2020-08-24T03:15:19 | 2020-08-24T03:15:19 | 289,815,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # Generated by Django 3.0.7 on 2020-08-23 08:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the blog.Comment model,
    # linked to blog.Post via the "comments" reverse accessor.
    # Comments start inactive (active=False) and are ordered newest-first.

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=False)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
            options={
                'ordering': ['-created_on'],
            },
        ),
    ]
| [
"Jafriweb@gmail.com"
] | Jafriweb@gmail.com |
fd03335e59e081fdbdaab6e2a3b46cb34f7afd28 | 0903058aeac42ae40371f5a31978df8fdc838162 | /feature_engine/creation/mathematical_combination.py | fda42a6dc8c184102a96825077b8f9d93a5e2a87 | [
"BSD-3-Clause"
] | permissive | vasusuren/feature_engine | 5f852c229153dc6a5a2e4f7787152b1aa4c2ae6f | 24b8cbdc0aea1e8c266b805947f38e9f02369d69 | refs/heads/master | 2023-04-10T03:09:47.328514 | 2021-04-12T16:16:47 | 2021-04-12T16:16:47 | 358,562,154 | 0 | 0 | BSD-3-Clause | 2021-06-10T13:14:48 | 2021-04-16T10:28:45 | null | UTF-8 | Python | false | false | 10,885 | py | from typing import List, Optional, Union
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from feature_engine.dataframe_checks import (
_check_contains_na,
_check_input_matches_training_df,
_is_dataframe,
)
from feature_engine.variable_manipulation import _find_or_check_numerical_variables
class MathematicalCombination(BaseEstimator, TransformerMixin):
"""
MathematicalCombination() applies basic mathematical operations to multiple
features, returning one or more additional features as a result. That is, it sums,
multiplies, takes the average, maximum, minimum or standard deviation of a group
of variables and returns the result into new variables.
For example, if we have the variables **number_payments_first_quarter**,
**number_payments_second_quarter**, **number_payments_third_quarter** and
**number_payments_fourth_quarter**, we can use MathematicalCombination() to
calculate the total number of payments and mean number of payments as follows:
.. code-block:: python
transformer = MathematicalCombination(
variables_to_combine=[
'number_payments_first_quarter',
'number_payments_second_quarter',
'number_payments_third_quarter',
'number_payments_fourth_quarter'
],
math_operations=[
'sum',
'mean'
],
new_variables_name=[
'total_number_payments',
'mean_number_payments'
]
)
Xt = transformer.fit_transform(X)
The transformed X, Xt, will contain the additional features
**total_number_payments** and **mean_number_payments**, plus the original set of
variables.
Parameters
----------
variables_to_combine : list
The list of numerical variables to be combined.
math_operations : list, default=None
The list of basic math operations to be used to create the new features.
If None, all of ['sum', 'prod', 'mean', 'std', 'max', 'min'] will be performed
over the `variables_to_combine`. Alternatively, the user can enter the list of
operations to carry out.
Each operation should be a string and must be one of the elements
from the list: ['sum', 'prod', 'mean', 'std', 'max', 'min']
Each operation will result in a new variable that will be added to the
transformed dataset.
new_variables_names : list, default=None
Names of the newly created variables. The user can enter a name or a list
of names for the newly created features (recommended). The user must enter
one name for each mathematical transformation indicated in the `math_operations`
parameter. That is, if you want to perform mean and sum of features, you
should enter 2 new variable names. If you perform only mean of features,
enter 1 variable name. Alternatively, if you chose to perform all
mathematical transformations, enter 6 new variable names.
The name of the variables indicated by the user should coincide with the order
in which the mathematical operations are initialised in the transformer.
That is, if you set math_operations = ['mean', 'prod'], the first new variable
name will be assigned to the mean of the variables and the second variable name
to the product of the variables.
If `new_variable_names = None`, the transformer will assign an arbitrary name
to the newly created features starting by the name of the mathematical
operation, followed by the variables combined separated by -.
Attributes
----------
combination_dict_ :
Dictionary containing the mathematical operation to column name pairs
math_operations_ :
List with the mathematical operations to be applied to the
`variables_to_combine`.
Methods
-------
fit:
This transformer does not learn parameters.
transform:
Combine the variables with the mathematical operations.
fit_transform:
Fit to the data, then transform it.
Notes
-----
Although the transformer in essence allows us to combine any feature with any of
the allowed mathematical operations, its used is intended mostly for the creation
of new features based on some domain knowledge. Typical examples within the
financial sector are:
- Sum debt across financial products, i.e., credit cards, to obtain the total debt.
- Take the average payments to various financial products per month.
- Find the Minimum payment done at any one month.
In insurance, we can sum the damage to various parts of a car to obtain the
total damage.
"""
def __init__(
self,
variables_to_combine: List[Union[str, int]],
math_operations: Optional[List[str]] = None,
new_variables_names: Optional[List[str]] = None,
) -> None:
# check input types
if not isinstance(variables_to_combine, list) or not all(
isinstance(var, (int, str)) for var in variables_to_combine
):
raise ValueError(
"variables_to_combine takes a list of strings or integers "
"corresponding to the names of the variables to combine "
"with the mathematical operations."
)
if new_variables_names:
if not isinstance(new_variables_names, list) or not all(
isinstance(var, str) for var in new_variables_names
):
raise ValueError(
"new_variable_names should be None or a list with the "
"names to be assigned to the new variables created by"
"the mathematical combinations."
)
if math_operations:
if not isinstance(math_operations, list):
raise ValueError("math_operations parameter must be a list or None")
if any(
operation not in ["sum", "prod", "mean", "std", "max", "min"]
for operation in math_operations
):
raise ValueError(
"At least one of the entered math_operations is not supported. "
"Choose one or more of ['sum', 'prod', 'mean', 'std', 'max', 'min']"
)
# check input logic
if len(variables_to_combine) <= 1:
raise KeyError(
"MathematicalCombination requires two or more features to make proper "
"transformations."
)
if new_variables_names:
if len(new_variables_names) != len(math_operations): # type: ignore
raise ValueError(
"Number of items in new_variables_names must be equal to number of "
"items in math_operations."
"In other words, the transformer needs as many new variable names"
"as mathematical operations to perform over the variables to "
"combine."
)
self.variables_to_combine = variables_to_combine
self.new_variables_names = new_variables_names
self.math_operations = math_operations
def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
"""
This transformer does not learn parameters.
Perform dataframe checks. Creates dictionary of operation to new feature
name pairs.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The training input samples. Can be the entire dataframe, not just the
variables to transform.
y : pandas Series, or np.array. Defaults to None.
It is not needed in this transformer. You can pass y or None.
Raises
------
TypeError
- If the input is not a Pandas DataFrame
- If any user provided variables in variables_to_combine are not numerical
ValueError
If the variable(s) contain null values
Returns
-------
self
"""
# check input dataframe
X = _is_dataframe(X)
# check variables to combine are numerical
self.variables_to_combine = _find_or_check_numerical_variables(
X, self.variables_to_combine
)
# check if dataset contains na
_check_contains_na(X, self.variables_to_combine)
if self.math_operations is None:
self.math_operations_ = ["sum", "prod", "mean", "std", "max", "min"]
else:
self.math_operations_ = self.math_operations
# dictionary of new_variable_name to operation pairs
if self.new_variables_names:
self.combination_dict_ = dict(
zip(self.new_variables_names, self.math_operations_)
)
else:
if all(isinstance(var, str) for var in self.variables_to_combine):
vars_ls = self.variables_to_combine
else:
vars_ls = [str(var) for var in self.variables_to_combine]
self.combination_dict_ = {
f"{operation}({'-'.join(vars_ls)})": operation # type: ignore
for operation in self.math_operations_
}
self.input_shape_ = X.shape
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Combine the variables with the mathematical operations.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The data to transform.
Raises
------
TypeError
If the input is not a Pandas DataFrame
ValueError
- If the variable(s) contain null values
- If the dataframe is not of the same size as that used in fit()
Returns
-------
X : Pandas dataframe, shape = [n_samples, n_features + n_operations]
The dataframe with the original variables plus the new variables.
"""
# Check method fit has been called
check_is_fitted(self)
# check that input is a dataframe
X = _is_dataframe(X)
# check if dataset contains na
_check_contains_na(X, self.variables_to_combine)
# Check if input data contains same number of columns as dataframe used to fit.
_check_input_matches_training_df(X, self.input_shape_[1])
# combine mathematically
for new_variable_name, operation in self.combination_dict_.items():
X[new_variable_name] = X[self.variables_to_combine].agg(operation, axis=1)
return X
| [
"noreply@github.com"
] | vasusuren.noreply@github.com |
877389eadf5431f86cce9536338e7780b5b6f092 | 090324db0c04d8c30ad6688547cfea47858bf3af | /tests/test_sokorule.py | d0e02c22a7085d11f93f4eebbaa8548dce508f8b | [] | no_license | fidlej/sokobot | b82c4c36d73e224d0d0e1635021ca04485da589e | d3d04753a5043e6a22dafd132fa633d8bc66b9ea | refs/heads/master | 2021-01-21T13:14:29.523501 | 2011-06-12T07:34:14 | 2011-06-12T07:34:14 | 32,650,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py |
from nose.tools import assert_equal
from soko.struct.rules.sokorule import PushRule, SokobanGoalRule
from soko.struct import modeling
def test_get_children():
    """PushRule.get_children() expands a state into every legal successor.

    The level is a tuple of row strings using Sokoban notation: '#' wall,
    '@' player, '.' goal, '$' box, '+' player-on-goal.
    NOTE(review): leading whitespace in the row literals may have been
    mangled by whitespace stripping in this dump — verify against the
    original file.
    """
    rule = PushRule()
    s = (
        "# #",
        "#@. #",
        " $ #",
        " #",
        "#####",
    )
    used_cells = set()
    children = rule.get_children(s, used_cells)
    # three legal moves/pushes exist from this position
    assert_equal(3, len(children))
    # player stepped right onto the goal ('+')
    _assert_contains(children, (
        "# #",
        "# + #",
        " $ #",
        " #",
        "#####",
    ))
    # player stayed left of the goal
    _assert_contains(children, (
        "#@ #",
        "# . #",
        " $ #",
        " #",
        "#####",
    ))
    # player moved down, pushing the box down
    _assert_contains(children, (
        "# #",
        "# . #",
        " @ #",
        " $ #",
        "#####",
    ))
    # mark every cell the expansion touched and compare against the
    # expected footprint ('!' = inspected cell)
    used_s = modeling.mutablize(s)
    for pos in used_cells:
        x, y = pos
        used_s[y][x] = "!"
    assert_equal(
        modeling.immutablize((
            "#! #",
            "!!! #",
            " ! #",
            " ! #",
            "#####",
        )), modeling.immutablize(used_s))
def test_get_children_from_end_state():
    """A dead-end state yields children == None and leaves used_cells empty.

    NOTE(review): leading whitespace inside this triple-quoted level string
    is part of the runtime data but was stripped by this dump; reproduced as
    found — confirm against the original file.
    """
    s = modeling.immutablize("""\
#$ #
@ .#
#
#
#####""".splitlines())
    rule = PushRule()
    used_cells = set()
    children = rule.get_children(s, used_cells)
    # None signals that no expansion is possible from this state
    assert_equal(None, children)
    assert_equal(set(), used_cells)
def test_is_goaling():
    """is_goaling() is True only for the transition that reaches the goal.

    NOTE(review): leading whitespace in the row literals may have been
    mangled by whitespace stripping in this dump — verify against the
    original file.
    """
    rule = SokobanGoalRule()
    # box ('$') one step below the goal ('.')
    s = (
        "# #",
        "# .#",
        " $#",
        " @#",
        "#####",
    )
    # box pushed onto the goal ('*' = box-on-goal)
    next_s = (
        "# #",
        "# *#",
        " @#",
        " #",
        "#####",
    )
    # forward transition reaches the goal
    assert_equal(True, rule.is_goaling(s, next_s))
    # reverse and identity transitions do not
    assert_equal(False, rule.is_goaling(next_s, s))
    assert_equal(False, rule.is_goaling(next_s, next_s))
def _assert_contains(childern, s):
    """Assert that state |s|, once normalized to immutable form, is one of
    the states in |childern|."""
    assert modeling.immutablize(s) in childern
| [
"ivo@danihelka.net"
] | ivo@danihelka.net |
68d9ab65613c09fa8f9fb2cc9c777da8f5849f98 | bea556733142d4a41562f4c9e0d26418780f244e | /tools/cef_parser.py | d624358ad9ecb90298be67f31df591c9d7a548fa | [
"BSD-3-Clause"
] | permissive | EricTop3/cef | fd48f706b27a51951b830a6673be10a9e63030c5 | e83d8d6a131ad39b98c97c945ccf77bcd723378f | refs/heads/master | 2023-09-04T00:11:52.720554 | 2021-11-09T19:21:58 | 2021-11-09T19:21:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66,664 | py | # Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from date_util import *
from file_util import *
import os
import re
import shutil
import string
import sys
import textwrap
import time
def notify(msg):
  """ Display an informational message on stdout. """
  line = ' NOTE: ' + msg + '\n'
  sys.stdout.write(line)
def wrap_text(text, indent='', maxchars=80):
  """ Wrap |text| so no output line exceeds |maxchars| characters, prefixing
  every line with |indent|. Lines are broken at word boundaries.
  """
  width = maxchars - len(indent)
  return ''.join(indent + line + '\n' for line in textwrap.wrap(text, width))
def is_base_class(clsname):
  """ Returns true if |clsname| is a known base (root) class in the object
  hierarchy.
  """
  return clsname in ('CefBaseRefCounted', 'CefBaseScoped')
def get_capi_file_name(cppname):
  """ Convert a C++ header file name to a C API header file name.

  Assumes |cppname| ends with '.h'; the extension is replaced by '_capi.h'.
  """
  return '%s_capi.h' % cppname[:-2]
def get_capi_name(cppname, isclassname, prefix=None):
  """ Convert a C++ CamelCaps name to a C API underscore name.

  An underscore is inserted at each lower-to-upper case transition. When
  |isclassname| is true a '_t' suffix is appended. When |prefix| is given it
  is prepended with '_'; a 'cef'-prefixed |prefix| additionally has its
  duplicated portion removed from the converted name.
  """
  chars = []
  prev = ''
  for ch in cppname:
    # insert an underscore when an upper case letter follows a lower case
    # letter (digits never trigger a break)
    if chars and not ch.isdigit() and ch.upper() == ch and prev.upper() != prev:
      chars.append('_')
    chars.append(ch.lower())
    prev = ch
  result = ''.join(chars)

  if isclassname:
    result += '_t'

  if prefix is not None:
    if prefix[0:3] == 'cef':
      # if the prefix name is duplicated in the function name
      # remove that portion of the function name
      subprefix = prefix[3:]
      pos = result.find(subprefix)
      if pos >= 0:
        result = result[0:pos] + result[pos + len(subprefix):]
    result = prefix + '_' + result

  return result
def get_wrapper_type_enum(cppname):
  """ Returns the wrapper type enumeration value for the specified C++ class
  name. """
  # strip the 'cef_' prefix from the converted name and upper-case the rest
  capi = get_capi_name(cppname, False)
  return 'WT_' + capi[4:].upper()
def get_prev_line(body, pos):
  """ Retrieve the start/end offsets and text of the line immediately before
  the line containing the specified position.
  """
  prev_end = body.rfind('\n', 0, pos)
  prev_start = body.rfind('\n', 0, prev_end) + 1
  return {
      'start': prev_start,
      'end': prev_end,
      'line': body[prev_start:prev_end],
  }
def get_comment(body, name):
  """ Retrieve the comment for a class or function.

  Scans upward from the first occurrence of |name| in |body|, collecting the
  contiguous '//' comment lines that immediately precede it. Lines are
  returned top-to-bottom with the leading '//' removed. /*--cef()--*/
  attribute blocks (single- or multi-line) between the comment and |name| are
  skipped. A None entry represents a blank separator line whose preceding
  line is also a plain '//' comment.
  """
  result = []

  pos = body.find(name)
  # tracks whether the upward scan is currently inside a /* ... */ block
  # (note: scanning bottom-up, so '*/' is seen before '/*')
  in_block_comment = False
  while pos > 0:
    data = get_prev_line(body, pos)
    line = data['line'].strip()
    pos = data['start']
    if len(line) == 0:
      # check if the next previous line is a comment
      prevdata = get_prev_line(body, pos)
      prevline = prevdata['line'].strip()
      if prevline[0:2] == '//' and prevline[0:3] != '///':
        result.append(None)
      else:
        break
    # single line /*--cef()--*/
    elif line[0:2] == '/*' and line[-2:] == '*/':
      continue
    # start of multi line /*--cef()--*/
    elif in_block_comment and line[0:2] == '/*':
      in_block_comment = False
      continue
    # end of multi line /*--cef()--*/
    elif not in_block_comment and line[-2:] == '*/':
      in_block_comment = True
      continue
    elif in_block_comment:
      continue
    elif line[0:2] == '//':
      # keep the comment line including any leading spaces
      result.append(line[2:])
    else:
      break

  # lines were collected bottom-up; restore original (top-down) order
  result.reverse()
  return result
def validate_comment(file, name, comment):
  """ Validate the comment array returned by get_comment().

  A valid CppDoc comment is delimited by lines containing only '/' (the
  leading '//' was already stripped by get_comment()). Exactly two such
  delimiter lines must exist, the array must hold at least three entries,
  and the final entry must be the closing delimiter. Raises an Exception
  naming |file| and |name| on failure.
  """
  delimiters = sum(1 for line in comment
                   if not line is None and len(line) > 0 and line == '/')
  valid = (delimiters == 2 and len(comment) >= 3 and
           comment[len(comment) - 1] == '/')
  if not valid:
    raise Exception('Missing or incorrect comment in %s for: %s' % \
        (file, name))
def format_comment(comment, indent, translate_map=None, maxchars=80):
  """ Return the comments array as a formatted string.

  |comment| is the array produced by get_comment(): text lines with the
  leading '//' removed, and None entries marking blank separator lines.
  Consecutive text lines are merged into a paragraph and re-wrapped to
  |maxchars| columns with |indent| + '// ' prefixed to every output line.
  Indented lines and lines starting with '/' are emitted verbatim.
  |translate_map|, when given, maps source terms to replacements and is
  applied to each paragraph before wrapping.
  """
  if not translate_map is None:
    # Replace longest keys first in translation.
    translate_keys = sorted(
        translate_map.keys(), key=lambda item: (-len(item), item))

  result = ''
  wrapme = ''
  hasemptyline = False
  for line in comment:
    # if the line starts with a leading space, remove that space
    if not line is None and len(line) > 0 and line[0:1] == ' ':
      line = line[1:]
      didremovespace = True
    else:
      didremovespace = False

    if line is None or len(line) == 0 or line[0:1] == ' ' \
        or line[0:1] == '/':
      # the previous paragraph, if any, has ended
      if len(wrapme) > 0:
        if not translate_map is None:
          # apply the translation
          for key in translate_keys:
            wrapme = wrapme.replace(key, translate_map[key])
        # output the previous paragraph
        result += wrap_text(wrapme, indent + '// ', maxchars)
        wrapme = ''

    if not line is None:
      if len(line) == 0 or line[0:1] == ' ' or line[0:1] == '/':
        # blank lines or anything that's further indented should be
        # output as-is
        result += indent + '//'
        if len(line) > 0:
          if didremovespace:
            result += ' ' + line
          else:
            result += line
        result += '\n'
      else:
        # add to the current paragraph
        wrapme += line + ' '
    else:
      # output an empty line
      hasemptyline = True
      result += '\n'

  # flush any trailing paragraph that was not followed by a terminator line
  if len(wrapme) > 0:
    if not translate_map is None:
      # apply the translation
      for key in translate_map.keys():
        wrapme = wrapme.replace(key, translate_map[key])
    # output the previous paragraph
    result += wrap_text(wrapme, indent + '// ', maxchars)

  if hasemptyline:
    # an empty line means a break between comments, so the comment is
    # probably a section heading and should have an extra line before it
    result = '\n' + result
  return result
def format_translation_changes(old, new):
  """ Return a comment stating what is different between the old and new
  function prototype parts. Returns an empty string when nothing changed.

  |old| and |new| are dicts with 'retval', 'args' and (for |new|) 'name'
  keys as produced by get_function_impls().
  """
  warnings = ''
  changed = False

  # normalize C API attributes
  old_args = [x.replace('struct _', '') for x in old['args']]
  old_retval = old['retval'].replace('struct _', '')
  new_args = [x.replace('struct _', '') for x in new['args']]
  new_retval = new['retval'].replace('struct _', '')

  # check if the prototype has changed
  old_set = set(old_args)
  new_set = set(new_args)
  if len(old_set.symmetric_difference(new_set)) > 0:
    changed = True
    warnings += '\n // WARNING - CHANGED ATTRIBUTES'

    # arguments present only in the implementation
    for arg in old_set.difference(new_set):
      warnings += '\n // REMOVED: ' + arg

    # arguments present only in the current header
    for arg in new_set.difference(old_set):
      warnings += '\n // ADDED: ' + arg

  # check if the return value has changed
  if old_retval != new_retval:
    changed = True
    warnings += '\n // WARNING - CHANGED RETURN VALUE'+ \
                '\n // WAS: '+old['retval']+ \
                '\n // NOW: '+new['retval']

  if changed:
    warnings += '\n #pragma message("Warning: "__FILE__": '+new['name']+ \
                ' prototype has changed")\n'

  return warnings
def format_translation_includes(header, body):
  """ Return the #include block required by the generated |body| contents.

  Scans |body| for CppToC/CToCpp wrapper class references and for usage of
  known helpers (std::min/max, cef_api_hash, shutdown_checker, transfer_*)
  and emits the matching include directives. |header| (an obj_header)
  supplies the directory component for non-base wrapper classes.
  """
  result = ''

  # <algorithm> required for VS2013.
  if body.find('std::min') > 0 or body.find('std::max') > 0:
    result += '#include <algorithm>\n'

  if body.find('cef_api_hash(') > 0:
    result += '#include "include/cef_api_hash.h"\n'

  # identify what CppToC classes are being used
  p = re.compile('([A-Za-z0-9_]{1,})CppToC')
  list = sorted(set(p.findall(body)))
  for item in list:
    directory = ''
    if not is_base_class(item):
      cls = header.get_class(item)
      dir = cls.get_file_directory()
      if not dir is None:
        directory = dir + '/'
    result += '#include "libcef_dll/cpptoc/'+directory+ \
              get_capi_name(item[3:], False)+'_cpptoc.h"\n'

  # identify what CToCpp classes are being used
  p = re.compile('([A-Za-z0-9_]{1,})CToCpp')
  list = sorted(set(p.findall(body)))
  for item in list:
    directory = ''
    if not is_base_class(item):
      cls = header.get_class(item)
      dir = cls.get_file_directory()
      if not dir is None:
        directory = dir + '/'
    result += '#include "libcef_dll/ctocpp/'+directory+ \
              get_capi_name(item[3:], False)+'_ctocpp.h"\n'

  if body.find('shutdown_checker') > 0:
    result += '#include "libcef_dll/shutdown_checker.h"\n'

  if body.find('transfer_') > 0:
    result += '#include "libcef_dll/transfer_util.h"\n'

  return result
def str_to_dict(str):
  """ Convert a comma-delimited 'name=value' string to a dictionary.

  A name without an '=value' part maps to True. If the same name appears
  multiple times its values are collected into a list.
  """
  result = {}
  for part in str.split(','):
    part = part.strip()
    if not part:
      continue
    sparts = part.split('=')
    if len(sparts) > 2:
      raise Exception('Invalid dictionary pair format: ' + part)
    name = sparts[0].strip()
    val = sparts[1].strip() if len(sparts) == 2 else True
    if name in result:
      # a value with this name already exists; collect values in a list
      existing = result[name]
      if isinstance(existing, list):
        existing.append(val)
      else:
        result[name] = [existing, val]
    else:
      result[name] = val
  return result
def dict_to_str(dict):
  """ Convert a dictionary created by str_to_dict() back to a string. """
  parts = []
  for name, value in dict.items():
    if isinstance(value, list):
      # one 'name=value' entry per list element
      parts.extend(name + '=' + v for v in value)
    elif value is True:
      # bool values are represented by the bare name
      parts.append(name)
    else:
      parts.append(name + '=' + value)
  return ','.join(parts)
# Module-level regex fragments used to parse CEF C++ headers. These are
# combined with literal text to build the full patterns in add_data() and
# get_function_impls().

# regex for matching comment-formatted attributes
_cre_attrib = '/\*--cef\(([A-Za-z0-9_ ,=:\n]{0,})\)--\*/'
# regex for matching class and function names
_cre_cfname = '([A-Za-z0-9_]{1,})'
# regex for matching class and function names including path separators
_cre_cfnameorpath = '([A-Za-z0-9_\/]{1,})'
# regex for matching function return values
_cre_retval = '([A-Za-z0-9_<>:,\*\&]{1,})'
# regex for matching typedef value and name combination
_cre_typedef = '([A-Za-z0-9_<>:,\*\&\s]{1,})'
# regex for matching function return value and name combination
_cre_func = '([A-Za-z][A-Za-z0-9_<>:,\*\&\s]{1,})'
# regex for matching virtual function modifiers + arbitrary whitespace
_cre_vfmod = '([\sA-Za-z0-9_]{0,})'
# regex for matching arbitrary whitespace
_cre_space = '[\s]{1,}'
# regex for matching optional virtual keyword
_cre_virtual = '(?:[\s]{1,}virtual){0,1}'

# Simple translation types. Format is:
#   'cpp_type' : ['capi_type', 'capi_default_value']
_simpletypes = {
    'void': ['void', ''],
    'void*': ['void*', 'NULL'],
    'int': ['int', '0'],
    'int16': ['int16', '0'],
    'uint16': ['uint16', '0'],
    'int32': ['int32', '0'],
    'uint32': ['uint32', '0'],
    'int64': ['int64', '0'],
    'uint64': ['uint64', '0'],
    'double': ['double', '0'],
    'float': ['float', '0'],
    'float*': ['float*', 'NULL'],
    'long': ['long', '0'],
    'unsigned long': ['unsigned long', '0'],
    'long long': ['long long', '0'],
    'size_t': ['size_t', '0'],
    'bool': ['int', '0'],
    'char': ['char', '0'],
    'char* const': ['char* const', 'NULL'],
    'cef_color_t': ['cef_color_t', '0'],
    'cef_json_parser_error_t': ['cef_json_parser_error_t', 'JSON_NO_ERROR'],
    'cef_plugin_policy_t': ['cef_plugin_policy_t', 'PLUGIN_POLICY_ALLOW'],
    'CefCursorHandle': ['cef_cursor_handle_t', 'kNullCursorHandle'],
    'CefCompositionUnderline': [
        'cef_composition_underline_t', 'CefCompositionUnderline()'
    ],
    'CefEventHandle': ['cef_event_handle_t', 'kNullEventHandle'],
    'CefWindowHandle': ['cef_window_handle_t', 'kNullWindowHandle'],
    'CefInsets': ['cef_insets_t', 'CefInsets()'],
    'CefPoint': ['cef_point_t', 'CefPoint()'],
    'CefRect': ['cef_rect_t', 'CefRect()'],
    'CefSize': ['cef_size_t', 'CefSize()'],
    'CefRange': ['cef_range_t', 'CefRange()'],
    'CefDraggableRegion': ['cef_draggable_region_t', 'CefDraggableRegion()'],
    'CefThreadId': ['cef_thread_id_t', 'TID_UI'],
    'CefTime': ['cef_time_t', 'CefTime()'],
    'CefAudioParameters': ['cef_audio_parameters_t', 'CefAudioParameters()']
}
def get_function_impls(content, ident, has_impl=True):
  """ Retrieve the function parts from the specified contents as a set of
  return value, name, arguments and body. Ident must occur somewhere in
  the value.

  Returns a list of dicts with keys 'retval', 'name', 'args' (list of
  argument declaration strings), 'vfmod' (trailing modifiers such as
  'override'), and 'body' (empty when |has_impl| is False).
  """
  # extract the functions
  find_regex = '\n' + _cre_func + '\((.*?)\)([A-Za-z0-9_\s]{0,})'
  if has_impl:
    find_regex += '\{(.*?)\n\}'
  else:
    find_regex += '(;)'
  p = re.compile(find_regex, re.MULTILINE | re.DOTALL)
  list = p.findall(content)

  # build the function map with the function name as the key
  result = []
  for retval, argval, vfmod, body in list:
    if retval.find(ident) < 0:
      # the identifier was not found
      continue

    # remove the identifier
    retval = retval.replace(ident, '')
    retval = retval.strip()

    # Normalize the delimiter.
    retval = retval.replace('\n', ' ')

    # retrieve the function name (last space-delimited token of the
    # combined 'return-value name' string)
    parts = retval.split(' ')
    name = parts[-1]
    del parts[-1]
    retval = ' '.join(parts)

    # parse the arguments
    args = []
    for v in argval.split(','):
      v = v.strip()
      if len(v) > 0:
        args.append(v)

    result.append({
        'retval': retval.strip(),
        'name': name,
        'args': args,
        'vfmod': vfmod.strip(),
        'body': body if has_impl else '',
    })

  return result
def get_next_function_impl(existing, name):
  """ Remove and return the first entry in |existing| whose 'name' value
  matches |name|, or None when there is no match. |existing| is mutated.
  """
  for item in existing:
    if item['name'] == name:
      existing.remove(item)
      return item
  return None
def get_copyright(full=False, translator=True):
  """ Return the file header comment block.

  |full| selects the complete BSD license text instead of the short
  license reference. |translator| appends the 'generated by the CEF
  translator tool' banner (containing the $$HASH$$ placeholder). The
  $YEAR$ placeholder is replaced with the current year.
  """
  if full:
    result = \
"""// Copyright (c) $YEAR$ Marshall A. Greenblatt. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
  else:
    result = \
"""// Copyright (c) $YEAR$ The Chromium Embedded Framework Authors. All rights
// reserved. Use of this source code is governed by a BSD-style license that
// can be found in the LICENSE file.
"""
  if translator:
    result += \
"""//
// ---------------------------------------------------------------------------
//
// This file was generated by the CEF translator tool. If making changes by
// hand only do so within the body of existing method and function
// implementations. See the translator.README.txt file in the tools directory
// for more information.
//
// $hash=$$HASH$$$
//
"""
  # add the copyright year
  return result.replace('$YEAR$', get_year())
class obj_header:
  """ Class representing a C++ header file.

  Parses CEF C++ headers and collects the global typedefs, global functions
  and classes (as obj_typedef / obj_function / obj_class instances) that
  carry /*--cef()--*/ attribute comments.
  """

  def __init__(self):
    self.filenames = []  # header files that contributed functions/classes
    self.typedefs = []   # global obj_typedef instances
    self.funcs = []      # global obj_function instances
    self.classes = []    # obj_class instances
    self.root_directory = None

  def set_root_directory(self, root_directory):
    """ Set the root directory. """
    self.root_directory = root_directory

  def get_root_directory(self):
    """ Get the root directory. """
    return self.root_directory

  def add_directory(self, directory, excluded_files=[]):
    """ Add all header files from the specified directory. """
    files = get_files(os.path.join(directory, '*.h'))
    for file in files:
      if len(excluded_files) == 0 or \
          not os.path.split(file)[1] in excluded_files:
        self.add_file(file)

  def add_file(self, filepath):
    """ Add a header file. """
    if self.root_directory is None:
      filename = os.path.split(filepath)[1]
    else:
      filename = os.path.relpath(filepath, self.root_directory)
      filename = filename.replace('\\', '/')

    # read the input file into memory
    self.add_data(filename, read_file(filepath))

  def add_data(self, filename, data):
    """ Add header file contents.

    Extracts typedefs, attributed global functions, includes, forward
    declarations and attributed classes from |data| using the module-level
    regex fragments. |filename| is recorded only when a function or class
    was found.
    """
    added = False

    # remove space from between template definition end brackets
    data = data.replace("> >", ">>")

    # extract global typedefs
    p = re.compile('\ntypedef' + _cre_space + _cre_typedef + ';',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      # build the global typedef objects
      for value in list:
        pos = value.rfind(' ')
        if pos < 0:
          raise Exception('Invalid typedef: ' + value)
        alias = value[pos + 1:].strip()
        value = value[:pos].strip()
        self.typedefs.append(obj_typedef(self, filename, value, alias))

    # extract global functions
    p = re.compile('\n' + _cre_attrib + '\n' + _cre_func + '\((.*?)\)',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      added = True
      # build the global function objects
      for attrib, retval, argval in list:
        comment = get_comment(data, retval + '(' + argval + ');')
        validate_comment(filename, retval, comment)
        self.funcs.append(
            obj_function(self, filename, attrib, retval, argval, comment))

    # extract includes
    p = re.compile('\n#include \"include/' + _cre_cfnameorpath + '.h')
    includes = p.findall(data)

    # extract forward declarations
    p = re.compile('\nclass' + _cre_space + _cre_cfname + ';')
    forward_declares = p.findall(data)

    # extract empty classes
    p = re.compile('\n' + _cre_attrib + '\nclass' + _cre_space + _cre_cfname +
                   _cre_space + ':' + _cre_space + 'public' + _cre_virtual +
                   _cre_space + _cre_cfname + _cre_space + '{};',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      added = True
      # build the class objects
      for attrib, name, parent_name in list:
        # Style may place the ':' on the next line.
        comment = get_comment(data, name + ' :')
        if len(comment) == 0:
          comment = get_comment(data, name + "\n")
        validate_comment(filename, name, comment)
        self.classes.append(
            obj_class(self, filename, attrib, name, parent_name, "", comment,
                      includes, forward_declares))

      # Remove empty classes from |data| so we don't mess up the non-empty
      # class search that follows.
      data = p.sub('', data)

    # extract classes
    p = re.compile('\n' + _cre_attrib + '\nclass' + _cre_space + _cre_cfname +
                   _cre_space + ':' + _cre_space + 'public' + _cre_virtual +
                   _cre_space + _cre_cfname + _cre_space + '{(.*?)\n};',
                   re.MULTILINE | re.DOTALL)
    list = p.findall(data)
    if len(list) > 0:
      added = True
      # build the class objects
      for attrib, name, parent_name, body in list:
        # Style may place the ':' on the next line.
        comment = get_comment(data, name + ' :')
        if len(comment) == 0:
          comment = get_comment(data, name + "\n")
        validate_comment(filename, name, comment)
        self.classes.append(
            obj_class(self, filename, attrib, name, parent_name, body, comment,
                      includes, forward_declares))

    if added:
      # a global function or class was read from the header file
      self.filenames.append(filename)

  def __repr__(self):
    result = ''

    if len(self.typedefs) > 0:
      strlist = []
      for cls in self.typedefs:
        strlist.append(str(cls))
      result += "\n".join(strlist) + "\n\n"

    if len(self.funcs) > 0:
      strlist = []
      for cls in self.funcs:
        strlist.append(str(cls))
      result += "\n".join(strlist) + "\n\n"

    if len(self.classes) > 0:
      strlist = []
      for cls in self.classes:
        strlist.append(str(cls))
      result += "\n".join(strlist)

    return result

  def get_file_names(self):
    """ Return the array of header file names. """
    return self.filenames

  def get_typedefs(self):
    """ Return the array of typedef objects. """
    return self.typedefs

  def get_funcs(self, filename=None):
    """ Return the array of function objects. """
    if filename is None:
      return self.funcs
    else:
      # only return the functions in the specified file
      res = []
      for func in self.funcs:
        if func.get_file_name() == filename:
          res.append(func)
      return res

  def get_classes(self, filename=None):
    """ Return the array of class objects. """
    if filename is None:
      return self.classes
    else:
      # only return the classes in the specified file
      res = []
      for cls in self.classes:
        if cls.get_file_name() == filename:
          res.append(cls)
      return res

  def get_class(self, classname, defined_structs=None):
    """ Return the specified class or None if not found.

    When |defined_structs| is provided, the CAPI names of all classes seen
    before the match are appended to it (the caller uses this to track
    already-defined structures).
    """
    for cls in self.classes:
      if cls.get_name() == classname:
        return cls
      elif not defined_structs is None:
        defined_structs.append(cls.get_capi_name())
    return None

  def get_class_names(self):
    """ Returns the names of all classes in this object. """
    result = []
    for cls in self.classes:
      result.append(cls.get_name())
    return result

  def get_base_class_name(self, classname):
    """ Returns the base (root) class name for |classname|. """
    cur_cls = self.get_class(classname)
    while True:
      parent_name = cur_cls.get_parent_name()
      if is_base_class(parent_name):
        return parent_name
      else:
        parent_cls = self.get_class(parent_name)
        if parent_cls is None:
          break
      cur_cls = self.get_class(parent_name)
    return None

  def get_types(self, list):
    """ Return a dictionary mapping data types to analyzed values. """
    for cls in self.typedefs:
      cls.get_types(list)

    for cls in self.classes:
      cls.get_types(list)

  def get_alias_translation(self, alias):
    """ Return a translation of alias to value based on typedef
        statements. """
    for cls in self.typedefs:
      if cls.alias == alias:
        return cls.value
    return None

  def get_analysis(self, value, named=True):
    """ Return an analysis of the value based the header file context. """
    return obj_analysis([self], value, named)

  def get_defined_structs(self):
    """ Return a list of already defined structure names. """
    return [
        'cef_print_info_t', 'cef_window_info_t', 'cef_base_ref_counted_t',
        'cef_base_scoped_t'
    ]

  def get_capi_translations(self):
    """ Return a dictionary that maps C++ terminology to C API terminology.
    """
    # strings that will be changed in C++ comments
    map = {
        'class': 'structure',
        'Class': 'Structure',
        'interface': 'structure',
        'Interface': 'Structure',
        'true': 'true (1)',
        'false': 'false (0)',
        'empty': 'NULL',
        'method': 'function'
    }

    # add mappings for all classes and functions
    funcs = self.get_funcs()
    for func in funcs:
      map[func.get_name() + '()'] = func.get_capi_name() + '()'

    classes = self.get_classes()
    for cls in classes:
      map[cls.get_name()] = cls.get_capi_name()

      funcs = cls.get_virtual_funcs()
      for func in funcs:
        map[func.get_name() + '()'] = func.get_capi_name() + '()'

      funcs = cls.get_static_funcs()
      for func in funcs:
        map[func.get_name() + '()'] = func.get_capi_name() + '()'

    return map
class obj_class:
""" Class representing a C++ class. """
def __init__(self, parent, filename, attrib, name, parent_name, body, comment,
includes, forward_declares):
if not isinstance(parent, obj_header):
raise Exception('Invalid parent object type')
self.parent = parent
self.filename = filename
self.attribs = str_to_dict(attrib)
self.name = name
self.parent_name = parent_name
self.comment = comment
self.includes = includes
self.forward_declares = forward_declares
# extract typedefs
p = re.compile(
'\n' + _cre_space + 'typedef' + _cre_space + _cre_typedef + ';',
re.MULTILINE | re.DOTALL)
list = p.findall(body)
# build the typedef objects
self.typedefs = []
for value in list:
pos = value.rfind(' ')
if pos < 0:
raise Exception('Invalid typedef: ' + value)
alias = value[pos + 1:].strip()
value = value[:pos].strip()
self.typedefs.append(obj_typedef(self, filename, value, alias))
# extract static functions
p = re.compile('\n' + _cre_space + _cre_attrib + '\n' + _cre_space +
'static' + _cre_space + _cre_func + '\((.*?)\)',
re.MULTILINE | re.DOTALL)
list = p.findall(body)
# build the static function objects
self.staticfuncs = []
for attrib, retval, argval in list:
comment = get_comment(body, retval + '(' + argval + ')')
validate_comment(filename, retval, comment)
self.staticfuncs.append(
obj_function_static(self, attrib, retval, argval, comment))
# extract virtual functions
p = re.compile(
'\n' + _cre_space + _cre_attrib + '\n' + _cre_space + 'virtual' +
_cre_space + _cre_func + '\((.*?)\)' + _cre_vfmod,
re.MULTILINE | re.DOTALL)
list = p.findall(body)
# build the virtual function objects
self.virtualfuncs = []
for attrib, retval, argval, vfmod in list:
comment = get_comment(body, retval + '(' + argval + ')')
validate_comment(filename, retval, comment)
self.virtualfuncs.append(
obj_function_virtual(self, attrib, retval, argval, comment,
vfmod.strip()))
def __repr__(self):
result = '/* ' + dict_to_str(
self.attribs) + ' */ class ' + self.name + "\n{"
if len(self.typedefs) > 0:
result += "\n\t"
strlist = []
for cls in self.typedefs:
strlist.append(str(cls))
result += "\n\t".join(strlist)
if len(self.staticfuncs) > 0:
result += "\n\t"
strlist = []
for cls in self.staticfuncs:
strlist.append(str(cls))
result += "\n\t".join(strlist)
if len(self.virtualfuncs) > 0:
result += "\n\t"
strlist = []
for cls in self.virtualfuncs:
strlist.append(str(cls))
result += "\n\t".join(strlist)
result += "\n};\n"
return result
def get_file_name(self):
""" Return the C++ header file name. Includes the directory component,
if any. """
return self.filename
def get_capi_file_name(self):
""" Return the CAPI header file name. Includes the directory component,
if any. """
return get_capi_file_name(self.filename)
def get_file_directory(self):
""" Return the file directory component, if any. """
pos = self.filename.rfind('/')
if pos >= 0:
return self.filename[:pos]
return None
def get_name(self):
""" Return the class name. """
return self.name
def get_capi_name(self):
""" Return the CAPI structure name for this class. """
return get_capi_name(self.name, True)
def get_parent_name(self):
""" Return the parent class name. """
return self.parent_name
def get_parent_capi_name(self):
""" Return the CAPI structure name for the parent class. """
return get_capi_name(self.parent_name, True)
def has_parent(self, parent_name):
""" Returns true if this class has the specified class anywhere in its
inheritance hierarchy. """
# Every class has a known base class as the top-most parent.
if is_base_class(parent_name) or parent_name == self.parent_name:
return True
if is_base_class(self.parent_name):
return False
cur_cls = self.parent.get_class(self.parent_name)
while True:
cur_parent_name = cur_cls.get_parent_name()
if is_base_class(cur_parent_name):
break
elif cur_parent_name == parent_name:
return True
cur_cls = self.parent.get_class(cur_parent_name)
return False
def get_comment(self):
""" Return the class comment as an array of lines. """
return self.comment
def get_includes(self):
""" Return the list of classes that are included from this class'
header file. """
return self.includes
def get_forward_declares(self):
""" Return the list of classes that are forward declared for this
class. """
return self.forward_declares
def get_attribs(self):
""" Return all attributes as a dictionary. """
return self.attribs
def has_attrib(self, name):
""" Return true if the specified attribute exists. """
return name in self.attribs
def get_attrib(self, name):
    """ Return the first or only value for the specified attribute, or None
    when the attribute does not exist. """
    if name not in self.attribs:
        return None
    value = self.attribs[name]
    if isinstance(value, list):
        # multi-valued attribute: return the first entry
        return value[0]
    # single string value
    return value
def get_attrib_list(self, name):
    """ Return all values for the specified attribute as a list, or None when
    the attribute does not exist. """
    if name not in self.attribs:
        return None
    value = self.attribs[name]
    # single string values are wrapped; lists are returned unchanged
    return value if isinstance(value, list) else [value]
def get_typedefs(self):
    """ Return the list of typedef objects. """
    typedef_objs = self.typedefs
    return typedef_objs
def has_typedef_alias(self, alias):
    """ Returns true if the specified typedef alias is defined in the scope
    of this class declaration. """
    return any(td.get_alias() == alias for td in self.typedefs)
def get_static_funcs(self):
    """ Return the list of static function objects. """
    statics = self.staticfuncs
    return statics
def get_virtual_funcs(self):
    """ Return the list of virtual function objects. """
    virtuals = self.virtualfuncs
    return virtuals
def get_types(self, list):
    """ Populate |list|, a dictionary mapping data type names to analyzed
    values, with every type used by this class' typedefs and functions. """
    for member_group in (self.typedefs, self.staticfuncs, self.virtualfuncs):
        for member in member_group:
            member.get_types(list)
def get_alias_translation(self, alias):
    """ Return the analyzed value for the typedef matching |alias|, or None
    when no such typedef exists in this class. """
    return next((td.value for td in self.typedefs if td.alias == alias), None)
def get_analysis(self, value, named=True):
    """ Return an analysis of |value| performed in this class' definition
    context, falling back to the parent header's scope.
    """
    scope_chain = [self, self.parent]
    return obj_analysis(scope_chain, value, named)
def is_library_side(self):
    """ Returns true if the class is implemented by the library. """
    source = self.attribs['source']
    return source == 'library'
def is_client_side(self):
    """ Returns true if the class is implemented by the client. """
    source = self.attribs['source']
    return source == 'client'
class obj_typedef:
    """ Class representing a typedef statement. """

    def __init__(self, parent, filename, value, alias):
        # The parent provides the scope used to analyze the aliased value and
        # must therefore be a header or class object.
        if not isinstance(parent, obj_header) \
            and not isinstance(parent, obj_class):
            raise Exception('Invalid parent object type')
        self.parent = parent
        self.filename = filename
        self.alias = alias
        # obj_analysis of the aliased value (named=False: there is no
        # variable name to extract from a typedef value).
        self.value = self.parent.get_analysis(value, False)

    def __repr__(self):
        return 'typedef ' + self.value.get_type() + ' ' + self.alias + ';'

    def get_file_name(self):
        """ Return the C++ header file name. """
        return self.filename

    def get_capi_file_name(self):
        """ Return the CAPI header file name. """
        return get_capi_file_name(self.filename)

    def get_alias(self):
        """ Return the alias. """
        return self.alias

    def get_value(self):
        """ Return an analysis of the value based on the class or header file
        definition context.
        """
        return self.value

    def get_types(self, list):
        """ Add this typedef's data type to |list|, a dictionary mapping type
        names to analyzed values. Mutates |list| in place. """
        name = self.value.get_type()
        if not name in list:
            list[name] = self.value
class obj_function:
    """ Class representing a function. """

    def __init__(self, parent, filename, attrib, retval, argval, comment):
        self.parent = parent
        self.filename = filename
        self.attribs = str_to_dict(attrib)
        self.retval = obj_argument(self, retval)
        # The function name is the trailing identifier of the retval string;
        # remove_name() strips it off and returns it.
        self.name = self.retval.remove_name()
        self.comment = comment

        # build the argument objects
        self.arguments = []
        arglist = argval.split(',')
        argindex = 0
        while argindex < len(arglist):
            arg = arglist[argindex]
            if arg.find('<') >= 0 and arg.find('>') == -1:
                # We've split inside of a template type declaration. Join the
                # next argument with this argument.
                argindex += 1
                arg += ',' + arglist[argindex]
            arg = arg.strip()
            if len(arg) > 0:
                argument = obj_argument(self, arg)
                if argument.needs_attrib_count_func() and \
                    argument.get_attrib_count_func() is None:
                    raise Exception("A 'count_func' attribute is required "+ \
                                    "for the '"+argument.get_name()+ \
                                    "' parameter to "+self.get_qualified_name())
                self.arguments.append(argument)
            argindex += 1

        if self.retval.needs_attrib_default_retval() and \
            self.retval.get_attrib_default_retval() is None:
            raise Exception("A 'default_retval' attribute is required for "+ \
                            self.get_qualified_name())

    def __repr__(self):
        return '/* ' + dict_to_str(self.attribs) + ' */ ' + self.get_cpp_proto()

    def get_file_name(self):
        """ Return the C++ header file name. """
        return self.filename

    def get_capi_file_name(self):
        """ Return the CAPI header file name. """
        return get_capi_file_name(self.filename)

    def get_name(self):
        """ Return the function name. """
        return self.name

    def get_qualified_name(self):
        """ Return the fully qualified function name. """
        if isinstance(self.parent, obj_header):
            # global function
            return self.name
        else:
            # member function
            return self.parent.get_name() + '::' + self.name

    def get_capi_name(self, prefix=None):
        """ Return the CAPI function name. An explicit 'capi_name' attribute
        overrides the generated name. """
        if 'capi_name' in self.attribs:
            return self.attribs['capi_name']
        return get_capi_name(self.name, False, prefix)

    def get_comment(self):
        """ Return the function comment as an array of lines. """
        return self.comment

    def get_attribs(self):
        """ Return all attributes as a dictionary. """
        return self.attribs

    def has_attrib(self, name):
        """ Return true if the specified attribute exists. """
        return name in self.attribs

    def get_attrib(self, name):
        """ Return the first or only value for specified attribute. """
        if name in self.attribs:
            if isinstance(self.attribs[name], list):
                # the value is a list
                return self.attribs[name][0]
            else:
                # the value is a string
                return self.attribs[name]
        return None

    def get_attrib_list(self, name):
        """ Return all values for specified attribute as a list. """
        if name in self.attribs:
            if isinstance(self.attribs[name], list):
                # the value is already a list
                return self.attribs[name]
            else:
                # convert the value to a list
                return [self.attribs[name]]
        return None

    def get_retval(self):
        """ Return the return value object. """
        return self.retval

    def get_arguments(self):
        """ Return the argument array. """
        return self.arguments

    def get_types(self, list):
        """ Add the data types used by this function's arguments to |list|,
        a dictionary mapping type names to analyzed values. """
        for cls in self.arguments:
            cls.get_types(list)

    def get_capi_parts(self, defined_structs=[], prefix=None):
        """ Return the parts of the C API function definition as a dictionary
        with 'retval', 'name' and 'args' keys.
        NOTE: defined_structs defaults to a shared mutable list; it is only
        read here, never modified. """
        retval = ''
        dict = self.retval.get_type().get_capi(defined_structs)
        if dict['format'] == 'single':
            retval = dict['value']

        name = self.get_capi_name(prefix)

        args = []

        if isinstance(self, obj_function_virtual):
            # virtual functions get themselves as the first argument
            str = 'struct _' + self.parent.get_capi_name() + '* self'
            if isinstance(self, obj_function_virtual) and self.is_const():
                # const virtual functions get const self pointers
                str = 'const ' + str
            args.append(str)

        if len(self.arguments) > 0:
            for cls in self.arguments:
                type = cls.get_type()
                dict = type.get_capi(defined_structs)
                if dict['format'] == 'single':
                    args.append(dict['value'])
                elif dict['format'] == 'multi-arg':
                    # add an additional argument for the size of the array
                    type_name = type.get_name()
                    if type.is_const():
                        # for const arrays pass the size argument by value
                        args.append('size_t ' + type_name + 'Count')
                    else:
                        # for non-const arrays pass the size argument by address
                        args.append('size_t* ' + type_name + 'Count')
                    args.append(dict['value'])

        return {'retval': retval, 'name': name, 'args': args}

    def get_capi_proto(self, defined_structs=[], prefix=None):
        """ Return the prototype of the C API function. """
        parts = self.get_capi_parts(defined_structs, prefix)
        result = parts['retval']+' '+parts['name']+ \
            '('+', '.join(parts['args'])+')'
        return result

    def get_cpp_parts(self, isimpl=False):
        """ Return the parts of the C++ function definition as a dictionary
        with 'retval', 'name' and 'args' keys. """
        retval = str(self.retval)
        name = self.name

        args = []
        if len(self.arguments) > 0:
            for cls in self.arguments:
                args.append(str(cls))

        if isimpl and isinstance(self, obj_function_virtual):
            # enumeration return values must be qualified with the class name
            # if the type is defined in the class declaration scope.
            type = self.get_retval().get_type()
            if type.is_result_struct() and type.is_result_struct_enum() and \
                self.parent.has_typedef_alias(retval):
                retval = self.parent.get_name() + '::' + retval

        return {'retval': retval, 'name': name, 'args': args}

    def get_cpp_proto(self, classname=None):
        """ Return the prototype of the C++ function. When |classname| is
        given the name is qualified with 'classname::'. """
        parts = self.get_cpp_parts()
        result = parts['retval'] + ' '
        if not classname is None:
            result += classname + '::'
        result += parts['name'] + '(' + ', '.join(parts['args']) + ')'
        if isinstance(self, obj_function_virtual) and self.is_const():
            result += ' const'
        return result

    def is_same_side(self, other_class_name):
        """ Returns true if this function is on the same side (library or
        client) as the specified class. """
        if isinstance(self.parent, obj_class):
            # this function is part of a class
            this_is_library_side = self.parent.is_library_side()
            header = self.parent.parent
        else:
            # this function is global
            this_is_library_side = True
            header = self.parent

        if is_base_class(other_class_name):
            other_is_library_side = False
        else:
            other_class = header.get_class(other_class_name)
            if other_class is None:
                raise Exception('Unknown class: ' + other_class_name)
            other_is_library_side = other_class.is_library_side()

        return other_is_library_side == this_is_library_side
class obj_function_static(obj_function):
    """ Class representing a static function. """

    def __init__(self, parent, attrib, retval, argval, comment):
        # Static functions may only be declared inside a class.
        if not isinstance(parent, obj_class):
            raise Exception('Invalid parent object type')
        obj_function.__init__(self, parent, parent.filename, attrib, retval, argval,
                              comment)

    def __repr__(self):
        return 'static ' + obj_function.__repr__(self) + ';'

    def get_capi_name(self, prefix=None):
        """ Return the CAPI function name. """
        if prefix is None:
            # by default static functions are prefixed with the class name
            prefix = get_capi_name(self.parent.get_name(), False)
        return obj_function.get_capi_name(self, prefix)
class obj_function_virtual(obj_function):
    """ Class representing a virtual function. """

    def __init__(self, parent, attrib, retval, argval, comment, vfmod):
        # Virtual functions may only be declared inside a class.
        if not isinstance(parent, obj_class):
            raise Exception('Invalid parent object type')
        obj_function.__init__(self, parent, parent.filename, attrib, retval, argval,
                              comment)
        # |vfmod| carries any trailing method modifier; only 'const' is
        # recognized here.
        if vfmod == 'const':
            self.isconst = True
        else:
            self.isconst = False

    def __repr__(self):
        return 'virtual ' + obj_function.__repr__(self) + ';'

    def is_const(self):
        """ Returns true if the method declaration is const. """
        return self.isconst
class obj_argument:
    """ Class representing a function argument. """

    def __init__(self, parent, argval):
        if not isinstance(parent, obj_function):
            raise Exception('Invalid parent object type')
        self.parent = parent
        # Analyze the argument string in the scope of the function's parent
        # (the enclosing class or header).
        self.type = self.parent.parent.get_analysis(argval)

    def __repr__(self):
        result = ''
        if self.type.is_const():
            result += 'const '
        result += self.type.get_type()
        if self.type.is_byref():
            result += '&'
        elif self.type.is_byaddr():
            result += '*'
        if self.type.has_name():
            result += ' ' + self.type.get_name()
        return result

    def get_name(self):
        """ Return the name for this argument. """
        return self.type.get_name()

    def remove_name(self):
        """ Remove and return the name value. """
        name = self.type.get_name()
        self.type.name = None
        return name

    def get_type(self):
        """ Return an analysis of the argument type based on the class
        definition context.
        """
        return self.type

    def get_types(self, list):
        """ Add this argument's data type to |list|, a dictionary mapping
        type names to analyzed values. Mutates |list| in place. """
        name = self.type.get_type()
        if not name in list:
            list[name] = self.type

    def needs_attrib_count_func(self):
        """ Returns true if this argument requires a 'count_func' attribute. """
        # A 'count_func' attribute is required for non-const non-string vector
        # attribute types
        return self.type.has_name() and \
            self.type.is_result_vector() and \
            not self.type.is_result_vector_string() and \
            not self.type.is_const()

    def get_attrib_count_func(self):
        """ Returns the count function for this argument, or None if no
        matching 'count_func' attribute entry exists. """
        # The 'count_func' attribute value format is name:function
        if not self.parent.has_attrib('count_func'):
            return None
        name = self.type.get_name()
        vals = self.parent.get_attrib_list('count_func')
        for val in vals:
            parts = val.split(':')
            if len(parts) != 2:
                raise Exception("Invalid 'count_func' attribute value for "+ \
                                self.parent.get_qualified_name()+': '+val)
            if parts[0].strip() == name:
                return parts[1].strip()
        return None

    def needs_attrib_default_retval(self):
        """ Returns true if this argument requires a 'default_retval' attribute.
        """
        # A 'default_retval' attribute is required for enumeration return value
        # types.
        return not self.type.has_name() and \
            self.type.is_result_struct() and \
            self.type.is_result_struct_enum()

    def get_attrib_default_retval(self):
        """ Returns the default return value for this argument. """
        return self.parent.get_attrib('default_retval')

    def get_arg_type(self):
        """ Returns the argument type as defined in translator.README.txt. """
        if not self.type.has_name():
            raise Exception('Cannot be called for retval types')

        # simple or enumeration type
        if (self.type.is_result_simple() and \
            self.type.get_type() != 'bool') or \
            (self.type.is_result_struct() and \
            self.type.is_result_struct_enum()):
            if self.type.is_byref():
                if self.type.is_const():
                    return 'simple_byref_const'
                return 'simple_byref'
            elif self.type.is_byaddr():
                return 'simple_byaddr'
            return 'simple_byval'

        # boolean type
        if self.type.get_type() == 'bool':
            if self.type.is_byref():
                return 'bool_byref'
            elif self.type.is_byaddr():
                return 'bool_byaddr'
            return 'bool_byval'

        # structure type
        if self.type.is_result_struct() and self.type.is_byref():
            if self.type.is_const():
                return 'struct_byref_const'
            return 'struct_byref'

        # string type
        if self.type.is_result_string() and self.type.is_byref():
            if self.type.is_const():
                return 'string_byref_const'
            return 'string_byref'

        # *ptr type
        if self.type.is_result_ptr():
            prefix = self.type.get_result_ptr_type_prefix()
            same_side = self.parent.is_same_side(self.type.get_ptr_type())
            if self.type.is_byref():
                if same_side:
                    return prefix + 'ptr_same_byref'
                return prefix + 'ptr_diff_byref'
            if same_side:
                return prefix + 'ptr_same'
            return prefix + 'ptr_diff'

        if self.type.is_result_vector():
            # all vector types must be passed by reference
            if not self.type.is_byref():
                return 'invalid'

            if self.type.is_result_vector_string():
                # string vector type
                if self.type.is_const():
                    return 'string_vec_byref_const'
                return 'string_vec_byref'

            if self.type.is_result_vector_simple():
                if self.type.get_vector_type() != 'bool':
                    # simple/enumeration vector types
                    if self.type.is_const():
                        return 'simple_vec_byref_const'
                    return 'simple_vec_byref'

                # boolean vector types
                if self.type.is_const():
                    return 'bool_vec_byref_const'
                return 'bool_vec_byref'

            if self.type.is_result_vector_ptr():
                # *ptr vector types
                prefix = self.type.get_result_vector_ptr_type_prefix()
                same_side = self.parent.is_same_side(self.type.get_ptr_type())
                if self.type.is_const():
                    if same_side:
                        return prefix + 'ptr_vec_same_byref_const'
                    return prefix + 'ptr_vec_diff_byref_const'
                if same_side:
                    return prefix + 'ptr_vec_same_byref'
                return prefix + 'ptr_vec_diff_byref'

        # string single map type
        if self.type.is_result_map_single():
            if not self.type.is_byref():
                return 'invalid'
            if self.type.is_const():
                return 'string_map_single_byref_const'
            return 'string_map_single_byref'

        # string multi map type
        if self.type.is_result_map_multi():
            if not self.type.is_byref():
                return 'invalid'
            if self.type.is_const():
                return 'string_map_multi_byref_const'
            return 'string_map_multi_byref'

        return 'invalid'

    def get_retval_type(self):
        """ Returns the retval type as defined in translator.README.txt. """
        if self.type.has_name():
            raise Exception('Cannot be called for argument types')

        # unsupported modifiers
        if self.type.is_const() or self.type.is_byref() or \
            self.type.is_byaddr():
            return 'invalid'

        # void types don't have a return value
        if self.type.get_type() == 'void':
            return 'none'

        if (self.type.is_result_simple() and \
            self.type.get_type() != 'bool') or \
            (self.type.is_result_struct() and self.type.is_result_struct_enum()):
            return 'simple'

        if self.type.get_type() == 'bool':
            return 'bool'

        if self.type.is_result_string():
            return 'string'

        if self.type.is_result_ptr():
            prefix = self.type.get_result_ptr_type_prefix()
            if self.parent.is_same_side(self.type.get_ptr_type()):
                return prefix + 'ptr_same'
            else:
                return prefix + 'ptr_diff'

        return 'invalid'

    def get_retval_default(self, for_capi):
        """ Returns the default return value based on the retval type.
        |for_capi| selects the C API spelling (e.g. '0'/'NULL') over the C++
        spelling (e.g. 'false'/'nullptr'). """
        # start with the default retval attribute, if any.
        retval = self.get_attrib_default_retval()
        if not retval is None:
            if for_capi:
                # apply any appropriate C API translations.
                if retval == 'true':
                    return '1'
                if retval == 'false':
                    return '0'
            return retval

        # next look at the retval type value.
        type = self.get_retval_type()
        if type == 'simple':
            return self.get_type().get_result_simple_default()
        elif type == 'bool':
            if for_capi:
                return '0'
            return 'false'
        elif type == 'string':
            if for_capi:
                return 'NULL'
            return 'CefString()'
        elif type == 'refptr_same' or type == 'refptr_diff' or \
            type == 'rawptr_same' or type == 'rawptr_diff':
            if for_capi:
                return 'NULL'
            return 'nullptr'
        elif type == 'ownptr_same' or type == 'ownptr_diff':
            if for_capi:
                return 'NULL'
            return 'CefOwnPtr<' + self.type.get_ptr_type() + '>()'

        return ''
class obj_analysis:
    """ Class representing an analysis of a data type value. """

    def __init__(self, scopelist, value, named):
        self.value = value
        self.result_type = 'unknown'
        self.result_value = None
        self.result_default = None
        self.ptr_type = None

        # parse the argument string
        partlist = value.strip().split()

        if named == True:
            # extract the name value
            self.name = partlist[-1]
            del partlist[-1]
        else:
            self.name = None

        if len(partlist) == 0:
            raise Exception('Invalid argument value: ' + value)

        # check const status
        if partlist[0] == 'const':
            self.isconst = True
            del partlist[0]
        else:
            self.isconst = False

        if len(partlist) == 0:
            raise Exception('Invalid argument value: ' + value)

        # combine the data type
        self.type = ' '.join(partlist)

        # extract the last character of the data type
        endchar = self.type[-1]

        # check if the value is passed by reference
        if endchar == '&':
            self.isbyref = True
            self.type = self.type[:-1]
        else:
            self.isbyref = False

        # check if the value is passed by address
        if endchar == '*':
            self.isbyaddr = True
            self.type = self.type[:-1]
        else:
            self.isbyaddr = False

        # see if the value is directly identifiable
        if self._check_advanced(self.type) == True:
            return

        # not identifiable, so look it up via typedef aliases visible in the
        # given scopes (first match wins)
        translation = None
        for scope in scopelist:
            if not isinstance(scope, obj_header) \
                and not isinstance(scope, obj_class):
                raise Exception('Invalid scope object type')
            translation = scope.get_alias_translation(self.type)
            if not translation is None:
                break

        if translation is None:
            raise Exception('Failed to translate type: ' + self.type)

        # the translation succeeded so keep the result
        self.result_type = translation.result_type
        self.result_value = translation.result_value

    def _check_advanced(self, value):
        # check for vectors
        if value.find('std::vector') == 0:
            self.result_type = 'vector'
            # strip the leading 'std::vector<' (12 chars) and trailing '>'
            val = value[12:-1].strip()
            self.result_value = [self._get_basic(val)]
            self.result_value[0]['vector_type'] = val
            return True

        # check for maps
        if value.find('std::map') == 0:
            self.result_type = 'map'
            # strip the leading 'std::map<' (9 chars) and trailing '>'
            vals = value[9:-1].split(',')
            if len(vals) == 2:
                self.result_value = [
                    self._get_basic(vals[0].strip()),
                    self._get_basic(vals[1].strip())
                ]
                return True

        # check for multimaps
        if value.find('std::multimap') == 0:
            self.result_type = 'multimap'
            # strip the leading 'std::multimap<' (14 chars) and trailing '>'
            vals = value[14:-1].split(',')
            if len(vals) == 2:
                self.result_value = [
                    self._get_basic(vals[0].strip()),
                    self._get_basic(vals[1].strip())
                ]
                return True

        # check for basic types
        basic = self._get_basic(value)
        if not basic is None:
            self.result_type = basic['result_type']
            self.result_value = basic['result_value']
            if 'ptr_type' in basic:
                self.ptr_type = basic['ptr_type']
            if 'result_default' in basic:
                self.result_default = basic['result_default']
            return True

        return False

    def _get_basic(self, value):
        # check for string values
        if value == "CefString":
            return {'result_type': 'string', 'result_value': None}

        # check for simple direct translations
        if value in _simpletypes.keys():
            return {
                'result_type': 'simple',
                'result_value': _simpletypes[value][0],
                'result_default': _simpletypes[value][1],
            }

        # check if already a C API structure
        if value[-2:] == '_t':
            return {'result_type': 'structure', 'result_value': value}

        # check for CEF reference pointers
        p = re.compile('^CefRefPtr<(.*?)>$', re.DOTALL)
        list = p.findall(value)
        if len(list) == 1:
            return {
                'result_type': 'refptr',
                'result_value': get_capi_name(list[0], True) + '*',
                'ptr_type': list[0]
            }

        # check for CEF owned pointers
        p = re.compile('^CefOwnPtr<(.*?)>$', re.DOTALL)
        list = p.findall(value)
        if len(list) == 1:
            return {
                'result_type': 'ownptr',
                'result_value': get_capi_name(list[0], True) + '*',
                'ptr_type': list[0]
            }

        # check for CEF raw pointers
        p = re.compile('^CefRawPtr<(.*?)>$', re.DOTALL)
        list = p.findall(value)
        if len(list) == 1:
            return {
                'result_type': 'rawptr',
                'result_value': get_capi_name(list[0], True) + '*',
                'ptr_type': list[0]
            }

        # check for CEF structure types
        if value[0:3] == 'Cef' and value[-4:] != 'List':
            return {
                'result_type': 'structure',
                'result_value': get_capi_name(value, True)
            }

        return None

    def __repr__(self):
        return '(' + self.result_type + ') ' + str(self.result_value)

    def has_name(self):
        """ Returns true if a name value exists. """
        return (not self.name is None)

    def get_name(self):
        """ Return the name. """
        return self.name

    def get_value(self):
        """ Return the C++ value (type + name). """
        return self.value

    def get_type(self):
        """ Return the C++ type. """
        return self.type

    def get_ptr_type(self):
        """ Return the C++ class type referenced by a CefRefPtr. """
        if self.is_result_vector() and self.is_result_vector_ptr():
            # return the vector RefPtr type
            return self.result_value[0]['ptr_type']
        # return the basic RefPtr type
        return self.ptr_type

    def get_vector_type(self):
        """ Return the C++ class type referenced by a std::vector. """
        if self.is_result_vector():
            return self.result_value[0]['vector_type']
        return None

    def is_const(self):
        """ Returns true if the argument value is constant. """
        return self.isconst

    def is_byref(self):
        """ Returns true if the argument is passed by reference. """
        return self.isbyref

    def is_byaddr(self):
        """ Returns true if the argument is passed by address. """
        return self.isbyaddr

    def is_result_simple(self):
        """ Returns true if this is a simple argument type. """
        return (self.result_type == 'simple')

    def get_result_simple_type_root(self):
        """ Return the simple structure or basic type name. """
        return self.result_value

    def get_result_simple_type(self):
        """ Return the simple type. """
        result = ''
        if self.is_const():
            result += 'const '
        result += self.result_value
        if self.is_byaddr() or self.is_byref():
            result += '*'
        return result

    def get_result_simple_default(self):
        """ Return the default value of the basic type. """
        return self.result_default

    def is_result_ptr(self):
        """ Returns true if this is a *Ptr type. """
        return self.is_result_refptr() or self.is_result_ownptr() or \
            self.is_result_rawptr()

    def get_result_ptr_type_root(self):
        """ Return the *Ptr type structure name. """
        return self.result_value[:-1]

    def get_result_ptr_type(self, defined_structs=[]):
        """ Return the *Ptr type. """
        result = ''
        if not self.result_value[:-1] in defined_structs:
            result += 'struct _'
        result += self.result_value
        if self.is_byref() or self.is_byaddr():
            result += '*'
        return result

    def get_result_ptr_type_prefix(self):
        """ Returns the *Ptr type prefix ('ref', 'own' or 'raw'). """
        if self.is_result_refptr():
            return 'ref'
        if self.is_result_ownptr():
            return 'own'
        if self.is_result_rawptr():
            return 'raw'
        raise Exception('Not a pointer type')

    def is_result_refptr(self):
        """ Returns true if this is a RefPtr type. """
        return (self.result_type == 'refptr')

    def is_result_ownptr(self):
        """ Returns true if this is a OwnPtr type. """
        return (self.result_type == 'ownptr')

    def is_result_rawptr(self):
        """ Returns true if this is a RawPtr type. """
        return (self.result_type == 'rawptr')

    def is_result_struct(self):
        """ Returns true if this is a structure type. """
        return (self.result_type == 'structure')

    def is_result_struct_enum(self):
        """ Returns true if this struct type is likely an enumeration. """
        # structure values that are passed by reference or address must be
        # structures and not enumerations
        if not self.is_byref() and not self.is_byaddr():
            return True
        return False

    def get_result_struct_type(self, defined_structs=[]):
        """ Return the structure or enumeration type. """
        result = ''

        is_enum = self.is_result_struct_enum()
        if not is_enum:
            if self.is_const():
                result += 'const '
            if not self.result_value in defined_structs:
                result += 'struct _'
        result += self.result_value
        if not is_enum:
            result += '*'
        return result

    def is_result_string(self):
        """ Returns true if this is a string type. """
        return (self.result_type == 'string')

    def get_result_string_type(self):
        """ Return the string type. """
        if not self.has_name():
            # Return values are string structs that the user must free. Use
            # the name of the structure as a hint.
            return 'cef_string_userfree_t'
        elif not self.is_const() and (self.is_byref() or self.is_byaddr()):
            # Parameters passed by reference or address. Use the normal
            # non-const string struct.
            return 'cef_string_t*'
        # Const parameters use the const string struct.
        return 'const cef_string_t*'

    def is_result_vector(self):
        """ Returns true if this is a vector type. """
        return (self.result_type == 'vector')

    def is_result_vector_string(self):
        """ Returns true if this is a string vector. """
        return self.result_value[0]['result_type'] == 'string'

    def is_result_vector_simple(self):
        """ Returns true if this is a simple vector. """
        return self.result_value[0]['result_type'] == 'simple'

    def is_result_vector_ptr(self):
        """ Returns true if this is a *Ptr vector. """
        return self.is_result_vector_refptr() or \
            self.is_result_vector_ownptr() or \
            self.is_result_vector_rawptr()

    def get_result_vector_ptr_type_prefix(self):
        """ Returns the *Ptr type prefix ('ref', 'own' or 'raw'). """
        if self.is_result_vector_refptr():
            return 'ref'
        if self.is_result_vector_ownptr():
            return 'own'
        if self.is_result_vector_rawptr():
            return 'raw'
        raise Exception('Not a pointer type')

    def is_result_vector_refptr(self):
        """ Returns true if this is a RefPtr vector. """
        return self.result_value[0]['result_type'] == 'refptr'

    def is_result_vector_ownptr(self):
        """ Returns true if this is a OwnPtr vector. """
        return self.result_value[0]['result_type'] == 'ownptr'

    def is_result_vector_rawptr(self):
        """ Returns true if this is a RawPtr vector. """
        return self.result_value[0]['result_type'] == 'rawptr'

    def get_result_vector_type_root(self):
        """ Return the vector structure or basic type name. """
        return self.result_value[0]['result_value']

    def get_result_vector_type(self, defined_structs=[]):
        """ Return the vector type as a dictionary with 'value' and 'format'
        keys. """
        if not self.has_name():
            raise Exception('Cannot use vector as a return type')

        type = self.result_value[0]['result_type']
        value = self.result_value[0]['result_value']

        result = {}
        if type == 'string':
            result['value'] = 'cef_string_list_t'
            result['format'] = 'single'
            return result

        if type == 'simple':
            str = value
            if self.is_const():
                str += ' const'
            str += '*'
            result['value'] = str
        elif type == 'refptr' or type == 'ownptr' or type == 'rawptr':
            str = ''
            if not value[:-1] in defined_structs:
                str += 'struct _'
            str += value
            if self.is_const():
                str += ' const'
            str += '*'
            result['value'] = str
        else:
            raise Exception('Unsupported vector type: ' + type)

        # vector values must be passed as a value array parameter
        # and a size parameter
        result['format'] = 'multi-arg'
        return result

    def is_result_map(self):
        """ Returns true if this is a map type. """
        return (self.result_type == 'map' or self.result_type == 'multimap')

    def is_result_map_single(self):
        """ Returns true if this is a single map type. """
        return (self.result_type == 'map')

    def is_result_map_multi(self):
        """ Returns true if this is a multi map type. """
        return (self.result_type == 'multimap')

    def get_result_map_type(self, defined_structs=[]):
        """ Return the map type as a dictionary with 'value' and 'format'
        keys. """
        if not self.has_name():
            raise Exception('Cannot use map as a return type')
        if self.result_value[0]['result_type'] == 'string' \
            and self.result_value[1]['result_type'] == 'string':
            if self.result_type == 'map':
                return {'value': 'cef_string_map_t', 'format': 'single'}
            elif self.result_type == 'multimap':
                return {'value': 'cef_string_multimap_t', 'format': 'multi'}
        raise Exception('Only mappings of strings to strings are supported')

    def get_capi(self, defined_structs=[]):
        """ Format the value for the C API. Returns a dictionary with
        'format' and 'value' keys. """
        result = ''
        format = 'single'
        if self.is_result_simple():
            result += self.get_result_simple_type()
        elif self.is_result_ptr():
            result += self.get_result_ptr_type(defined_structs)
        elif self.is_result_struct():
            result += self.get_result_struct_type(defined_structs)
        elif self.is_result_string():
            result += self.get_result_string_type()
        elif self.is_result_map():
            resdict = self.get_result_map_type(defined_structs)
            if resdict['format'] == 'single' or resdict['format'] == 'multi':
                result += resdict['value']
            else:
                raise Exception('Unsupported map type')
        elif self.is_result_vector():
            resdict = self.get_result_vector_type(defined_structs)
            if resdict['format'] != 'single':
                format = resdict['format']
            result += resdict['value']

        if self.has_name():
            result += ' ' + self.get_name()

        return {'format': format, 'value': result}
# test the module
if __name__ == "__main__":
    import pprint
    import sys

    # verify that the correct number of command-line arguments are provided
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: ' + sys.argv[0] + ' <directory>\n')
        # exit with a non-zero status so callers can detect the usage error
        # (the original exited with status 0 and no trailing newline)
        sys.exit(1)

    pp = pprint.PrettyPrinter(indent=4)

    # create the header object
    header = obj_header()
    header.add_directory(sys.argv[1])

    # output the type mapping
    types = {}
    header.get_types(types)
    pp.pprint(types)
    sys.stdout.write('\n')

    # output the parsed C++ data
    sys.stdout.write(str(header))

    # output the C API formatted data
    defined_names = header.get_defined_structs()
    result = ''

    # global functions
    funcs = header.get_funcs()
    if len(funcs) > 0:
        for func in funcs:
            result += func.get_capi_proto(defined_names) + ';\n'
        result += '\n'

    classes = header.get_classes()
    for cls in classes:
        # virtual functions are inside a structure
        result += 'struct ' + cls.get_capi_name() + '\n{\n'
        funcs = cls.get_virtual_funcs()
        if len(funcs) > 0:
            for func in funcs:
                result += '\t' + func.get_capi_proto(defined_names) + ';\n'
        result += '}\n\n'
        defined_names.append(cls.get_capi_name())

        # static functions become global
        funcs = cls.get_static_funcs()
        if len(funcs) > 0:
            for func in funcs:
                result += func.get_capi_proto(defined_names) + ';\n'
            result += '\n'
    sys.stdout.write(result)
| [
"magreenblatt@gmail.com"
] | magreenblatt@gmail.com |
85670d26e24b6401659b0d9ff893326cd87d1fd6 | fd8024aa9d0995caf60b05ee8249340dda49479e | /dynamite-remote/dynamite_remote/utilities.py | 3d87506adedf349009df9400e77e7f9f09be2746 | [] | no_license | DynamiteAI/utilities | d8c5088aeea8d6e4cbc5f1656b1374362f6bd571 | 5cbfebd92b6376db20708d1363079d0974f6a029 | refs/heads/master | 2023-08-11T17:17:28.453094 | 2021-10-01T20:25:09 | 2021-10-01T20:25:09 | 358,475,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,615 | py | import os
import tarfile
import subprocess
from typing import Optional, Tuple, Union
# Home directory of the invoking user. NOTE(review): HOME may be unset in some
# environments (cron, minimal containers), which would make this None and
# produce 'None/...' paths below — confirm callers always run with HOME set.
USER_HOME = os.environ.get("HOME")
# Directory holding per-node lock files; a file named after a host marks that
# host as busy (see execute_dynamite_command_on_remote_host).
LOCK_PATH = f'{USER_HOME}/.dynamite_remote/locks'
# User account used for SSH connections to managed remote nodes.
REMOTE_SSH_USER = 'dynamite-remote'
class NodeLocked(Exception):
    """Raised when a remote node is already executing a command."""

    def __init__(self, hostname, command):
        message = "{} is already running '{}'".format(hostname.strip(), command.strip())
        super().__init__(message)
def create_new_remote_keypair(node_name) -> Tuple[int, str, str]:
    """Generate a new 4096-bit RSA SSH keypair for a node under
    /tmp/dynamite-remote/keys/.

    Args:
        node_name: Name of the node; used as the key file name.

    Returns:
        A (return_code, stdout, stderr) tuple from the ssh-keygen invocation.
    """
    import shlex  # local import: used to quote node_name for the shell

    temp_key_root = '/tmp/dynamite-remote/keys/'
    makedirs(temp_key_root)
    # node_name is interpolated into a shell=True command string; quote it so a
    # crafted name cannot inject additional shell commands. The `cat /dev/zero |`
    # prefix feeds input to ssh-keygen — presumably to satisfy any interactive
    # prompt non-interactively; verify against ssh-keygen behavior.
    p = subprocess.Popen(
        f'cat /dev/zero | ssh-keygen -t rsa -b 4096 -f {temp_key_root}/{shlex.quote(node_name)} -N ""',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = p.communicate()
    return p.returncode, out.decode('utf-8'), err.decode('utf-8')
def execute_over_ssh(*args):
cmd = ['bash', f'{os.environ.get("HOME")}/.dynamite_remote/bin/ssh_wrapper.sh']
cmd.extend(args)
ssh_subprocess = subprocess.Popen(cmd)
ssh_subprocess.communicate()
def execute_dynamite_command_on_remote_host(host_or_ip: str, port: int, private_key_path: str,
*dynamite_arguments):
makedirs(LOCK_PATH)
def is_locked():
return host_or_ip in os.listdir(LOCK_PATH)
remote_cmd = [f'{REMOTE_SSH_USER}@{host_or_ip}', '-p', str(port), '-t', '-i', private_key_path]
local_command = ['sudo', '/usr/local/bin/dynamite']
local_command.extend(dynamite_arguments)
remote_cmd.extend(local_command)
if is_locked():
with open(f'{LOCK_PATH}/{host_or_ip}') as node_lock:
command = node_lock.read()
raise NodeLocked(host_or_ip, command)
execute_over_ssh(*remote_cmd)
def extract_archive(archive_path: str, destination_path: str) -> None:
"""Extract a tar.gz archive to a given destination path.
Args:
archive_path: The full path to the tar.gz archive file
destination_path: The path where the archive will be extracted
Returns:
None
"""
try:
tf = tarfile.open(archive_path)
tf.extractall(path=destination_path)
except IOError:
pass
def makedirs(path: str, exist_ok: Optional[bool] = True) -> None:
"""Create directory(ies) at a given path
Args:
path: The path to the directories
exist_ok: If it exists, create anyway (Default value = True)
Returns:
None
"""
if exist_ok:
os.makedirs(path, exist_ok=True)
else:
os.makedirs(path)
def safely_remove_file(path: str) -> None:
"""Remove a file if it exists at the given path
Args:
path: The path of the file to remove
Returns:
None
"""
if os.path.exists(path):
os.remove(path)
def set_permissions_of_file(file_path: str, unix_permissions_integer: Union[str, int]) -> None:
"""Set the permissions of a file to unix_permissions_integer
Args:
file_path: The path to the file
unix_permissions_integer: The numeric representation of user/group/everyone permissions on a file
Returns:
None
"""
subprocess.call('chmod -R {} {}'.format(unix_permissions_integer, file_path), shell=True)
def search_for_config():
locations = [f'{os.environ.get("HOME")}/.dynamite_remote/config.cfg',
'/etc/dynamite-remote/config.cfg',
'../config.cfg', './config.cfg']
for fp in locations:
if os.path.exists(fp):
return fp
return None
| [
"jamin@dynamite.ai"
] | jamin@dynamite.ai |
e1655716d0d948d2cb64544b273a2246f9206b96 | a239382d2752cb9d04979c1eebfbfb46bd11329a | /face_detection_project_.py | 996ac0c233f83db38ccc52cd44fec7b76207c98f | [] | no_license | Gurwinder-Kaur98/facial-recognition-system-project | c15988257ffbb563f18e50f0ec2bc6886a452c89 | fa660ab7dea2cb8162a1485d98c7aef2c9fb6378 | refs/heads/main | 2023-03-23T15:29:11.755162 | 2021-03-19T09:20:53 | 2021-03-19T09:20:53 | 349,356,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,459 | py | '''SINCE DATABASE IS GIVING US QUICK ACCESS TO PICTURES AND GETTING DATA IS MUCH EASIER IN DATABASE THAN LISTS
THAT'S WHY WE ARE USING MYSQL DATABSE TO STORE USERS DATA LIKE IMAGES,NAME,ID AND ATTENDENCE DATA'''
import mysql.connector as sql
import os
import cv2
import numpy as np
from datetime import date
from datetime import datetime
import face_recognition
import csv
#connecting with MYSQL DATABASE
connection=sql.connect(
host='localhost',
user='root',
password='*****'
)
cursor=connection.cursor(buffered=True)
#CREATING DATABASE AND TABLES TO STORE DATA
'''
cursor.execute('CREATE DATABASE pics_of_permitted_users ')
cursor.execute('USE pics_of_permitted_users')
cursor.execute('CREATE TABLE permitted_users_inform(id INT AUTO_INCREMENT PRIMARY KEY,name VARCHAR(255),image_path VARCHAR(1024))')
cursor.execute('CREATE TABLE Attendence_Record_Table(date VARCHAR(255),name VARCHAR(255),time VARCHAR(255))')
'''
# STORING NAMES AND IMAGES PATH INTO TABLE
'''
path=' write your location of images of permitted users'
for name in mylist:
pic_path= path+'/'+name
name_of_user=os.path.splitext(name)[0]
query='INSERT INTO permitted_users_inform(name,image_path) VALUES(%s,%s)'
val=(name_of_user,pic_path)
cursor.execute(query,val)
cursor.execute("SELECT * FROM permitted_users_inform")
'''
# READING AND ENCODING ALL THE IMAGES OF USERS
cursor.execute('USE pics_of_permitted_users')
cursor.execute("SELECT * FROM permitted_users_inform")
# ENCODING THE IMAGES
encoded_images_list=[]
for touple in cursor:
curr_img=cv2.imread(touple[2]) # GETTING THE INFORMATION OF IMAGES
curr_img=cv2.cvtColor(curr_img,cv2.COLOR_BGR2RGB)#CONVERTING THE IMAGES INO rgb
encode=face_recognition.face_encodings(curr_img)[0]
encoded_images_list.append(encode)
# getting data from webcam
print('encoding has been done')
# CREATING FUNCTION WHICH WILL RECORD THE ATTENDENCE DATA
def Attendence(user_name) :
cursor.execute(' USE pics_of_permitted_users')
dt=date.today()
dtt=dt.strftime("%Y-%m-%d")
now=datetime.now()
tt=now.strftime("%H:%M:%S")
query='SELECT name FROM Attendence_Record_Table WHERE name= %s AND date=%s'
val=(user_name,dtt)
cursor.execute(query,val)
result=cursor.fetchone()
if result is None:
query='INSERT INTO Attendence_Record_Table(date,name,time) VALUES(%s,%s,%s)'
val=(dtt,user_name,tt)
cursor.execute(query,val)
print('Attendence Recorded')
connection.commit()
cap=cv2.VideoCapture(0)
while True:
success,img=cap.read()
reduced_size_img=cv2.resize(img,(0,0),None,0.25,0.25) # RESIZING THE IMAGE CAPTURED BY WEBCAM
reduced_size_imag=cv2.cvtColor(reduced_size_img,cv2.COLOR_BGR2RGB)#CONVERTING IT INTO RGB SINCE COLOURS ARE NOT REQUIRED IN IMAGE
face_frame=face_recognition.face_locations(reduced_size_imag)
encodedframe=face_recognition.face_encodings(reduced_size_imag,face_frame)#ENCODINGS THE IMAGE CAPTURED BY WEBCAM
for encodeface,faceloc in zip(encodedframe,face_frame):
matching=face_recognition.compare_faces(encoded_images_list,encodeface)
faceDis=face_recognition.face_distance(encoded_images_list,encodeface)
matchingindex=np.argmin(faceDis)
matching_index=matchingindex
matchingindex+=1
if matching[matching_index]:
query='SELECT name FROM permitted_users_inform WHERE id = %s'
match_index1=matchingindex.item()
cursor.execute(query,(match_index1,))
name_of_user=cursor.fetchone()[0]
name=str(name_of_user).upper() # STORING ATTENDENCE OF PERSON IN ATTENDENCE TABLE
print(name)
# CREATING RECTANGLE OVER THE FACE OF PREDICTED USER AND SHOWING HIS/HER NAME
y1,x2,y2,x1=faceloc
y1,x2,y2,x1=y1*4,x2*4,y2*4,x1*4 # multiplying with 4 because we are restoring the size
cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),2)
Attendence(name)
connection.commit()
cv2.imshow('webcam',img)
cv2.waitKey(1)
''''
# WE CAN ALSO MAKE A EXCEL FILE AFTER COMPLETING THE ATTENDENCE
cursor.execute('SELECT * FROM Attendence_Record_Table')
with open ("Attendencefile.csv",'w',newline='')as csv_file:
csv_writer=csv.writer(csv_file)
csv_writer.writerow([i[0] for i in cursor.description])
csv_writer.writerows(cursor)
'''
''' we can also use twilio to message the users when they enter.So that if somehow wrong person enter in the room
instead of authorised person than the authorised person will came to know through message and report the issue
'''
'''
import twilio
from twilio.rest import Client
account_sid = '******2bc5988a68276815ddae526ef7b4'
auth_token = '*******77934b8be9963b22cdac9a74c'
client = Client(account_sid,auth_token )
message = client.messages.create(body ="Your attendence is marked", from_ = '+***8491', to ='+*****9898')
'''
| [
"noreply@github.com"
] | Gurwinder-Kaur98.noreply@github.com |
bcd0cf676cbc9d4bdeeca3cf67c6ecefb10024d2 | 4f12100a5f1d99a67e2119724b4f23bd54e4df3b | /sistemaDePersonasWS/sistemaDePersonasWS/sistemaDePersonasWS/wsgi.py | 06543e622952e5c6f424bb3010f5f01e611bf982 | [] | no_license | javierperini/SistemaDePersonas | d6adc21528fd057a1a6580008589b146f27446f0 | 65537e03dd088117901eb470e7a1dbf3046c2bcf | refs/heads/master | 2020-03-16T13:06:46.727122 | 2018-05-10T03:37:16 | 2018-05-13T23:41:57 | 132,681,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | """
WSGI config for sistemaDePersonasWS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sistemaDePersonasWS.settings")
application = get_wsgi_application()
| [
"javierperini90@gmail.com"
] | javierperini90@gmail.com |
e2ec8e1807b2ada32487f68445c59d81a1985ee4 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_update_material_bind_response_wrapper_body.py | 0b1aeca6da72b0508699b95f4a1b46ff5039dc1e | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 996 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.materialbindmod.model.material_bind_update_response import MaterialBindUpdateResponse
globals()['MaterialBindUpdateResponse'] = MaterialBindUpdateResponse
from baiduads.materialbindmod.model.update_material_bind_response_wrapper_body import UpdateMaterialBindResponseWrapperBody
class TestUpdateMaterialBindResponseWrapperBody(unittest.TestCase):
"""UpdateMaterialBindResponseWrapperBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateMaterialBindResponseWrapperBody(self):
"""Test UpdateMaterialBindResponseWrapperBody"""
# FIXME: construct object with mandatory attributes with example values
# model = UpdateMaterialBindResponseWrapperBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"v_wangzichen02@baidu.com"
] | v_wangzichen02@baidu.com |
c710a8f96c892727fa5bba29bf8a3a2db1cff76c | 9a07514a9942303d96031b002b4f28ef248fe689 | /app/page/main.py | 392ae1d6191abb895cd03f46c59ac479abb3ae25 | [] | no_license | zhousk/hogwarts | 58a257c162331b7e262db88fe308312e2d390101 | 93d38d33fb5b4cfa52649c5101d1a325c56d64ca | refs/heads/master | 2023-02-14T00:26:32.820142 | 2021-01-04T17:09:01 | 2021-01-04T17:09:01 | 307,015,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | from appium.webdriver.common.mobileby import MobileBy
from app.page.contact_page import ContactPage
from app.page.base_page import BasePage
class Main(BasePage):
_contact_list = (MobileBy.XPATH, "//*[@text='通讯录']")
def goto_contact_page(self):
'''
进入到通讯录
'''
# 点击【通讯录】
# *号起到拆分元组的作用
self.find(*self._contact_list).click()
return ContactPage(self.driver)
| [
"980692186@qq.com"
] | 980692186@qq.com |
15de457978364d20c77a1b440c0744b05fc37897 | 79c2638dba0f7efe1530a08be0abd9ccc86de14a | /leagues/views.py | ce4bba6f5a98690911dbc3278fe9aebc4601045f | [] | no_license | cristianrdev/BD_teams_leagues_players | 06d0e505a264cc1492d87e61b362031baa0427f2 | 49868c90e5cf5437009c2fbe012efdcfaae83866 | refs/heads/main | 2023-04-14T20:31:52.968883 | 2021-04-30T04:51:10 | 2021-04-30T04:51:10 | 363,029,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | from django.shortcuts import render, redirect
from .models import League, Team, Player
from . import team_maker
def index(request):
if request.method == 'GET':
print('------------es un GET-----------------')
context = {
"leagues": League.objects.all(),
"teams": Team.objects.all(),
"players": Player.objects.all(),
"title_leagues": '',
"title_teams": '',
"title_players": '',
}
return render(request, "leagues/index.html", context)
else:
print('------------es un POST-----------------')
# if request.POST['filtro'] == '0' :
# print('Volver a el index***********')
# return redirect("/")
if int(request.POST['filtro']) == 1 :
print('es el filtro 1***********')
context = {
"leagues": League.objects.filter(sport='Baseball'),
"title_teams": 'hidden',
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 2 :
print('es el filtro 2***********')
context = {
"leagues": League.objects.filter(name__contains='Women'),
"title_teams": 'hidden',
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 3 :
print('es el filtro 3***********')
context = {
"leagues": League.objects.filter(sport__contains='Hockey'),
"title_teams": 'hidden',
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 4 :
print('es el filtro 4***********')
context = {
"leagues": League.objects.exclude(sport='Football'),
"title_teams": 'hidden',
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 5 :
print('es el filtro 5***********')
context = {
"leagues": League.objects.filter(name__contains='Conference'),
"title_teams": 'hidden',
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 6 :
print('es el filtro 6***********')
context = {
"leagues": League.objects.filter(name__contains='Atlantic'),
"title_teams": 'hidden',
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 7 :
print('es el filtro 7***********')
context = {
"title_leagues": 'hidden',
"teams": Team.objects.filter(location='Dallas'),
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 8 :
print('es el filtro 8***********')
context = {
"title_leagues": 'hidden',
"teams": Team.objects.filter(team_name='Raptors'),
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 9 :
print('es el filtro 9***********')
context = {
"title_leagues": 'hidden',
"teams": Team.objects.filter(location__contains='City'),
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 10 :
print('es el filtro 10***********')
context = {
"title_leagues": 'hidden',
"teams": Team.objects.filter(team_name__startswith='T'),
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 11 :
print('es el filtro 11***********')
context = {
"title_leagues": 'hidden',
"teams": Team.objects.all().order_by('location'),
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 12 :
print('es el filtro 12***********')
context = {
"title_leagues": 'hidden',
"teams": Team.objects.all().order_by('-team_name'),
"title_players": 'hidden',
}
if int(request.POST['filtro']) == 13 :
print('es el filtro 13***********')
context = {
"title_leagues": 'hidden',
"title_teams": 'hidden',
"players": Player.objects.filter(last_name = 'Cooper'),
}
if int(request.POST['filtro']) == 14 :
print('es el filtro 14***********')
context = {
"title_leagues": 'hidden',
"title_teams": 'hidden',
"players": Player.objects.filter(first_name = 'Joshua'),
}
if int(request.POST['filtro']) == 15 :
print('es el filtro 15***********')
# jugadores_cooper = Player.objects.
context = {
"title_leagues": 'hidden',
"title_teams": 'hidden',
"players": Player.objects.filter(last_name = 'Cooper').exclude(first_name = 'Joshua'),
}
if int(request.POST['filtro']) == 16 :
print('es el filtro 16***********')
context = {
"title_leagues": 'hidden',
"title_teams": 'hidden',
"players": Player.objects.filter(first_name__in = ['Alexander' , 'Wyatt']),
}
return render(request, "leagues/index.html", context)
def make_data(request):
team_maker.gen_leagues(10)
team_maker.gen_teams(50)
team_maker.gen_players(200)
return redirect("index") | [
"crrojasserrano@gmail.com"
] | crrojasserrano@gmail.com |
c94623fa4a303341d2a14bd2502ddbb12809ef67 | 75fa11b13ddab8fd987428376f5d9c42dff0ba44 | /metadata-ingestion/tests/integration/ldap/test_ldap.py | 3e76f13fc823d2cba27669df218aeac46589492f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | RyanHolstien/datahub | 163d0ff6b4636919ed223ee63a27cba6db2d0156 | 8cf299aeb43fa95afb22fefbc7728117c727f0b3 | refs/heads/master | 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 | Apache-2.0 | 2021-02-16T23:48:05 | 2020-03-11T21:43:58 | TypeScript | UTF-8 | Python | false | false | 5,662 | py | import time
import pytest
from datahub.ingestion.run.pipeline import Pipeline
from tests.test_helpers import mce_helpers
from tests.test_helpers.docker_helpers import wait_for_port
@pytest.mark.integration
def test_ldap_ingest(docker_compose_runner, pytestconfig, tmp_path, mock_time):
test_resources_dir = pytestconfig.rootpath / "tests/integration/ldap"
with docker_compose_runner(
test_resources_dir / "docker-compose.yml", "ldap"
) as docker_services:
# The openldap container loads the sample data after exposing the port publicly. As such,
# we must wait a little bit extra to ensure that the sample data is loaded.
wait_for_port(docker_services, "openldap", 389)
# without this ldap server can provide empty results
time.sleep(5)
pipeline = Pipeline.create(
{
"run_id": "ldap-test",
"source": {
"type": "ldap",
"config": {
"ldap_server": "ldap://localhost",
"ldap_user": "cn=admin,dc=example,dc=org",
"ldap_password": "admin",
"base_dn": "dc=example,dc=org",
"group_attrs_map": {
"members": "memberUid",
},
"custom_props_list": ["givenName"],
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/ldap_mces.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "ldap_mces.json",
golden_path=test_resources_dir / "ldap_mces_golden.json",
)
@pytest.mark.integration
def test_ldap_memberof_ingest(docker_compose_runner, pytestconfig, tmp_path, mock_time):
test_resources_dir = pytestconfig.rootpath / "tests/integration/ldap"
with docker_compose_runner(
test_resources_dir / "docker-compose.yml", "ldap"
) as docker_services:
# The openldap container loads the sample data after exposing the port publicly. As such,
# we must wait a little bit extra to ensure that the sample data is loaded.
wait_for_port(docker_services, "openldap", 389)
# without this ldap server can provide empty results
time.sleep(5)
pipeline = Pipeline.create(
{
"run_id": "ldap-test",
"source": {
"type": "ldap",
"config": {
"ldap_server": "ldap://localhost",
"ldap_user": "cn=admin,dc=example,dc=org",
"ldap_password": "admin",
"base_dn": "dc=example,dc=org",
"filter": "(memberOf=cn=HR Department,dc=example,dc=org)",
"attrs_list": ["+", "*"],
"group_attrs_map": {
"members": "member",
},
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/ldap_memberof_mces.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "ldap_memberof_mces.json",
golden_path=test_resources_dir / "ldap_memberof_mces_golden.json",
)
@pytest.mark.integration
def test_ldap_ingest_with_email_as_username(
docker_compose_runner, pytestconfig, tmp_path, mock_time
):
test_resources_dir = pytestconfig.rootpath / "tests/integration/ldap"
with docker_compose_runner(
test_resources_dir / "docker-compose.yml", "ldap"
) as docker_services:
# The openldap container loads the sample data after exposing the port publicly. As such,
# we must wait a little bit extra to ensure that the sample data is loaded.
wait_for_port(docker_services, "openldap", 389)
time.sleep(5)
pipeline = Pipeline.create(
{
"run_id": "ldap-test",
"source": {
"type": "ldap",
"config": {
"ldap_server": "ldap://localhost",
"ldap_user": "cn=admin,dc=example,dc=org",
"ldap_password": "admin",
"base_dn": "dc=example,dc=org",
"user_attrs_map": {"email": "mail"},
"group_attrs_map": {
"members": "memberUid",
"email": "mail",
},
"use_email_as_username": True,
"custom_props_list": ["givenName"],
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/ldap_mces.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "ldap_mces.json",
golden_path=test_resources_dir / "ldap_mces_golden.json",
)
| [
"noreply@github.com"
] | RyanHolstien.noreply@github.com |
4c45f08b05b3d7602a124435f0b016247808ffc6 | 2ec75e8b2fce5b21f8df8944eb2c792b78743552 | /server.py | 4b334f45597809d3e274e5b4f73eb0593cce7e46 | [] | no_license | smilefufu/tornado-s | 6ea541bdf3795913c3df070cda38d70665997f90 | 19bd56b33e70e36c9cc0c1d9b4f253b657caaed0 | refs/heads/master | 2020-05-20T16:45:25.109800 | 2015-11-19T14:38:43 | 2015-11-19T14:38:43 | 42,692,516 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | #!/usr/bin/env python
# encoding: utf-8
from tornado.ioloop import IOLoop
from tornado.web import Application, url
from tornado.options import options, define, parse_command_line
from lib.core import HTTPServer, config_settings, RequestHandler, RestfulApiHandler, ModuleRouter, ProviderManager
import logging
import sys
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append("./modules/")
import tornado.web
#class HtmlHandler(tornado.web.RequestHandler):
class HtmlHandler(RequestHandler):
'''
def post(self):
path = os.path.normpath(self.request.uri).strip('/')
local_path = os.path.join(self.settings['template_path'], path)
if not os.path.exists(local_path):
self.set_status(404)
return self.write("404 not found")
return self.render(path, _data=ModuleRouter(self.settings))
'''
def post(self):
#页面使用的provider配置在uri同目录下的.html.data文件中
#比如页面是/example/example2.html, 则配置文件为:/example/example2.html.data
local_path, tpl_path = self.find_template()
data = ProviderManager.getdata(local_path, self)
return self.render(tpl_path, **data)
def get(self):
return self.post()
class ApiHandler(RestfulApiHandler):
def request_dealer(self, method):
ret = dict()
return self.write(ret)
def make_app(dev=False):
'''定义url路由和服务选项等'''
return Application([
url(r"/api", ApiHandler),
url(r"/.*", HtmlHandler),
],
debug = dev,
gzip = True,
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
def main():
define("config", type=str, help="path to config file")
define("dev", type=bool, help="dev mode switch", default=False)
define("port", type=int, help="port to listen", default=8218)
define("ac", type=str, help="Access-Control-Allow-Origin", default="*")
parse_command_line(final=True)
app = make_app(options.dev)
server = HTTPServer(app,xheaders=True)
define("pid", type=int, default=0)
options.pid = 0
if not options.dev:
server.bind(options.port)
options.pid = server.start(0) # Forks multiple sub-processes
else:
server.listen(options.port)
app.settings.update(config_settings(options))
IOLoop.current().start()
if __name__ == '__main__':
main()
| [
"fufu.bluesand@gmail.com"
] | fufu.bluesand@gmail.com |
1bab096720a3d1db7ea403b4988e1744b8ee3cd9 | 541523537649b48a96eef475d6b66e8e8270978d | /fenci/web/test/test.py | 554e8d9c53702256b8d8ed584dbcc758e7f9ac4b | [] | no_license | HNU-MSC/fenci | 03720288e29b3dbf05eb408d8af164d1e3ed7813 | dc9218eae7e58bafda5d659a01b473eb57b63861 | refs/heads/master | 2020-12-27T04:31:25.202604 | 2020-02-02T12:28:11 | 2020-02-02T12:28:11 | 237,766,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import json
with open('force_directed.json', 'r') as f:
res = f.read()
res = json.loads(res)
print(res['nodes']) | [
"noreply@github.com"
] | HNU-MSC.noreply@github.com |
8dc18ef2f8c1b9adeb021bbd1cc39ef1d13084b3 | 85ab4cc5e16e2e51fee8488f47f4ed1ecd043c61 | /examples/tracing/kvm_hypercall.py | 322bb8e50098e60cc6f8e678268741876e74d888 | [
"Apache-2.0"
] | permissive | polycube-network/bcc | 8fa018358f03fc2a3444910fade338de6933babf | b8158f43ceb884a9eef456d30f4f413604397a6a | refs/heads/master | 2022-06-25T14:00:23.852780 | 2020-01-27T11:16:21 | 2020-01-27T11:16:21 | 161,410,720 | 2 | 4 | Apache-2.0 | 2020-06-25T13:38:04 | 2018-12-12T00:26:32 | Python | UTF-8 | Python | false | false | 1,524 | py | #!/usr/bin/env python
#
# kvm_hypercall.py
#
# Demonstrates stateful kvm_entry and kvm_exit recording along with the
# associated hypercall when exit_reason is VMCALL. See kvm_hypercall.txt
# for usage
#
# REQUIRES: Linux 4.7+ (BPF_PROG_TYPE_TRACEPOINT support)
#
# Copyright (c) 2017 ShiftLeft Inc.
#
# Author(s):
# Suchakrapani Sharma <suchakra@shiftleft.io>
from __future__ import print_function
from bcc import BPF
# load BPF program
b = BPF(text="""
#define EXIT_REASON 18
BPF_HASH(start, u8, u8);
TRACEPOINT_PROBE(kvm, kvm_exit) {
u8 e = EXIT_REASON;
u8 one = 1;
if (args->exit_reason == EXIT_REASON) {
bpf_trace_printk("KVM_EXIT exit_reason : %d\\n", args->exit_reason);
start.update(&e, &one);
}
return 0;
}
TRACEPOINT_PROBE(kvm, kvm_entry) {
u8 e = EXIT_REASON;
u8 zero = 0;
u8 *s = start.lookup(&e);
if (s != NULL && *s == 1) {
bpf_trace_printk("KVM_ENTRY vcpu_id : %u\\n", args->vcpu_id);
start.update(&e, &zero);
}
return 0;
}
TRACEPOINT_PROBE(kvm, kvm_hypercall) {
u8 e = EXIT_REASON;
u8 zero = 0;
u8 *s = start.lookup(&e);
if (s != NULL && *s == 1) {
bpf_trace_printk("HYPERCALL nr : %d\\n", args->nr);
}
return 0;
};
""")
# header
print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "EVENT"))
# format output
while 1:
try:
(task, pid, cpu, flags, ts, msg) = b.trace_fields()
except ValueError:
continue
print("%-18.9f %-16s %-6d %s" % (ts, task, pid, msg))
| [
"goldshtn@gmail.com"
] | goldshtn@gmail.com |
03159707aa89c1ada2d4e3e3109c8a8cff6de8ff | 9b3df22a5352484d7cab3f3d32b2404a951b9d53 | /server/test_python_client.py | e2cebaaca06ecb4ed7a2e851e729a6b4332d487b | [] | no_license | nickmoop/untitled_messenger | 24694b41f623ca42abab790d6569108cc3dba46b | 089c5c06f94aee9bf7273e8c56c35b0872e63f83 | refs/heads/master | 2021-08-06T10:00:45.859278 | 2017-11-04T21:55:04 | 2017-11-04T21:55:04 | 109,385,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from socket import *
SERVER_HOSTNAME = 'localhost'
SERVER_PORT = 50007
def send(socket_object, message=None):
if message:
print('Encode message: {}'.format(message))
message = [message.encode()]
print('Send message')
for line in message:
socket_object.send(line)
else:
print('Empty message')
def connect():
socket_object = socket(AF_INET, SOCK_STREAM)
socket_object.connect((SERVER_HOSTNAME, SERVER_PORT))
print('Connected to socket with host: {}, and port: {}'.format(
SERVER_HOSTNAME, SERVER_PORT)
)
return socket_object
def disconnect(socket_object):
print('Disconnect from socket')
socket_object.close()
if __name__ == '__main__':
import sys
import time
message_to_send = 'Test message for send to server'
if len(sys.argv) >= 2:
message_to_send = ' '.join(sys.argv[1:])
test_socket = connect()
send(test_socket, message=message_to_send)
time.sleep(3)
send(test_socket, message='I will disconnect now')
disconnect(test_socket)
| [
"nicolay.chirik@gmail.com"
] | nicolay.chirik@gmail.com |
b24113a5aef60eee13af9475c0f32a9ad3eb8856 | e9b0a4cbd4757a598fbc8b3973f69dfd76b2020d | /app_system/migrations/0002_userssconfig_user.py | 2062f4dd7f79ecb1b1bcae0300611f6e00bfc068 | [
"Apache-2.0"
] | permissive | visoon0012/plover.cloud | fa58cdc8b241183670a229ccf4db64f78ed023d1 | 04542628758d969085eb6172928165fddb5d2677 | refs/heads/master | 2022-12-14T08:08:10.099953 | 2018-08-01T10:08:14 | 2018-08-01T10:08:14 | 129,711,956 | 1 | 0 | Apache-2.0 | 2022-11-22T02:28:49 | 2018-04-16T08:41:44 | Python | UTF-8 | Python | false | false | 593 | py | # Generated by Django 2.0.2 on 2018-05-31 10:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('app_system', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='userssconfig',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"visoon0012@hotmail.com"
] | visoon0012@hotmail.com |
6c84ccddbbd2a6110e5b60242adf271558d404ee | 83ed8b754703a1c9e661c90f0763bfebbc0f2606 | /爬虫/抓取动态Ajax请求的数据.py | 872e0b1c73089f5de87f161395db0e8837b3d7ad | [] | no_license | zbh123/hobby | 4ce267a20e1af7f2accd2bde8d39af269efa319b | 2215c406fe7700bf150fd536dd56823a2e4733d1 | refs/heads/master | 2021-08-02T10:31:34.683391 | 2021-07-26T07:26:16 | 2021-07-26T07:26:16 | 150,555,879 | 4 | 0 | null | 2021-07-27T07:34:28 | 2018-09-27T08:41:44 | Python | UTF-8 | Python | false | false | 1,199 | py | import urllib.request
import ssl
import json
import re
def ajaxCrawler(url):
headers = {
"User-Agent": "User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0"
}
req = urllib.request.Request(url, headers=headers)
#使用ssl创建未验证的版本
context = ssl._create_unverified_context()
response = urllib.request.urlopen(req,context=context)
# response = urllib.request.urlopen(req)
jsonStr = response.read().decode("utf-8")
# jsonStr = re.sub(r'<html>|<head>|</head>|<body>|<pre style="word-wrap: break-word; white-space: pre-wrap;">','', jsonStr)
# jsonStr = re.sub(r'</pre>|</body>|</html>|</script>','', jsonStr)
# print(jsonStr)
jsonData = json.loads(jsonStr)
return jsonData
url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=20"
info = ajaxCrawler(url)
print(type(info))
for i in range(11):
url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=" +str(i*20)
info = ajaxCrawler(url)
print(type(info))
| [
"noreply@github.com"
] | zbh123.noreply@github.com |
c15e0849f76eff4678bd60d201ed33b6738de563 | 3a0deef4feb62d9b0cee5c581cbc57233d356f9d | /ParkEasy/migrations/0001_initial.py | 4ab181853091171dd6fb4a862bbce2da2d69d75f | [] | no_license | sirwill98/William_Rodgers_Graded_Unit | d1d24edf2d78b7eb421e1f2307fd424e4a1c8426 | 94ce68bc97773a93c4c2b963b0252ea5c0830d2a | refs/heads/master | 2021-06-05T10:29:27.070287 | 2020-11-12T13:32:30 | 2020-11-12T13:32:30 | 131,712,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,506 | py | # Generated by Django 2.0.3 on 2018-05-24 12:40
import ParkEasy.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the ParkEasy app (auto-generated by Django 2.0.3).

    Creates Arriving/Departing flight records, Booking, Payment, Prices,
    Vehicle and the custom Customer user model, then adds the foreign keys
    that point at models created earlier in this same migration.
    """
    # First migration of this app; only depends on django.contrib.auth.
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='Arriving',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('arriving_flight_number', models.TextField(max_length=16)),
                ('arriving_flight_datetime', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Booking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('booking_date', models.DateField(default=django.utils.timezone.now)),
                ('booking_length', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Departing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('departing_flight_number', models.TextField(max_length=16)),
                ('departing_flight_datetime', models.DateTimeField()),
                ('destination', models.TextField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_paid', models.DateTimeField(default=django.utils.timezone.now)),
                ('paid', models.BooleanField(default=False)),
                ('amount', models.IntegerField()),
                ('booking', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ParkEasy.Booking')),
            ],
        ),
        migrations.CreateModel(
            name='Prices',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vip', models.IntegerField(default=0)),
                ('valet', models.IntegerField(default=0)),
                ('day', models.FloatField(default=1.2)),
                ('base', models.IntegerField(default=27)),
                ('after_five', models.IntegerField(default=10)),
                ('is_current', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Vehicle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reg_no', models.TextField(max_length=7)),
                ('make', models.TextField()),
                ('manufacturer', models.TextField()),
            ],
        ),
        # Custom user model (replaces django.contrib.auth's default user);
        # note the project-specific address/phone fields below.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('password', models.TextField(default='', max_length=100)),
                ('email', models.EmailField(default='', max_length=100, unique=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this site.')),
                ('address_line1', models.CharField(max_length=100)),
                ('address_line2', models.CharField(max_length=100)),
                ('postcode', models.CharField(max_length=16)),
                ('tel_no', models.CharField(max_length=20)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', ParkEasy.models.UserManager()),
            ],
        ),
        # Foreign keys are added as separate operations because their target
        # models are only created by the operations above.
        migrations.AddField(
            model_name='departing',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='booking',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='booking',
            name='prices',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='ParkEasy.Prices'),
        ),
        migrations.AddField(
            model_name='booking',
            name='vehicle',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ParkEasy.Vehicle'),
        ),
        migrations.AddField(
            model_name='arriving',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"billyboy2410@hotmail.co.uk"
] | billyboy2410@hotmail.co.uk |
400ac17153480a63df98dda5dac0d88bf318c97e | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtWidgets/QGraphicsPixmapItem.py | 0b913af111c321403b7dbad1da4f899c98fdb78f | [] | no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | # encoding: utf-8
# module PySide2.QtWidgets
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtWidgets.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import PySide2.QtGui as __PySide2_QtGui
import Shiboken as __Shiboken
from QGraphicsItem import QGraphicsItem
class QGraphicsPixmapItem(QGraphicsItem):
    # no doc
    # Auto-generated IDE stub for PySide2.QtWidgets.QGraphicsPixmapItem:
    # every method body is `pass` and real signatures are unknown, so this
    # class only provides completion data -- it is not meant to be executed.
    def boundingRect(self, *args, **kwargs): # real signature unknown
        pass
    def contains(self, *args, **kwargs): # real signature unknown
        pass
    def extension(self, *args, **kwargs): # real signature unknown
        pass
    def isObscuredBy(self, *args, **kwargs): # real signature unknown
        pass
    def offset(self, *args, **kwargs): # real signature unknown
        pass
    def opaqueArea(self, *args, **kwargs): # real signature unknown
        pass
    def paint(self, *args, **kwargs): # real signature unknown
        pass
    def pixmap(self, *args, **kwargs): # real signature unknown
        pass
    def setOffset(self, *args, **kwargs): # real signature unknown
        pass
    def setPixmap(self, *args, **kwargs): # real signature unknown
        pass
    def setShapeMode(self, *args, **kwargs): # real signature unknown
        pass
    def setTransformationMode(self, *args, **kwargs): # real signature unknown
        pass
    def shape(self, *args, **kwargs): # real signature unknown
        pass
    def shapeMode(self, *args, **kwargs): # real signature unknown
        pass
    def transformationMode(self, *args, **kwargs): # real signature unknown
        pass
    def type(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # Enum-like class attributes; the generator could not recover their
    # real values, hence the placeholder None assignments.
    BoundingRectShape = None # (!) real value is ''
    HeuristicMaskShape = None # (!) real value is ''
    MaskShape = None # (!) real value is ''
    ShapeMode = None # (!) real value is ''
| [
"noreply@github.com"
] | cundesi.noreply@github.com |
ee7907386d75d7bd896092d36804b4b05cab52d4 | b04b52614b31a5d77239d19b4259f84abb2cbea5 | /adminpanel/migrations/0023_album.py | 03b00f2f23fc6b2ecfeaed66b54429ce8761ca75 | [] | no_license | nawed-xigmapro/vibanote | ae348561d7c6b8b208a1b6a8f46db2c928e3df91 | 03f2f23497203e4dbfde43c5bb8898a5e77492fa | refs/heads/master | 2021-01-19T19:01:59.108160 | 2017-08-23T13:38:30 | 2017-08-23T13:38:30 | 101,181,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-03 14:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Album model (auto-generated by Django 1.11.2).

    Album links to Genre, Type and the swappable user model; FK fields are
    nullable so albums can exist before those relations are filled in.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('adminpanel', '0022_remove_video_artist'),
    ]
    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=100, null=True)),
                ('slug', models.CharField(blank=True, max_length=100, null=True)),
                ('subtitle', models.CharField(blank=True, max_length=200, null=True)),
                ('album_image', models.ImageField(null=True, upload_to='albumimages')),
                ('dedicate', models.CharField(blank=True, max_length=255, null=True)),
                ('is_approved', models.IntegerField(blank=True, null=True)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('genre', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='adminpanel.Genre')),
                ('types', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='adminpanel.Type')),
                ('uploadby', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"nawed@xigmapro.com"
] | nawed@xigmapro.com |
23d781e34d8d2f3ae61620fd43b6f47b75e59a5b | 5b0ff689a3e14f42bdf688864cae40c931a5f685 | /msa/core/armve/tests/test_multi_write.py | b2424dcb0537e0251c93404cf4c3107e15a472cd | [] | no_license | prometheus-ar/vot.ar | cd7012f2792a2504fb7f0ee43796a197fc82bd28 | 72d8fa1ea08fe417b64340b98dff68df8364afdf | refs/heads/2017-ago-salta | 2021-01-02T22:19:41.591077 | 2017-08-25T11:55:49 | 2017-08-25T11:55:49 | 37,735,555 | 171 | 110 | null | 2020-06-30T13:33:49 | 2015-06-19T17:15:52 | JavaScript | UTF-8 | Python | false | false | 1,712 | py | #!/usr/bin/env python
# coding: utf-8
from __future__ import division
from serial import Serial
from msa.core.armve.constants import DEV_PRINTER, CMD_PRINTER_PAPER_START, \
CMD_PRINTER_MOVE, EVT_PRINTER_PAPER_INSERTED, CMD_PRINTER_PRINT, \
CMD_PRINTER_PAPER_REMOVE, DEV_RFID, EVT_RFID_NEW_TAG,\
CMD_PRINTER_LOAD_COMP_BUFFER, MSG_EV_PUB
from msa.core.armve.protocol import Printer, RFID, Device, Agent, \
PowerManager, PIR
from msa.core.armve.settings import SERIAL_PORT
def init_channel():
    """Open the ARMVE serial port and return a channel with clean buffers."""
    serial_channel = Serial(SERIAL_PORT, timeout=3)
    if not serial_channel.isOpen():
        serial_channel.open()
    # Drop any stale bytes left over from a previous session, both ways.
    serial_channel.flushInput()
    serial_channel.flushOutput()
    return serial_channel
def test_boleta():
    """Hardware smoke test: write a known pattern to an RFID ballot tag.

    Requires the ARMVE board on SERIAL_PORT and assumes at least one RFID
    tag is in range (get_tags()[0] fails otherwise).
    """
    channel = init_channel()
    agent = Agent(channel)
    init = agent.initialize()  # NOTE(review): result unused -- confirm initialize() is needed only for its side effect
    printer = Printer(channel)
    rfid = RFID(channel)
    device = Device(channel)
    # Optional wait-for-paper step, left disabled by the original author.
    #esperar_evento(device, DEV_PRINTER, EVT_PRINTER_PAPER_INSERTED)
    #print rfid.get_multitag_data()
    tags_data = rfid.get_tags()[0]
    serial_number = tags_data['serial_number'][0]
    # Write markers "--00".."--51" to the tag; the meaning of the 4 and
    # "1C" arguments is defined by RFID.write_tag -- TODO confirm semantics.
    rfid.write_tag(serial_number, 4, "1C",
                   "--00--01--02--03--04--05--06--07--08--09--10--11--12"
                   "--13--14--15--16--17--18--19--20--21--22--23--24--25"
                   "--26--27--28--29--30--31--32--33--34--35--36--37--38"
                   "--39--40--41--42--43--44--45--46--47--48--49--50--51"
                   )
    # Read back so the written data appears in the serial traces.
    rfid.get_multitag_data()
def esperar_evento(device, device_id, event):
    """Block until *device* publishes *event* for *device_id* (MSG_EV_PUB)."""
    print("esperando evento", device_id, event)
    expected_tail = (device_id, event, MSG_EV_PUB)
    while True:
        message = device.read(True)
        if message is not None and message[1:] == expected_tail:
            break
# Run the smoke test when executed directly (requires attached hardware).
if __name__ == "__main__":
    test_boleta()
| [
"prometheus@olympus.org"
] | prometheus@olympus.org |
db2a2a1a31115c20f2aa9461c575fc65b4918eef | 9838d1b978bf34926c2f881b8e06ace732869997 | /assignment2/counter.py | 9a61e7e5efc846d762d3f0e7ae1ff8a8c7a9a3be | [] | no_license | thinkocapo/big-data-processing | 57678e9cca144c3e310aae82283062d95fe99147 | 5e417c08c7cd3cada732963b32624b6faa8a8d8d | refs/heads/master | 2023-04-15T12:27:08.028286 | 2019-11-12T04:27:41 | 2019-11-12T04:27:41 | 207,067,630 | 1 | 0 | null | 2023-03-31T14:51:22 | 2019-09-08T05:43:50 | Python | UTF-8 | Python | false | false | 7,330 | py | import argparse
import csv
import datetime
import json
import multiprocessing
from multiprocessing import Process, Value, Lock
import os
import pprint
import time
import threading
from random import randint
# Capture any exceptions and send to Sentry.io :)
if 'DSN_DATA_PIPELINE' in os.environ:
import sentry_sdk
sentry_sdk.init(os.environ['DSN_DATA_PIPELINE'])
# CSV column layout shared by all query workers; the input files carry no
# header row, so DictReader is given the field names explicitly.
field_names = ['uuid', 'timestamp', 'url', 'userId', 'country', 'ua_browser', 'ua_os', 'response_status', 'TTFB']

# Makes a data structure like { date1: { url: count } }
def query1(lock, fileName, server_process_dict):
    """Count hits per URL, bucketed by hour.

    Reads every row of the CSV at *fileName* and, under *lock*, folds the
    counts into *server_process_dict* (a multiprocessing.Manager dict) as
    {"YYYY-M-D:H": {url: count}}.  Timestamps may or may not carry a
    fractional-seconds part, so two formats are tried.
    """
    # `with open` closes the file handle (the original leaked it).
    with open(fileName) as csv_file:
        for row in csv.DictReader(csv_file, fieldnames=field_names):
            timestamp = row['timestamp']
            url = row['url']
            try:
                obj = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError:
                obj = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
            timestamp_key = '%s-%s-%s:%s' % (obj.year, obj.month, obj.day, obj.hour)
            with lock:
                # Manager dict proxies do not observe in-place mutation of
                # nested values, so fetch a local copy and reassign it.
                url_counts = server_process_dict.get(timestamp_key, {})
                url_counts[url] = url_counts.get(url, 0) + 1
                server_process_dict[timestamp_key] = url_counts
# Makes a data structure like { timestamp: { url: [users] }}
def query2(lock, fileName, server_process_dict):
    """Collect the userId of every hit, grouped by hour and then by URL."""
    reader = csv.DictReader(open(fileName), fieldnames=field_names)
    for record in reader:
        raw_ts = record['timestamp']
        page = record['url']
        visitor = record['userId']
        # Timestamps appear with and without fractional seconds.
        try:
            parsed = datetime.datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            parsed = datetime.datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%SZ')
        hour_key = '%s-%s-%s:%s' % (parsed.year, parsed.month, parsed.day, parsed.hour)
        with lock:
            # Rebuild the nested dict locally and reassign it so a
            # manager-backed proxy picks up the change.
            per_url = server_process_dict.get(hour_key, {})
            per_url.setdefault(page, []).append(visitor)
            server_process_dict[hour_key] = per_url
# Makes a data structure that looks like { timestamp: { url: [uuids] }}
def query3(lock, fileName, server_process_dict):
    """Collect the uuid of every click event, grouped by hour then URL."""
    reader = csv.DictReader(open(fileName), fieldnames=field_names)
    for record in reader:
        raw_ts = record['timestamp']
        page = record['url']
        visitor = record['userId']
        event_id = record['uuid']
        # Timestamps appear with and without fractional seconds.
        try:
            parsed = datetime.datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            parsed = datetime.datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%SZ')
        hour_key = '%s-%s-%s:%s' % (parsed.year, parsed.month, parsed.day, parsed.hour)
        with lock:
            # Rebuild the nested dict locally and reassign it so a
            # manager-backed proxy picks up the change.
            per_url = server_process_dict.get(hour_key, {})
            per_url.setdefault(page, []).append(event_id)
            server_process_dict[hour_key] = per_url
# Makes a data structure that looks like { timestamp: { country: [urls] }}
def problem4(lock, fileName, server_process_dict):
    """Collect the URL of every hit, grouped by hour and then by country."""
    reader = csv.DictReader(open(fileName), fieldnames=field_names)
    for record in reader:
        raw_ts = record['timestamp']
        page = record['url']
        origin = record['country']
        # Timestamps appear with and without fractional seconds.
        try:
            parsed = datetime.datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            parsed = datetime.datetime.strptime(raw_ts, '%Y-%m-%dT%H:%M:%SZ')
        hour_key = '%s-%s-%s:%s' % (parsed.year, parsed.month, parsed.day, parsed.hour)
        with lock:
            # Rebuild the nested dict locally and reassign it so a
            # manager-backed proxy picks up the change.
            per_country = server_process_dict.get(hour_key, {})
            per_country.setdefault(origin, []).append(page)
            server_process_dict[hour_key] = per_country
def printer(query):
    """Print a per-bucket summary for whichever query function was run.

    Reads the module-level ``server_process_dict`` populated by the worker
    processes; *query* is compared against the query functions to decide
    which nested layout the dict has.
    """
    # NOTE(review): the bare `print a, b` statements below are Python 2
    # syntax, yet the usage examples above invoke python3 -- one of the two
    # is wrong; confirm the target interpreter before changing anything.
    # Print the number of unique URL's per query
    if query == query1:
        for k,v in server_process_dict.items():
            unique_urls = server_process_dict[k].items()
            print k, len(unique_urls)
    # Print the number of unique visitors per URL per day
    if query == query2:
        for timestamp_key, url_dict in server_process_dict.items():
            for url, users in url_dict.items():
                unique_users = set(user for user in users)
                print timestamp_key, url, len(unique_users)
    # Print the number of uuids (unique event clicks) per URL per hour per day
    if query == query3:
        for timestamp_key, url_dict in server_process_dict.items():
            for url, uuids in url_dict.items():
                unique_uuids = set(uuid for uuid in uuids)
                print timestamp_key, url, len(unique_uuids)
    # Problem 4
    if query == problem4:
        for timestamp_key, country_dict in server_process_dict.items():
            for country, urls in country_dict.items():
                unique_urls = set(url for url in urls)
                print timestamp_key, country, len(unique_urls)
# EXAMPLE USAGE:
# python3 countery.py query1
# python3 countery.py query2
# python3 countery.py query4
# python3 countery.py problem4
if __name__ == '__main__':
    # Specify the number of threads and query from command-line
    parser = argparse.ArgumentParser()
    parser.add_argument("query", type=str, help="query1 query2 query3")
    args = parser.parse_args()
    # Which program are we calling
    queries={'query1': query1, 'query2': query2, 'query3': query3, 'problem4': problem4}
    # A KeyError here means an unknown query name was given on the command line.
    query = queries[args.query]
    fileNames = ('./input_files/file-input1.csv', 'input_files/file-input2.csv', 'input_files/file-input3.csv', 'input_files/file-input4.csv')
    with multiprocessing.Manager() as manager:
        # Shared state: a manager-backed dict guarded by a multiprocessing Lock.
        server_process_dict = manager.dict()
        lock = Lock()
        processes = []
        # One worker process per input file, all folding into the shared dict.
        for i in range(4):
            process = multiprocessing.Process(target=query, args=(lock, fileNames[i], server_process_dict,))
            processes.append(process)
            process.start()
        for curr_process in processes:
            curr_process.join()
        # printer() reads server_process_dict as a global; that works because
        # this block runs at module level in the main process.
        printer(query)
| [
"thinkocapo@gmail.com"
] | thinkocapo@gmail.com |
21c7744fe9bf965805efcb90210e003fbc603dfe | f4ea512b8a156abfaef83f25e044b94c37b6dedb | /questions/ABC194/C.py | fe4fe5952d374d1f02b2c0ed3a53e5bd84cd5e11 | [] | no_license | HubHikari/CompetitiveProgramming | 9d1483e0a964508e84ae233aba9cb4ade7ff88e3 | b7c471a80378183b2ed5fe86fa829ee89d26709f | refs/heads/main | 2023-04-13T07:00:13.812800 | 2021-04-24T13:51:27 | 2021-04-24T13:51:27 | 308,897,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from itertools import permutations
# Toggle to 1 to enable DBG() output.
DEBUG_MODE = 0
# Largest signed 64-bit integer, kept for overflow-boundary checks.
MAX_NUM = 2 ** 63 - 1
def DBG(message):
    """Print *message* under a DEBUG banner, but only when DEBUG_MODE is 1."""
    if DEBUG_MODE == 1:
        print("DEBUG: ")
        print(message)
# N is the number of input values.
N = int(input())
A = list(map(int, input().split()))
# sum_{i<j} (A_i - A_j)^2 = (N-1) * sum(a^2) - sum_i a_i * (total - a_i)
total = sum(A)
square_sum = sum(A[i] * A[i] for i in range(N))
pair_term = (N - 1) * square_sum
DBG(pair_term)
cross_terms = [A[i] * (total - A[i]) for i in range(N)]
DBG(sum(cross_terms))
print(pair_term - sum(cross_terms))
| [
"donhanya0321@gmail.com"
] | donhanya0321@gmail.com |
bbd3db53b09bf960e6e995204e2771897492d6dc | 599d569b586cb1414886b1a2454cf3c59c4362bd | /master_classifyNewcase.py | 1e11353691c3a1709fc3f692cee4905ea5ed08fd | [] | no_license | cgallego/master | 77511ff3330882f0c5456beaedd81468d7a99bb1 | 2a04d66ac783f5729413aecf9c66037fc8501c78 | refs/heads/master | 2016-09-07T19:02:40.537285 | 2014-07-28T16:05:01 | 2014-07-28T16:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,972 | py | # -*- coding: utf-8 -*-
"""
Master python script to run each module in sequence
Arguments:
============
sys.argv[1] = input text file with one case per line in the following format:
BenignNMaligNAnt StudyNumber DicomExamNumber LesionID StudyDate SeriesID BreastSide PathReportID PathoBreastSide
Created on Tue Apr 08 14:20:35 2014
@ author (C) Cristina Gallego, University of Toronto
----------------------------------------------------------------------
"""
import os, os.path
import sys
import string
from sys import argv, stderr, exit
import shlex, subprocess
import re
import numpy as np
import dicom
import psycopg2
import sqlalchemy as al
import sqlalchemy.orm
import pandas as pd
from query_database import *
from dictionaries import my_aet, hostID, local_port, clinical_aet, clinical_IP, clinical_port, remote_aet, remote_IP, remote_port
import dcmtk_routines as dcmtk
from inputs_init import *
from display import *
from segment import *
from features_dynamic import *
from features_morphology import *
from features_texture import *
import pylab
# convertion packages
import pandas.rpy.common as com
from rpy2.robjects.numpy2ri import numpy2ri
from rpy2.robjects.packages import importr
import rpy2.robjects as R
from rpy2.robjects import globalenv
from classifyCascade import *
def getScans(path_rootFolder, fileline, PatientID, StudyID, AccessionN, oldExamID):
    """
    run : getScans(path_rootFolder, PatientID, StudyID, AccessionN):
    Inputs
    ======
    path_rootFolder: (string) Automatically generated based on the location of file
    fileline: (list) whitespace-split input line; fileline[4] supplies the
              ExamID when oldExamID is True
    PatientID : (int) MRN
    StudyID : (int) CAD StudyID
    AccessionN : (int) CAD AccessionN
    oldExamID : (bool) when True, pull via pull_MRI_MARTELold with the
                explicit ExamID from fileline[4]
    Output
    ======
    None.  Images are written to disk by the dcmtk helpers.
    """
    # NOTE: data_loc, the remote_*/clinical_* endpoints and local_port come
    # from module scope (see the `dictionaries` import); data_loc must be
    # bound by the caller before this function runs -- confirm.
    try:
        dcmtk.check_MRI_MARTEL(data_loc, remote_aet, remote_port, remote_IP, local_port, PatientID, StudyID, AccessionN)
        if(oldExamID==False):
            dcmtk.pull_MRI_MARTEL(path_rootFolder, data_loc, remote_aet, remote_port, remote_IP, local_port, PatientID, StudyID, AccessionN, countImages=False)
        else:
            ExamID = fileline[4]
            dcmtk.pull_MRI_MARTELold(path_rootFolder, data_loc, remote_aet, remote_port, remote_IP, local_port, PatientID, StudyID, AccessionN, ExamID, countImages=False)
    # Fall back to the clinical PACS when the MRI_MARTEL pull is interrupted.
    except (KeyboardInterrupt, SystemExit):
        dcmtk.check_pacs(path_rootFolder, data_loc, clinical_aet , clinical_port, clinical_IP, local_port, PatientID, StudyID, AccessionN)
        dcmtk.pull_pacs(path_rootFolder, data_loc, clinical_aet, clinical_port, clinical_IP, local_port, PatientID, StudyID, AccessionN)
    # NOTE(review): this handler lists the same exception types as the one
    # above, so it is unreachable; it was presumably meant to guard the PACS
    # fallback itself (a nested try) -- confirm intent before fixing.
    except (KeyboardInterrupt, SystemExit):
        print 'Unable to find study in MRI_MARTEL or AS0SUNB --- Abort'
        sys.exit()
    return
if __name__ == '__main__':
    # Interactive Python-2 pipeline: for each case listed in sys.argv[1],
    # query the research DB, segment the lesion (GUI), extract dynamic /
    # morphology / texture features, save them to CSV and classify.
    # Get Root folder ( the directory of the script being run)
    path_rootFolder = os.path.dirname(os.path.abspath(__file__))
    print path_rootFolder
    # Open filename list
    file_ids = open(sys.argv[1],"r")
    init_flag=1
    for fileline in file_ids:
        # Get the line: StudyNumber DicomExamNumber MRN chosen_lesions_id StudyDate SeriesID image_pos_pat image_ori_pat
        fileline = fileline.split()
        cond = fileline[0]
        StudyID = fileline[1]
        DicomExamNumber = fileline[2]
        Lesions_id = fileline[3]
        dateID = fileline[4]
        SeriesID = fileline[5] # corresponds to dynamic sequence;
        #############################
        ###### 1) Retrieving Images from Research PACS
        #############################
        print "Retrieving Scans to local drive..."
        #getScans(path_rootFolder, fileline, PatientID, StudyID, AccessionN, oldExamID=False)
        #############################
        ###### 2) Querying Research database for clinical, pathology, radiology data
        #############################
        print "Executing SQL connection..."
        # Format query StudyID
        # Zero-pad the StudyID to exactly 4 characters for the DB query.
        if (len(StudyID) >= 4 ): fStudyID=StudyID
        if (len(StudyID) == 3 ): fStudyID='0'+StudyID
        if (len(StudyID) == 2 ): fStudyID='00'+StudyID
        if (len(StudyID) == 1 ): fStudyID='000'+StudyID
        # Format query redateID
        redateID = dateID[0:4]+'-'+dateID[4:6]+'-'+dateID[6:8]
        # perform query
        queryData = Query()
        queryData.queryDatabase(fStudyID, redateID)
        # NOTE(review): this list initializer is dead -- rowCase is
        # immediately overwritten by the interactive prompt below.
        rowCase=["0", "0"]
        rowCase = int(raw_input('pick row (0-n): '))
        # recollect pathologies
        queryData.d1['is_insitu'] = pd.Series(True, index=queryData.d1)
        queryData.d1['is_invasive'] = pd.Series(True, index=queryData.d1)
        queryData.d1['Diagnosis'] = pd.Series(True, index=queryData.d1)
        queryData.d1['BenignNMaligNAnt'] = pd.Series(True, index=queryData.d1)
        queryData.d1['labels'] = pd.Series(True, index=queryData.d1)
        # Two whitespace-separated answers -> boolean flags via numpy array.
        ansLesion = array((raw_input('Enter: is_insitu?: is_invasive?: ')).split()).astype(bool)
        #slice data, get only 1 record
        dataCase = pd.Series( queryData.d1.loc[rowCase,:] )
        dataCase['is_insitu'] = ansLesion[0]
        dataCase['is_invasive'] = ansLesion[1]
        # NOTE(review): dead initializer, overwritten by the prompt below.
        ansDiag=["diagnosis"]
        ansDiag = str(raw_input('Dignosis: '))
        dataCase['Diagnosis'] = ansDiag
        dataCase['BenignNMaligNAnt'] = cond[:-1]
        dataCase['labels'] = cond
        # Lazily create the accumulator frame on the first processed case.
        if(init_flag):
            casesFrame = pd.DataFrame(columns=queryData.d1.columns)
            init_flag=False
        #############################
        ###### 3) Extractfeatures
        #############################
        ###### Start by Loading
        print "Start by loading volumes..."
        load = Inputs_init()
        data_loc='Z:\Cristina\MassNonmass'+os.sep+cond[:-1]
        [series_path, phases_series, lesionID_path] = load.readVolumes(data_loc, StudyID, DicomExamNumber, SeriesID, Lesions_id)
        print "Path to series location: %s" % series_path
        print "List of pre and post contrast volume names: %s" % phases_series
        print "Path to lesion segmentation: %s" % lesionID_path
        print "\n Load Segmentation..."
        lesion3D = load.loadSegmentation(lesionID_path)
        print "Data Structure: %s" % lesion3D.GetClassName()
        print "Number of points: %d" % int(lesion3D.GetNumberOfPoints())
        print "Number of cells: %d" % int(lesion3D.GetNumberOfCells())
        print "\n Visualize volumes..."
        # Create only 1 display
        loadDisplay = Display()
        lesion3D_mesh = loadDisplay.addSegment(lesion3D, (0,1,0), interact=False)
        loadDisplay.visualize(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, sub=True, postS=4, interact=True)
        # Get z slice
        LesionZslice = loadDisplay.zImagePlaneWidget.GetSliceIndex()
        #############################
        # 4) Create Segmentation of lesion. Comment out if not needed ( define seededlesion3D = lesion3D )
        #############################
        createSegment = Segment()
        print "\n Displaying picker for lesion segmentation"
        seeds = loadDisplay.display_pick(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, 4, LesionZslice)
        seededlesion3D = createSegment.segmentFromSeeds(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, seeds, loadDisplay.iren1, loadDisplay.xImagePlaneWidget, loadDisplay.yImagePlaneWidget, loadDisplay.zImagePlaneWidget)
        seededlesion3D_mesh = loadDisplay.addSegment(seededlesion3D, (0,0,1), interact=True)
        loadDisplay.picker.RemoveAllObservers()
        # save it to file
        createSegment.saveSegmentation(lesionID_path, seededlesion3D)
        #############################
        ###### Extract Dynamic features
        #############################
        print "\n Extract Dynamic contour features..."
        loadDynamic = Dynamic()
        dynamicfeatures_contour = loadDynamic.extractfeatures_contour(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D)
        print "\n=========================================="
        print dynamicfeatures_contour
        print "\n Extract Dynamic inside features..."
        dynamicfeatures_inside = loadDynamic.extractfeatures_inside(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D)
        print dynamicfeatures_inside
        print "\n=========================================="
        #############################
        ###### Extract Morphology features
        #############################
        print "\n Extract Morphology features..."
        loadMorphology = Morphology()
        morphofeatures = loadMorphology.extractfeatures(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D)
        print "\n=========================================="
        print morphofeatures
        print "\n=========================================="
        #############################
        ###### Extract Texture features
        #############################
        print "\n Extract Texture features..."
        loadTexture = Texture()
        texturefeatures = loadTexture.extractfeatures(load.DICOMImages, load.image_pos_pat, load.image_ori_pat, series_path, phases_series, seededlesion3D, loadMorphology.VOI_efect_diameter, loadMorphology.lesion_centroid )
        print "\n=========================================="
        print texturefeatures
        print "\n=========================================="
        # deal with closing windows, plots, renders, actors
        pylab.close('all')
        loadDisplay.renderer1.RemoveActor(loadDisplay.actor_mesh)
        loadDisplay.iren1.TerminateApp()
        loadDisplay.renWin1.Finalize()
        #############################
        ###### Finish tidying up and save to file
        ## append collection of cases
        #############################
        casesFrame = casesFrame.append(dataCase) # 20
        casesFrame['id']=fStudyID
        # NOTE(review): set_index(..., inplace=False) returns a new frame that
        # is discarded here (and below) -- these calls are effectively no-ops.
        casesFrame.set_index('id',inplace=False)
        dynamicfeatures_contour['id']=fStudyID
        dynamicfeatures_contour.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, dynamicfeatures_contour, on='id', how='inner')
        dynamicfeatures_inside['id']=fStudyID
        dynamicfeatures_inside.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, dynamicfeatures_inside, on='id', how='inner')
        morphofeatures['id']=fStudyID
        morphofeatures.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, morphofeatures, on='id', how='inner')
        texturefeatures['id']=fStudyID
        texturefeatures.set_index('id',inplace=False)
        casesFrame = pd.merge(casesFrame, texturefeatures, on='id', how='inner')
        # end of line
        os.chdir("Z:\Cristina\MassNonmass\codeProject\codeBase\extractFeatures\casesDatabase")
        casesFrame.to_csv('casesFrames_toclasify.csv')
        #############################
        ## Classification stage: send features to classifier to generate new case prediction
        ## Cascade current classifier
        #############################
        classifier = classifyCascade()
        classifier.case_classifyCascade()
    file_ids.close()
| [
"admin@webdsdesign.com"
] | admin@webdsdesign.com |
ea6e913cfb0bfbdeae407ef6826a14197f46c3c5 | 805a795ea81ca8b5cee1dec638585011da3aa12f | /MAIN/2.79/python/lib/site-packages/OpenGL/GLES2/EXT/float_blend.py | b15df56ee4bf4fa5dd71042d1b67ad8dbacc6e7d | [
"Apache-2.0"
] | permissive | josipamrsa/Interactive3DAnimation | 5b3837382eb0cc2ebdee9ee69adcee632054c00a | a4b7be78514b38fb096ced5601f25486d2a1d3a4 | refs/heads/master | 2022-10-12T05:48:20.572061 | 2019-09-26T09:50:49 | 2019-09-26T09:50:49 | 210,919,746 | 0 | 1 | Apache-2.0 | 2022-10-11T01:53:36 | 2019-09-25T19:03:51 | Python | UTF-8 | Python | false | false | 750 | py | '''OpenGL extension EXT.float_blend
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.float_blend to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/float_blend.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.float_blend import *
from OpenGL.raw.GLES2.EXT.float_blend import _EXTENSION_NAME
def glInitFloatBlendEXT():
    """Report whether the EXT_float_blend extension is available."""
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | [
"jmrsa21@gmail.com"
] | jmrsa21@gmail.com |
1ed4eb824895a8efbf8b5abcf633e22583dae17e | 802a34c7452f7035b3f3441169fb2e3c1743b2e9 | /Modelling/logistic_regression_pipeline.py | 3e07b150de7d5fe35ca90a48dc2fb65f9e5f1ee9 | [] | no_license | XingLLiu/ED_code | 6756a945c213df4676a7278aa89de0675afb87c9 | 79f6d680354a944a538a3d1983fbc898b0d1c097 | refs/heads/master | 2020-06-15T09:05:59.861517 | 2019-08-27T20:32:55 | 2019-08-27T20:32:55 | 195,255,510 | 0 | 0 | null | 2019-08-26T00:40:25 | 2019-07-04T14:22:05 | Python | UTF-8 | Python | false | false | 6,888 | py | # ----------------------------------------------------
# To run:
# 1. customize hyper-parameters and DATA_PATH in Section 0
# 2. in Terminal:
# python logistic_regression_pipeline.py
# ----------------------------------------------------
from ED_support_module import *
from ED_support_module import EPICPreprocess
from ED_support_module import Evaluation
from ED_support_module import LogisticRegression
# ----------------------------------------------------
# ========= 0. Preliminary seetings =========
MODEL_NAME = "LR"  # short tag used to label this model's outputs
RANDOM_SEED = 20  # seed forwarded to the train/test splitter
MODE = "c"  # splitting mode passed to splitter(); semantics defined there -- TODO confirm
FPR_THRESHOLD = 0.1  # false-positive-rate operating point for evaluation
PENALTY = "l1" # Penalty of the first fit
# Arguments
def setup_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--random_seed",
default=27,
required=True,
type=int,
help="Random seed.")
parser.add_argument("---dynamic",
default=True,
required=True,
type=bool,
help="If using one-month ahead prediction.")
parser.add_argument("--path",
required=True,
type=str,
help="Path to save figures.")
return parser
# Parser arguements
# parser = setup_parser()
# args = parser.parse_args()
# ----------------------------------------------------
# Path set-up
FIG_PATH = "../../results/logistic_regression/"
DATA_PATH = "../../data/EPIC_DATA/preprocessed_EPIC_with_dates_and_notes.csv"
FIG_ROOT_PATH = FIG_PATH + f"dynamic_{MODE}_seeds{RANDOM_SEED}_{PENALTY}penalty/"
# Create folder if not already exist
if not os.path.exists(FIG_PATH):
os.makedirs(FIG_PATH)
# ----------------------------------------------------
# ========= 1. Further preprocessing =========
preprocessor = EPICPreprocess.Preprocess(DATA_PATH)
EPIC, EPIC_enc, EPIC_CUI, EPIC_arrival = preprocessor.streamline()
# Get numerical columns (for later transformation)
num_cols = preprocessor.which_numerical(EPIC)
num_cols.remove("Primary.Dx")
# Get time span
time_span = EPIC_arrival['Arrived'].unique().tolist()
# ----------------------------------------------------
# ========= 2.a. One-month ahead prediction =========
print("====================================")
print("Dynamically evaluate the model ...\n")
for j, time in enumerate(time_span[2:-1]):
# Month to be predicted
time_pred = time_span[j + 3]
# Create folder if not already exist
DYNAMIC_PATH = FIG_ROOT_PATH + f"{time_pred}/"
if not os.path.exists(DYNAMIC_PATH):
os.makedirs(DYNAMIC_PATH)
# Prepare train/test sets
XTrain, XTest, yTrain, yTest= splitter(EPIC_arrival,
num_cols = num_cols,
mode = MODE,
time_threshold = time,
test_size =None,
EPIC_CUI = EPIC_CUI,
seed=RANDOM_SEED)
print("Training for data up to {} ...".format(time))
print( "Train size: {}. Test size: {}. Sepsis cases in [train, test]: [{}, {}]."
.format( yTrain.shape, yTest.shape, yTrain.sum(), yTest.sum() ) )
# ========= 2.a.i. Model =========
# Apply SMOTE
smote = SMOTE(random_state = RANDOM_SEED, sampling_strategy = 'auto')
col_names = XTrain.columns
XTrain, yTrain = smote.fit_sample(XTrain, yTrain)
XTrain = pd.DataFrame(XTrain, columns=col_names)
# Fit logistic regression
model = sk.linear_model.LogisticRegression(solver = 'liblinear', penalty = PENALTY,
max_iter = 1000, random_state = RANDOM_SEED).fit(XTrain, yTrain)
# Re-fit after removing features of zero coefficients
XTrain = model.remove_zero_coef_(XTrain)
model_new = sk.linear_model.LogisticRegression(solver = 'liblinear', penalty = 'l2',
max_iter = 1000, random_state = RANDOM_SEED).fit(XTrain, yTrain)
# Predict
# Note that remove_zero_coef_ does not use XTest in training. It only removes some
# features according to the LR model.
XTest = model.remove_zero_coef_(XTest)
pred_new = model_new.predict_proba(XTest)[:, 1]
# ========= 2.a.ii. Plot beta values =========
# Plot the features whose coefficients are the top 50 largest in magnitude
non_zero_coeffs = model_new.coef_[model_new.coef_ != 0]
indices = np.argsort(abs(non_zero_coeffs))[::-1][:50]
_ = plt.figure()
_ = plt.title("Logistic Regression Coefficients Values")
_ = sns.barplot(y = XTrain.columns[indices], x = np.squeeze(non_zero_coeffs)[indices])
_ = plt.yticks(fontsize = 4)
plt.savefig(DYNAMIC_PATH + f"coeffs_{time_pred}.eps", format = 'eps', dpi = 800)
plt.close()
# ========= 2.c. Feature importance =========
# Permutation test
imp_means, imp_vars = feature_importance_permutation(
predict_method = model_new.predict_proba_single,
X = np.array(XTest),
y = np.array(yTest),
metric = true_positive_rate,
fpr_threshold = FPR_THRESHOLD,
num_rounds = 5,
seed = RANDOM_SEED)
fi_evaluator = Evaluation.FeatureImportance(imp_means, imp_vars, XTest.columns, MODEL_NAME)
# Save feature importance plot
fi_evaluator.FI_plot(save_path = DYNAMIC_PATH, y_fontsize = 8, eps = True)
# ========= 2.b. Evaluation =========
evaluator = Evaluation.Evaluation(yTest, pred_new)
# Save ROC plot
_ = evaluator.roc_plot(plot = False, title = MODEL_NAME, save_path = DYNAMIC_PATH + f"roc_{time_pred}")
# Save summary
summary_data = evaluator.summary()
summary_data.to_csv(DYNAMIC_PATH + f"summary_{time_pred}.csv", index = False)
# ========= 2.c. Save predicted results =========
pred_new = pd.DataFrame(pred_new, columns = ["pred_prob"])
pred_new.to_csv(DYNAMIC_PATH + f"predicted_result_{time_pred}.csv", index = False)
# ========= End of iteration =========
print("Completed evaluation for {}.\n".format(time_pred))
# ========= 2.c. Summary plots =========
print("Saving summary plots ...")
summary_plot_path = FIG_ROOT_PATH
# Subplots of ROCs
evaluator.roc_subplot(summary_plot_path, time_span, [3, 3], eps = True)
# Aggregate ROC
aggregate_summary = evaluator.roc_aggregate(summary_plot_path, time_span, eps = True)
# Save aggregate summary
aggregate_summary.to_csv(summary_plot_path + "aggregate_summary.csv", index = False)
print("Summary plots saved at {}".format(summary_plot_path))
print("====================================")
| [
"liuxing971015@outlook.com"
] | liuxing971015@outlook.com |
6cb2260307d2f7c6cbf7b028abac70e25d96e6fd | 972322a06d74e90be88b32204d5a777b24c95a3c | /weatherVenv/lib/python3.8/site-packages/twilio/rest/preview/__init__.py | 667c76b23bd2cd834e13bb10628482bad99f6515 | [] | no_license | Ktailor34/weatherBot | 5ae95f0635d658ba7d8792afe882c01518bc7025 | ca6029b19e085d301b1358a38c70186d864215b5 | refs/heads/master | 2022-12-15T14:02:33.400798 | 2020-09-10T21:38:51 | 2020-09-10T21:38:51 | 240,813,256 | 2 | 0 | null | 2022-12-08T03:37:21 | 2020-02-16T01:27:21 | Python | UTF-8 | Python | false | false | 7,099 | py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.domain import Domain
from twilio.rest.preview.bulk_exports import BulkExports
from twilio.rest.preview.deployed_devices import DeployedDevices
from twilio.rest.preview.hosted_numbers import HostedNumbers
from twilio.rest.preview.marketplace import Marketplace
from twilio.rest.preview.sync import Sync
from twilio.rest.preview.trusted_comms import TrustedComms
from twilio.rest.preview.understand import Understand
from twilio.rest.preview.wireless import Wireless
class Preview(Domain):
def __init__(self, twilio):
"""
Initialize the Preview Domain
:returns: Domain for Preview
:rtype: twilio.rest.preview.Preview
"""
super(Preview, self).__init__(twilio)
self.base_url = 'https://preview.twilio.com'
# Versions
self._bulk_exports = None
self._deployed_devices = None
self._hosted_numbers = None
self._marketplace = None
self._sync = None
self._understand = None
self._wireless = None
self._trusted_comms = None
@property
def bulk_exports(self):
"""
:returns: Version bulk_exports of preview
:rtype: twilio.rest.preview.bulk_exports.BulkExports
"""
if self._bulk_exports is None:
self._bulk_exports = BulkExports(self)
return self._bulk_exports
@property
def deployed_devices(self):
"""
:returns: Version deployed_devices of preview
:rtype: twilio.rest.preview.deployed_devices.DeployedDevices
"""
if self._deployed_devices is None:
self._deployed_devices = DeployedDevices(self)
return self._deployed_devices
@property
def hosted_numbers(self):
"""
:returns: Version hosted_numbers of preview
:rtype: twilio.rest.preview.hosted_numbers.HostedNumbers
"""
if self._hosted_numbers is None:
self._hosted_numbers = HostedNumbers(self)
return self._hosted_numbers
@property
def marketplace(self):
"""
:returns: Version marketplace of preview
:rtype: twilio.rest.preview.marketplace.Marketplace
"""
if self._marketplace is None:
self._marketplace = Marketplace(self)
return self._marketplace
@property
def sync(self):
"""
:returns: Version sync of preview
:rtype: twilio.rest.preview.sync.Sync
"""
if self._sync is None:
self._sync = Sync(self)
return self._sync
@property
def understand(self):
"""
:returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand
"""
if self._understand is None:
self._understand = Understand(self)
return self._understand
@property
def wireless(self):
"""
:returns: Version wireless of preview
:rtype: twilio.rest.preview.wireless.Wireless
"""
if self._wireless is None:
self._wireless = Wireless(self)
return self._wireless
@property
def trusted_comms(self):
"""
:returns: Version trusted_comms of preview
:rtype: twilio.rest.preview.trusted_comms.TrustedComms
"""
if self._trusted_comms is None:
self._trusted_comms = TrustedComms(self)
return self._trusted_comms
@property
def exports(self):
"""
:rtype: twilio.rest.preview.bulk_exports.export.ExportList
"""
return self.bulk_exports.exports
@property
def export_configuration(self):
"""
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationList
"""
return self.bulk_exports.export_configuration
@property
def fleets(self):
"""
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
return self.deployed_devices.fleets
@property
def authorization_documents(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
"""
return self.hosted_numbers.authorization_documents
@property
def hosted_number_orders(self):
"""
:rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
"""
return self.hosted_numbers.hosted_number_orders
@property
def available_add_ons(self):
"""
:rtype: twilio.rest.preview.marketplace.available_add_on.AvailableAddOnList
"""
return self.marketplace.available_add_ons
@property
def installed_add_ons(self):
"""
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnList
"""
return self.marketplace.installed_add_ons
@property
def services(self):
"""
:rtype: twilio.rest.preview.sync.service.ServiceList
"""
return self.sync.services
@property
def assistants(self):
"""
:rtype: twilio.rest.preview.understand.assistant.AssistantList
"""
return self.understand.assistants
@property
def commands(self):
"""
:rtype: twilio.rest.preview.wireless.command.CommandList
"""
return self.wireless.commands
@property
def rate_plans(self):
"""
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanList
"""
return self.wireless.rate_plans
@property
def sims(self):
"""
:rtype: twilio.rest.preview.wireless.sim.SimList
"""
return self.wireless.sims
@property
def branded_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.branded_call.BrandedCallList
"""
return self.trusted_comms.branded_calls
@property
def businesses(self):
"""
:rtype: twilio.rest.preview.trusted_comms.business.BusinessList
"""
return self.trusted_comms.businesses
@property
def cps(self):
"""
:rtype: twilio.rest.preview.trusted_comms.cps.CpsList
"""
return self.trusted_comms.cps
@property
def current_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
"""
return self.trusted_comms.current_calls
@property
def devices(self):
"""
:rtype: twilio.rest.preview.trusted_comms.device.DeviceList
"""
return self.trusted_comms.devices
@property
def phone_calls(self):
"""
:rtype: twilio.rest.preview.trusted_comms.phone_call.PhoneCallList
"""
return self.trusted_comms.phone_calls
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview>'
| [
"ktailor@Kishans-MacBook-Air.local"
] | ktailor@Kishans-MacBook-Air.local |
fb3123fc3cc659b25547cf2182f90805f6614142 | 9412f4ba84f6b54f67c0d6534ab3804fa621bee8 | /order/migrations/0011_auto__add_field_order_carrier.py | d7dad8989e163e5ead999ccf8f111e992c583e0f | [] | no_license | ruspython/adler-m | 5fbeb44d1a5187d481391e49d6cca86b69d14b7a | c9b27ee7c1794c4632742887599545893621a58d | refs/heads/master | 2020-12-24T14:45:16.535606 | 2014-12-01T10:55:34 | 2014-12-01T10:55:34 | 31,331,474 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,055 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.carrier'
db.add_column(u'order_order', 'carrier',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Order.carrier'
db.delete_column(u'order_order', 'carrier')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catalog.item': {
'Meta': {'ordering': "['id']", 'object_name': 'Item'},
'article': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'brand': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'brand_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'brand_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'color_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'color_ru': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_just_updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'length': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manufacturer_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'manufacturer_ru': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'material': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'new_before': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'note_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'note_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'default': "'model'", 'max_length': '16'}),
'series': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'series_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'series_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status_action': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_back_in_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_new': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_not_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_on_the_way': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_sale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_without_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['catalog.ItemTag']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type_ru': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
u'catalog.itemtag': {
'Meta': {'object_name': 'ItemTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'order.order': {
'Meta': {'ordering': "['-add_time']", 'object_name': 'Order'},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address_building': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_flat': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_house': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'address_zipcode': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'carrier': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'client_last_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_second_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'delivery_method': ('django.db.models.fields.CharField', [], {'default': "'postal'", 'max_length': '32', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '32', 'blank': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'pc'", 'max_length': '2'}),
'payment_status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_price': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'order.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'article': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'discount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Item']"}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['order.Order']"}),
'price': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'quantity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'scale': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['order'] | [
"vlad0058@gmail.com"
] | vlad0058@gmail.com |
b0208119966e1fdb6ebb2df463fa9d87fbde71c4 | e479cf6650db5766ca8435c5a1165e8cb43de0b7 | /evaluate.py | 9cfef8c4942f289387614d90d2854c750e40f0aa | [] | no_license | WangGewu/2020-ai-road-segmentation | 531b9eaaa0a6038b77a5a5fe886748a1ccda42c3 | 5e5c0a161b1bc3424dd351738641c4a367a8c1cf | refs/heads/main | 2023-03-29T05:00:12.529475 | 2021-04-08T13:22:23 | 2021-04-08T13:22:23 | 355,915,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | import numpy as np
class Evaluator(object):
def __init__(self, num_class):
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,)*2)
def Pixel_Accuracy(self):
Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
return Acc
def Pixel_Accuracy_Class(self):
# print(self.confusion_matrix.sum(axis=1))
# print(self.confusion_matrix.sum())
Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
Acc = np.nanmean(Acc)
return Acc
def Mean_Intersection_over_Union(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
MIoU = np.nanmean(MIoU)
return MIoU
def Mean_Intersection_over_Union_test(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class**2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def add_batch(self, gt_image, pre_image):
assert gt_image.shape == pre_image.shape
self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
| [
"noreply@github.com"
] | WangGewu.noreply@github.com |
62f85c84ce0341424f10db40157445ebc85a70e9 | 6b2af072847d22c17344856636054497d1f4f632 | /leetcode/448.find-all-numbers-disappeared-in-an-array.py | ecee2f006429567a5e6fc41126e55ec157ebeef3 | [] | no_license | iplay16/vscode | 2a2dde14550164cc36d76dee4a5bdba6e998f026 | 542626143c4eab527f6bc7143e8778619c8e4857 | refs/heads/master | 2020-04-25T01:55:23.972221 | 2019-10-05T08:40:31 | 2019-10-05T08:40:31 | 172,422,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #
# @lc app=leetcode id=448 lang=python3
#
# [448] Find All Numbers Disappeared in an Array
#
class Solution:
def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
for n in nums:
nums[abs(n)-1]=-abs(nums[abs(n)-1])
res=[]
for i in range(len(nums)):
if(nums[i]>0):
res.append(i+1)
return res
| [
"iplay16@163.com"
] | iplay16@163.com |
3d03ee4b346bb937bb87a78407998a3a3294ea25 | 83aa8b54f55eeeca3c58eb7cc59219fb2c9f6307 | /tests/test_preprocessing.py | 6f8f76c60bd5e75d5dbec29723d48b9ca37d1a36 | [
"MIT"
] | permissive | vftens/RocAlphaGo-aug25-keras2-py35 | 650e8321dfc82b0cfa922afa78e3dc0e92017e49 | 67ad5242ea7a8cdda60b9e10590f7bc9e91447c6 | refs/heads/master | 2023-01-04T07:10:31.509986 | 2020-10-24T18:17:50 | 2020-10-24T18:17:50 | 306,944,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,207 | py | import unittest
from . import parseboard
import numpy as np
import AlphaGo.go as go
from AlphaGo.go import GameState
from AlphaGo.preprocessing.preprocessing import Preprocess
def simple_board():
"""
"""
gs = GameState(size=7)
# make a tiny board for the sake of testing and hand-coding expected results
#
# X
# 0 1 2 3 4 5 6
# B W . . . . . 0
# B W . . . . . 1
# B . . . B . . 2
# Y . . . B k B . 3
# . . . W B W . 4
# . . . . W . . 5
# . . . . . . . 6
#
# where k is a ko position (white was just captured)
# ladder-looking thing in the top-left
gs.do_move((0, 0)) # B
gs.do_move((1, 0)) # W
gs.do_move((0, 1)) # B
gs.do_move((1, 1)) # W
gs.do_move((0, 2)) # B
# ko position in the middle
gs.do_move((3, 4)) # W
gs.do_move((3, 3)) # B
gs.do_move((4, 5)) # W
gs.do_move((4, 2)) # B
gs.do_move((5, 4)) # W
gs.do_move((5, 3)) # B
gs.do_move((4, 3)) # W - the ko position
gs.do_move((4, 4)) # B - does the capture
return gs
def self_atari_board():
"""
"""
gs = GameState(size=7)
# another tiny board for testing self-atari specifically.
# positions marked with 'a' are self-atari for black
#
# X
# 0 1 2 3 4 5 6
# a W . . . W B 0
# . . . . . . . 1
# . . . . . . . 2
# Y . . W . W . . 3
# . W B a B W . 4
# . . W W W . . 5
# . . . . . . . 6
#
# current_player = black
gs.do_move((2, 4), go.BLACK)
gs.do_move((4, 4), go.BLACK)
gs.do_move((6, 0), go.BLACK)
gs.do_move((1, 0), go.WHITE)
gs.do_move((5, 0), go.WHITE)
gs.do_move((2, 3), go.WHITE)
gs.do_move((4, 3), go.WHITE)
gs.do_move((1, 4), go.WHITE)
gs.do_move((5, 4), go.WHITE)
gs.do_move((2, 5), go.WHITE)
gs.do_move((3, 5), go.WHITE)
gs.do_move((4, 5), go.WHITE)
return gs
def capture_board():
"""
"""
gs = GameState(size=7)
# another small board, this one with imminent captures
#
# X
# 0 1 2 3 4 5 6
# . . B B . . . 0
# . B W W B . . 1
# . B W . . . . 2
# Y . . B . . . . 3
# . . . . W B . 4
# . . . W . W B 5
# . . . . W B . 6
#
# current_player = black
black = [(2, 0), (3, 0), (1, 1), (4, 1), (1, 2), (2, 3), (5, 4), (6, 5), (5, 6)]
white = [(2, 1), (3, 1), (2, 2), (4, 4), (3, 5), (5, 5), (4, 6)]
for B in black:
gs.do_move(B, go.BLACK)
for W in white:
gs.do_move(W, go.WHITE)
gs.set_current_player(go.BLACK)
return gs
class TestPreprocessingFeatures(unittest.TestCase):
"""Test the functions in preprocessing.py
note that the hand-coded features look backwards from what is depicted
in simple_board() because of the x/y column/row transpose thing (i.e.
numpy is typically thought of as indexing rows first, but we use (x,y)
indexes, so a numpy row is like a go column and vice versa)
"""
def test_get_board(self):
gs = simple_board()
pp = Preprocess(["board"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
white_pos = np.asarray([
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
black_pos = np.asarray([
[1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
empty_pos = np.ones((gs.get_size(), gs.get_size())) - (white_pos + black_pos)
# check number of planes
self.assertEqual(feature.shape, (gs.get_size(), gs.get_size(), 3))
# check return value against hand-coded expectation
# (given that current_player is white)
self.assertTrue(np.all(feature == np.dstack((white_pos, black_pos, empty_pos))))
def test_get_turns_since(self):
"""
"""
gs = simple_board()
pp = Preprocess(["turns_since"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
one_hot_turns = np.zeros((gs.get_size(), gs.get_size(), 8))
rev_moves = list(gs.get_history())
rev_moves = rev_moves[::-1]
board = gs.get_board()
for x in range(gs.get_size()):
for y in range(gs.get_size()):
if board[x, y] != go.EMPTY:
# find most recent move at x, y
age = rev_moves.index((x, y))
one_hot_turns[x, y, min(age, 7)] = 1
self.assertTrue(np.all(feature == one_hot_turns))
def test_get_liberties(self):
"""
"""
gs = simple_board()
pp = Preprocess(["liberties"], size=7)
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
# todo - test liberties when > 8
one_hot_liberties = np.zeros((gs.get_size(), gs.get_size(), 8))
# black piece at (4,4) has a single liberty: (4,3)
one_hot_liberties[4, 4, 0] = 1
# the black group in the top left corner has 2 liberties
one_hot_liberties[0, 0:3, 1] = 1
# .. as do the white pieces on the left and right of the eye
one_hot_liberties[3, 4, 1] = 1
one_hot_liberties[5, 4, 1] = 1
# the white group in the top left corner has 3 liberties
one_hot_liberties[1, 0:2, 2] = 1
# ...as does the white piece at (4,5)
one_hot_liberties[4, 5, 2] = 1
# ...and the black pieces on the sides of the eye
one_hot_liberties[3, 3, 2] = 1
one_hot_liberties[5, 3, 2] = 1
# the black piece at (4,2) has 4 liberties
one_hot_liberties[4, 2, 3] = 1
for i in range(8):
self.assertTrue(
np.all(feature[:, :, i] == one_hot_liberties[:, :, i]),
"bad expectation: stones with %d liberties" % (i + 1))
    def test_get_capture_size(self):
        """The "capture_size" feature marks, per legal move, how many stones
        that move would capture (one-hot, clamped at 7).

        The expectation is computed by actually playing each legal move on a
        copy of the state and counting white's capture-score delta.
        """
        gs = capture_board()
        pp = Preprocess(["capture_size"], size=7)
        feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
        score_before = gs.get_captures_white()
        one_hot_capture = np.zeros((gs.get_size(), gs.get_size(), 8))
        # there is no capture available; all legal moves are zero-capture
        for (x, y) in gs.get_legal_moves():
            copy = gs.copy()
            copy.do_move((x, y))
            num_captured = copy.get_captures_white() - score_before
            one_hot_capture[x, y, min(7, num_captured)] = 1
        for i in range(8):
            self.assertTrue(
                np.all(feature[:, :, i] == one_hot_capture[:, :, i]),
                "bad expectation: capturing %d stones" % i)
    def test_get_self_atari_size(self):
        """The "self_atari_size" feature marks moves that would put the
        mover's own group into atari, one-hot by resulting group size.

        Expectations are hand-coded for the self_atari_board() position.
        """
        gs = self_atari_board()
        pp = Preprocess(["self_atari_size"], size=7)
        feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
        one_hot_self_atari = np.zeros((gs.get_size(), gs.get_size(), 8))
        # self atari of size 1 at position 0,0
        one_hot_self_atari[0, 0, 0] = 1
        # self atari of size 3 at position 3,4
        one_hot_self_atari[3, 4, 2] = 1
        self.assertTrue(np.all(feature == one_hot_self_atari))
    def test_get_self_atari_size_cap(self):
        """Self-atari detection on the capture_board() position, where ko and
        corner plays create self-ataris of size 1 and 3.
        """
        gs = capture_board()
        pp = Preprocess(["self_atari_size"], size=7)
        feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
        one_hot_self_atari = np.zeros((gs.get_size(), gs.get_size(), 8))
        # self atari of size 1 at the ko position and just below it
        one_hot_self_atari[4, 5, 0] = 1
        one_hot_self_atari[3, 6, 0] = 1
        # self atari of size 3 at bottom corner
        one_hot_self_atari[6, 6, 2] = 1
        self.assertTrue(np.all(feature == one_hot_self_atari))
    def test_get_liberties_after(self):
        """The "liberties_after" feature encodes, per legal move, how many
        liberties the played stone's group would have (one-hot, clamped at 8).

        The expectation is computed by playing each legal move on a copy of
        the state and reading the resulting liberty map.
        """
        gs = simple_board()
        pp = Preprocess(["liberties_after"], size=7)
        feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
        one_hot_liberties = np.zeros((gs.get_size(), gs.get_size(), 8))
        # TODO (?) hand-code?
        for (x, y) in gs.get_legal_moves():
            copy = gs.copy()
            copy.do_move((x, y))
            liberty = copy.get_liberty()
            libs = liberty[x, y]
            if libs < 7:
                one_hot_liberties[x, y, libs - 1] = 1
            else:
                one_hot_liberties[x, y, 7] = 1
        for i in range(8):
            self.assertTrue(
                np.all(feature[:, :, i] == one_hot_liberties[:, :, i]),
                "bad expectation: stones with %d liberties after move" % (i + 1))
    def test_get_liberties_after_cap(self):
        """
        A copy of test_get_liberties_after but where captures are imminent
        """
        gs = capture_board()
        pp = Preprocess(["liberties_after"], size=7)
        feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
        one_hot_liberties = np.zeros((gs.get_size(), gs.get_size(), 8))
        for (x, y) in gs.get_legal_moves():
            copy = gs.copy()
            copy.do_move((x, y))
            liberty = copy.get_liberty()
            libs = liberty[x, y]
            # clamp liberty counts >= 8 into the last plane
            one_hot_liberties[x, y, min(libs - 1, 7)] = 1
        for i in range(8):
            self.assertTrue(
                np.all(feature[:, :, i] == one_hot_liberties[:, :, i]),
                "bad expectation: stones with %d liberties after move" % (i + 1))
    def test_get_ladder_capture(self):
        """Playing at 'a' starts a working ladder against the white stone,
        so only 'a' should be flagged by the "ladder_capture" feature.
        """
        gs, moves = parseboard.parse(". . . . . . .|"
                                     "B W a . . . .|"
                                     ". B . . . . .|"
                                     ". . . . . . .|"
                                     ". . . . . . .|"
                                     ". . . . . W .|")
        pp = Preprocess(["ladder_capture"], size=7)
        feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
        expectation = np.zeros((gs.get_size(), gs.get_size()))
        expectation[moves['a']] = 1
        self.assertTrue(np.all(expectation == feature))
    def test_get_ladder_escape(self):
        """Only 'a' should be flagged by the "ladder_escape" feature for white."""
        # On this board, playing at 'a' is ladder escape because there is a breaker on the right.
        gs, moves = parseboard.parse(". B B . . . .|"
                                     "B W a . . . .|"
                                     ". B . . . . .|"
                                     ". . . . . W .|"
                                     ". . . . . . .|"
                                     ". . . . . . .|")
        pp = Preprocess(["ladder_escape"], size=7)
        # ladder escape is evaluated from the current player's perspective
        gs.set_current_player(go.WHITE)
        feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
        expectation = np.zeros((gs.get_size(), gs.get_size()))
        expectation[moves['a']] = 1
        self.assertTrue(np.all(expectation == feature))
    def test_two_escapes(self):
        """After white plays at 'c', both 'a' and 'b' are valid ladder
        escapes and should be flagged simultaneously.
        """
        gs, moves = parseboard.parse(". . X . . .|"
                                     ". X O a . .|"
                                     ". X c X . .|"
                                     ". O X b . .|"
                                     ". . O . . .|"
                                     ". . . . . .|")
        # place a white stone at c, and reset player to white
        gs.do_move(moves['c'], color=go.WHITE)
        gs.set_current_player(go.WHITE)
        pp = Preprocess(["ladder_escape"], size=6)
        gs.set_current_player(go.WHITE)  # redundant; current player already set above
        feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
        # both 'a' and 'b' should be considered escape moves for white after 'O' at c
        expectation = np.zeros((gs.get_size(), gs.get_size()))
        expectation[moves['a']] = 1
        expectation[moves['b']] = 1
        self.assertTrue(np.all(expectation == feature))
    def test_get_sensibleness(self):
        """The "sensibleness" feature marks legal moves that do not fill the
        current player's own eye.
        """
        # TODO - there are no legal eyes at the moment
        gs = simple_board()
        pp = Preprocess(["sensibleness"], size=7)
        feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
        expectation = np.zeros((gs.get_size(), gs.get_size()))
        for (x, y) in gs.get_legal_moves():
            # legal moves are sensible unless they fill white's own eye
            if not (gs.is_eye((x, y), go.WHITE)):
                expectation[x, y] = 1
        self.assertTrue(np.all(expectation == feature))
    def test_get_legal(self):
        """The "legal" feature is a binary mask of all legal moves."""
        gs = simple_board()
        pp = Preprocess(["legal"], size=7)
        feature = pp.state_to_tensor(gs)[0, 0]  # 1D tensor; no need to transpose
        expectation = np.zeros((gs.get_size(), gs.get_size()))
        for (x, y) in gs.get_legal_moves():
            expectation[x, y] = 1
        self.assertTrue(np.all(expectation == feature))
    def test_feature_concatenation(self):
        """Requesting several features stacks their planes in the order
        given: board (3 planes), sensibleness (1), capture_size (8).
        """
        gs = simple_board()
        pp = Preprocess(["board", "sensibleness", "capture_size"], size=7)
        feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
        expectation = np.zeros((gs.get_size(), gs.get_size(), 3 + 1 + 8))
        board = gs.get_board()
        # first three planes: board
        expectation[:, :, 0] = (board == go.WHITE) * 1
        expectation[:, :, 1] = (board == go.BLACK) * 1
        expectation[:, :, 2] = (board == go.EMPTY) * 1
        # 4th plane: sensibleness (as in test_get_sensibleness)
        for (x, y) in gs.get_legal_moves():
            if not (gs.is_eye((x, y), go.WHITE)):
                expectation[x, y, 3] = 1
        # 5th through 12th plane: capture size (all zero-capture)
        for (x, y) in gs.get_legal_moves():
            expectation[x, y, 4] = 1
        self.assertTrue(np.all(expectation == feature))
if __name__ == '__main__':
    # Run the full feature-preprocessing test suite when executed directly.
    unittest.main()
| [
"aico@ya.ru"
] | aico@ya.ru |
ca64375f7c116ff02f021ac13a54ca325742e802 | 56f9208443ae7f3dc6b06ee840e58b2edc74b627 | /ll_env/bin/django-admin.py | a6d719b9040c4d62203d5a90b9b2b9a4a5c4978e | [] | no_license | haruyamu/learing_log | d3891f2f66ff634feb36b52eaa04f251c4fdb571 | be85bff1ccb59f46c7ef332e8a0e4fd9c1530678 | refs/heads/main | 2023-02-24T18:53:40.675780 | 2021-01-27T02:34:23 | 2021-01-27T02:34:23 | 332,979,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | #!/Users/haruya/projects/learing_log/ll_env/bin/python3
# When the django-admin.py deprecation ends, remove this script.
# Compatibility shim installed into the virtualenv's bin/: forwards to
# django-admin while warning that the .py entry point is deprecated.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0 dropped this warning class along with the script itself.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"haruya.20020331@icloud.com"
] | haruya.20020331@icloud.com |
d57958a6781674de72dcaadbfbe121da93e285e6 | b9499f3f235e5da9c3e83782d114ad41586e7bcd | /data_utils.py | 4b8623b9df9bd5b974c4a8d17e921d339690cdc7 | [
"MIT"
] | permissive | maremita/-fork-Phylo_structural_EM | 9e3e57e6bd5406996d52e77d01b69ff2c7340d61 | 609fdd94874e02b02101649033390335e34c43cb | refs/heads/master | 2023-08-30T04:55:49.227399 | 2020-03-19T10:44:58 | 2020-03-19T10:44:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | import numpy as np
import re
import networkx as nx
# define parameters
nuc_names = ['A', 'C', 'G', 'T']              # nucleotide symbols, indexed 0-3
transform = {'A': 0, 'C': 1, 'G': 2, 'T': 3}  # nucleotide -> integer code
regex = re.compile('[ACGT]')                  # matches one valid nucleotide character
def get_char_data(name='infile'):
    """
    Read aligned sequences from *name* and return (matrix, N, M).

    N is the number of sequences and M their length, taken from the file
    header.  NOTE(review): despite the original docstring and the variable
    name, the returned (N, M) chararray holds the nucleotide characters
    themselves ('A'/'T'/'C'/'G'), not integer codes -- the `transform`
    mapping below is commented out and unused.
    """
    regex = re.compile('[ATCG]')
    # transform = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    sequences, N, M = parse_data(name)
    numeric_sequences = np.chararray((N, M), unicode=True)
    n = 0
    for seq in sequences:
        # keep only valid nucleotide characters from this line
        sites = re.findall(regex, seq)
        if len(sites) == 0 or sites is None:
            continue
        # keep the last M sites, dropping any leading label characters
        # that happen to be A/T/C/G
        sites = sites[-M:]
        numeric_sequences[n, :] = np.array([site for site in sites])
        n += 1
    return numeric_sequences, N, M
def parse_data(name='infile'):
    """
    Parse a PHYLIP-style file whose first line is
    "<num_sequences> <len_sequences>", followed by sequence lines.

    Returns (sequences, num_sequences, len_sequences).

    NOTE(review): the loop reads `len_sequences` lines rather than
    `num_sequences` -- this looks swapped, but may be intentional for the
    interleaved format used here; confirm against the expected input files.
    The file handle is never closed (no `with`).
    """
    f = open(name, 'r')
    sequences = []
    meta_data = f.readline().split()
    num_sequences = int(meta_data[0])
    len_sequences = int(meta_data[1])
    for i in range(len_sequences):
        seq = f.readline().strip('\n')
        sequences.append(seq)
    return sequences, num_sequences, len_sequences
# simulate sequences given the tree topology and rate matrices
def simulate_seq(tree, evo_model, ndata=10):
    """
    Simulate `ndata` independent sites of leaf sequences on `tree`.

    tree: networkx graph whose nodes carry 'type' ('root' or not), branch
          length 't', and 'parent'.  Convention assumed here: the root is
          the highest-numbered node and the first n_leaves node ids are the
          leaves -- TODO confirm against the tree-construction code.
    evo_model: provides trans_matrix(t) and stat_prob (stationary dist).

    Returns an (n_leaves, ndata) array of nucleotide characters.
    """
    n_nodes = len(tree)
    root = n_nodes - 1
    n_leaves = (n_nodes + 1) // 2
    # per-branch transition matrices, indexed by child node id
    pt_matrix = [np.zeros((4, 4)) for i in range(2 * n_leaves - 2)]
    # do postorder tree traversal to compute the transition matrices
    for node in nx.dfs_postorder_nodes(tree, root):
        if not tree.nodes[node]['type'] == 'root':
            t = tree.nodes[node]['t']
            pt_matrix[node] = evo_model.trans_matrix(t)
    simuData = []
    status = [''] * (2 * n_leaves - 1)
    for run in range(ndata):
        # preorder walk: sample each node conditioned on its parent's state
        for node in nx.dfs_preorder_nodes(tree, root):
            if tree.nodes[node]['type'] == 'root':
                # root state drawn from the stationary distribution
                status[node] = np.random.choice(4, size=1, p=evo_model.stat_prob)[0]
            else:
                parent = tree.nodes[node]['parent']
                status[node] = np.random.choice(4, size=1, p=pt_matrix[node][status[parent]])[0]
        simuData.append([nuc_names[i] for i in status[:n_leaves]])
    return np.transpose(simuData)
| [
"okviman@kth.se"
] | okviman@kth.se |
7c9c32d90f97ed66f476c030951ca39cf376ba56 | dd5d54eb45b8993769310a679c14b20600005793 | /data/process_data.py | 338c21443b179bace0b3b4d1f8e2e8cada447d81 | [
"MIT"
] | permissive | amalpm-rog/Disaster-Response-Pipeline | b5d3653ac9741ea841b67c9f94486afb30766cf3 | 47688decc1d8d0daa2be3ab9edb8bb391a1d1661 | refs/heads/master | 2022-11-15T12:10:20.539038 | 2020-07-06T05:36:20 | 2020-07-06T05:36:20 | 277,448,793 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | #! /usr/bin/env python3
# coding=utf-8
# The Data processing module
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    """Read the messages and categories CSV files and join them on ``id``.

    Parameters
    ----------
    messages_filepath : str
        Location of the messages CSV file.
    categories_filepath : str
        Location of the categories CSV file.

    Returns
    -------
    pandas.DataFrame
        Inner join of the two tables on the ``id`` column.
    """
    messages_df, categories_df = (
        pd.read_csv(path) for path in (messages_filepath, categories_filepath)
    )
    return messages_df.merge(categories_df, on='id')
def clean_data(df):
    """Expand the raw ``categories`` column into one 0/1 column per label.

    The ``categories`` column holds strings such as
    ``"related-1;request-0;..."``.  Each label becomes its own integer
    column, the original column is dropped, and duplicate rows are removed.

    Parameters
    ----------
    df : pandas.DataFrame
        Merged messages/categories frame from ``load_data``.

    Returns
    -------
    pandas.DataFrame
        Cleaned frame with one integer column per category.
    """
    # Split "label-value;label-value;..." into one column per entry.
    split_cats = df['categories'].str.split(pat=';', n=None, expand=True)
    # Column names come from the first row, minus the trailing "-<digit>".
    split_cats.columns = split_cats.iloc[0].str[:-2]
    # The last character of every cell is the 0/1 value for that label.
    for name in split_cats.columns:
        split_cats[name] = split_cats[name].str[-1].astype(int)
    # Replace the packed column with its expanded counterpart.
    cleaned = pd.concat([df.drop(columns='categories'), split_cats], axis=1)
    return cleaned.drop_duplicates(keep='first')
def save_data(df, database_filename):
    """Persist *df* to a SQLite database as table ``messages_categories``.

    Parameters
    ----------
    df : pandas.DataFrame
        The cleaned dataframe to store.
    database_filename : str
        Filesystem path of the SQLite database file.

    Returns
    -------
    None
    """
    # Print a concise summary instead of dumping the entire dataframe.
    print('Writing {} rows to {} database'.format(len(df), database_filename))
    engine = create_engine('sqlite:///{}'.format(database_filename))
    # if_exists='replace' makes the ETL pipeline re-runnable; without it
    # to_sql raises ValueError when the table already exists.
    df.to_sql('messages_categories', engine, index=False, if_exists='replace')
def main():
    """Command-line entry point: load, clean, and store the disaster data.

    Expects exactly three arguments: the messages CSV path, the categories
    CSV path, and the output SQLite database path.  Prints usage help and
    returns when the argument count is wrong.
    """
    if len(sys.argv) != 4:
        # Guard clause: explain the expected invocation and bail out.
        print('Please provide the filepaths of the messages and categories '
              'datasets as the first and second argument respectively, as '
              'well as the filepath of the database to save the cleaned data '
              'to as the third argument. \n\nExample: python process_data.py '
              'disaster_messages.csv disaster_categories.csv '
              'DisasterResponse.db')
        return

    messages_filepath, categories_filepath, database_filepath = sys.argv[1:]

    print('Loading data...\n    MESSAGES: {}\n    CATEGORIES: {}'
          .format(messages_filepath, categories_filepath))
    df = load_data(messages_filepath, categories_filepath)

    print('Cleaning data...')
    df = clean_data(df)

    print('Saving data...\n    DATABASE: {}'.format(database_filepath))
    save_data(df, database_filepath)

    print('Cleaned data saved to database!')
| [
"noreply@github.com"
] | amalpm-rog.noreply@github.com |
83888354f0b783bb8f6c9e830474067926de5f17 | 7d4667ee455337014760a7e23b9556c30d358b25 | /Web2_0course/buyagrade/cardValidate.py | c0cc154f6d1542837d2e0826f1b482ece02bd129 | [] | no_license | joyeecheung/WebHWs | 786d6bfcfd3b0a0822a73f7b94b7d6673fd89568 | 9f9725187ef7ce6d496e3368a9c7acbd512f27f2 | refs/heads/master | 2020-12-25T17:05:30.659229 | 2015-02-11T19:17:41 | 2015-02-11T19:17:41 | 13,560,924 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | """ Functions for validating credit card numbers. """
import re
def IsValidChecksum(number):
    """Return True if *number* passes the Luhn mod-10 checksum.

    Non-digit characters (e.g. group-separating dashes) are ignored, so
    both grouped and ungrouped card numbers can be checked.
    """
    total = 0
    # Walk the digits right-to-left; every second digit is doubled and its
    # digit sum added, per the Luhn algorithm.
    for position, char in enumerate(c for c in reversed(str(number)) if c.isdigit()):
        digit = int(char)
        if position % 2 == 0:
            total += digit
        else:
            doubled = 2 * digit
            total += doubled // 10 + doubled % 10
    return total % 10 == 0
def IsValidCharacters(number):
    """Return True if *number* is digits, optionally grouped by dashes.

    Ungrouped numbers must be all digits; grouped numbers must consist of
    4-digit groups separated by single dashes, e.g. ``"4111-1111-1111-1111"``.

    Bug fixed: the original returned True for ANY mix of digits and dashes
    because the permissive pattern ``^[-0-9]*$`` matched first and the
    grouping pattern was never effectively consulted, so malformed
    groupings such as ``"12-34"`` passed.
    """
    if '-' in number:
        # Grouped form: one or more 4-digit groups joined by single dashes.
        return re.match(r'^([0-9]{4}-)*[0-9]{4}$', number) is not None
    # Ungrouped form: digits only (and at least one digit).
    return number.isdigit()
def IsValidPattern(number, type):
    """Return True if *number* matches the issuer pattern for *type*.

    Supported issuers: ``'visa'`` (16 digits starting with 4) and
    ``'mastercard'`` (16 digits starting with 51-55).  An unknown issuer
    name raises ``KeyError``, as before.
    """
    issuer_patterns = {
        'mastercard': r'^5[12345]([0-9]{14})$',
        'visa': r'^4([0-9]{15})$',
    }
    pattern = issuer_patterns[type]
    return re.match(pattern, number) is not None
def IsValid(number, type):
    """Validate a card number end-to-end: characters, issuer pattern, checksum."""
    if not IsValidCharacters(number):
        return False
    digits_only = number.replace('-', '')
    if not IsValidPattern(digits_only, type):
        return False
    return IsValidChecksum(digits_only)
"joyeecheung@joyeecheung-virtual-machine.(none)"
] | joyeecheung@joyeecheung-virtual-machine.(none) |
da01705f525324f663168eb74c1a77e66c4cc174 | 417b516b7c15779a8f93511a09ca213017d22415 | /app/members/models.py | 86ca7fd32e3082d6975e7a69d35f8d72a7ef0fc5 | [
"MIT"
] | permissive | krakiun/chargeflask | 453f631cb5ba5a05ebeb1d318cce1b84e9637b18 | dabca234a07550889927dd308c7d4ef8923da943 | refs/heads/master | 2021-08-14T06:55:56.017209 | 2017-11-13T20:39:18 | 2017-11-13T20:39:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | """
filename: members.py
description: Model for Members in Committees.
created by: Omar De La Hoz (oed7416@rit.edu)
created on: 08/31/17
"""
from app import db
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
# Association table for the many-to-many relationship between committees
# and users (i.e. committee membership).  No model class maps to it;
# presumably it is referenced via relationship(secondary=members_table)
# in the Committee/User models -- confirm there.
members_table = db.Table('members', db.Model.metadata,
    db.Column('committees_id', db.String(255), db.ForeignKey('committees.id')),
    db.Column('users_id', db.String(255), db.ForeignKey('users.id'))
)
| [
"omar.dlhz@hotmail.com"
] | omar.dlhz@hotmail.com |
72c6b5820ec2373fc5c053015b127eae12ba7b5d | 74fb05c7b5eddf2b368e181f38b9423a711bf2e0 | /real_python_tutorails/iterators/iterators_example.py | ae43af08beec8d63c2765d453574e4ff98b5c5cb | [] | no_license | musram/python_progs | 426dcd4786e89b985e43284ab5a5b1ba79cb285e | ad1f7f2b87568ba653f839fe8fa45e90cbde5a63 | refs/heads/master | 2022-11-10T09:53:29.993436 | 2020-06-21T00:21:23 | 2020-06-21T00:21:23 | 264,607,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py |
if __name__ == "__main__":
    # --- a for-loop is sugar for the iterator protocol ---
    names = ['sai', 'asi', 'isa']
    for name in names:
        print(name)
    # what actually happens internally is this:
    it = names.__iter__()
    print(next(it))
    # similarly, file objects are iterators over their lines
    f = open('/etc/passwd', 'r')
    it = f.__iter__()
    print(next(it))

    # writing a generator
    # (1) a generator function: yield suspends and resumes execution
    def countDown(n):
        print('Counting from' , n)
        while (n > 0):
            yield n
            n -= 1
        print('Done')

    for x in countDown(5):
        print(x)
    # this is same as
    c = countDown(5)
    it = c.__iter__()
    print(next(it))

    # writing a generator
    # (2) a generator expression
    it = ( x for x in range(5,0,-1))
    print(next(it))

    # writing a generator
    # (3) a class whose __iter__ is itself a generator function
    class CountDown:
        def __init__(self, n):
            self.n = n

        def __iter__(self):
            n = self.n
            while (n > 0):
                yield n
                n -= 1

    c = CountDown(5)
    for x in c:
        print(x)

    import os
    import time

    def follow(filename):
        # Generator that tails *filename* forever, yielding new lines as
        # they are appended (like `tail -f`).
        f = open(filename, 'r')
        f.seek(0, os.SEEK_END)
        while True:
            line = f.readline()
            if not line:
                time.sleep(0.1)
                continue
            yield line

    # NOTE: this loop never terminates -- follow() blocks waiting for new
    # lines to be appended to /etc/passwd.
    for line in follow('/etc/passwd'):
        row = line.split(',')
        print(row)
| [
"musram@gmail.com"
] | musram@gmail.com |
6b91829cc22bc82d5f07ab40c654250f7a903fbb | dea24559930c75ed7fd6016464e6844644e7bf06 | /plot_kolmogorov.py | ad925c613472e1f535acd0edfc6a4c1ffb2a3f2f | [] | no_license | sheyma/fitzefatze | 45f8da30f9bf4403c27feac6529da2367c9a391b | 11c6839c86fe131ab803f9e39d4fe738b75e8b54 | refs/heads/master | 2021-01-11T22:19:01.351693 | 2017-01-26T14:56:18 | 2017-01-26T14:56:18 | 78,947,686 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,834 | py | import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as pl
import sys, glob, os
from scipy import stats
import collections
from math import factorial, sqrt, ceil
# check the loaded matrix if it is symmetric
def load_matrix(file):
    """Load a whitespace-delimited matrix from *file* and verify symmetry.

    Prints an error and raises ValueError when the matrix is not square
    and symmetric.  Returns the matrix in its on-disk orientation
    (np.loadtxt is called with unpack=True, so it is transposed back).

    Bug fixed: the original used a Python 2 ``print`` statement, which is
    a SyntaxError on Python 3; it is now the print() function (identical
    output on both interpreters).
    """
    A = np.loadtxt(file, unpack=True)
    AT = np.transpose(A)
    # check the symmetry (shape test first so == never compares
    # incompatible shapes)
    if A.shape[0] != A.shape[1] or not (A == AT).all():
        print("error: loaded matrix is not symmetric")
        raise ValueError
    return AT
def corr_histo(corr_matrix):
    """Return a 100-bin, density-normalized histogram of correlation values.

    The matrix is flattened and binned over the full correlation range
    [-1, 1].

    Bug fixed: the original passed ``normed=True``, a keyword that was
    deprecated and then removed from ``np.histogram``; ``density=True`` is
    the replacement and is numerically identical for uniform bins.
    """
    corr_flat = np.ndarray.flatten(corr_matrix)
    corr_max = 1.0
    corr_min = -1.0
    bin_nu = 100
    # get a normalized histogram (integrates to 1 over [-1, 1])
    hist, bin_edges = np.histogram(corr_flat, bins=bin_nu,
                                   range=[corr_min, corr_max], density=True)
    return hist
def compare_kolmo(name_A, name_B, THR, SIG):
    """Kolmogorov-Smirnov distances between two sets of correlation histograms.

    Parameters
    ----------
    name_A, name_B : str
        Filename templates with two ``%`` placeholders (threshold, sigma).
    THR : iterable
        Threshold values to sweep (rows of the result).
    SIG : iterable
        Coupling-strength values to sweep (columns of the result).

    Returns
    -------
    numpy.ndarray
        len(THR) x len(SIG) matrix of KS statistics, with rows ordered by
        ascending threshold (matching the original OrderedDict sort).

    Bugs fixed: the original ignored the THR/SIG arguments and iterated
    the module-level ``thr_array``/``sig_array`` globals (the loop
    variables shadowed the parameters); it also wrapped ``dict.values()``
    in ``np.array``, which yields a useless 0-d object array on Python 3.
    """
    ks_by_threshold = {}
    for thr in THR:
        row = []
        for sig in SIG:
            hist_a = corr_histo(load_matrix(name_A % (thr, sig)))
            hist_b = corr_histo(load_matrix(name_B % (thr, sig)))
            # ks_2samp returns (statistic, p-value); only the statistic is kept
            ks_stat, _p = stats.ks_2samp(hist_a, hist_b)
            row.append(ks_stat)
        ks_by_threshold[thr] = np.array(row)
    return np.array([ks_by_threshold[t] for t in sorted(ks_by_threshold)])
# Filename templates for the empirical (brain) and random (Erdos-Renyi)
# correlation matrices; the '%' placeholders are (threshold, sigma).
data_brain = '/run/media/sheyma/0a5437d3-d51c-4c40-8c7a-06738fd0c83a/sheyma_bayrak_2015/jobs_corr/'
name_brain = data_brain + 'acp_w_0_ADJ_thr_0.%02d_sigma=%g_D=0.05_v=30.0_tmax=45000_FHN_corr.dat'
data_random = '/var/tmp/fitzefatze-hydra/jobs_erdos01/'
name_random = data_random + 'acp_w_thr_0.%02d_erdos_sigma=%.3f_D=0.05_v=30.0_tmax=45000_pearson.dat'

# Parameter grid: thresholds 0.34..0.82 in steps of 0.04 (stored as
# integers 34..82) and coupling strengths sigma.
thr_array = np.arange(34, 86, 4)
sig_array = np.array([0.050, 0.045, 0.040, 0.035, 0.030, 0.025, 0.020, 0.015, 0.010, 0.005 ])

KS = compare_kolmo(name_brain, name_random, thr_array, sig_array)

#Parameter Space Plot: KS distance d as a heat map over (p, c)
fig, ax = pl.subplots(figsize=(15,12))
pl.subplots_adjust(left=0.15, right=0.95, top=0.93, bottom=0.13)
pl.subplot(1,1,1)
pl.imshow(np.transpose(KS), interpolation='nearest',
          cmap='jet', aspect='auto')
# Hand-picked tick labels in data units (threshold p, coupling c).
a = np.array([0.38, 0.50, 0.62, 0.74])
b = np.array([0.05, 0.04, 0.03, 0.02, 0.01])
separ_xthick = ceil(float(len(thr_array))/len(a)) -1
pl.xticks(np.arange(1,len(thr_array), separ_xthick), a, fontsize = 50)
pl.yticks([0, 2, 4, 6, 8], b, fontsize = 50)
pl.tick_params(which='major', length=12, width=5)
pl.ylabel('$c$', fontsize = 50)
pl.xlabel('$p$', fontsize = 50)
cbar = pl.colorbar()
cbar.ax.set_title('d', fontsize = 50)
for t in cbar.ax.get_yticklabels():
    t.set_fontsize(50)
pl.show()
| [
"sheymaba@gmail.com"
] | sheymaba@gmail.com |
e0a10d5037f48480969cd86d793755f6b876565a | b2654c3003c7de93b24c84d6acce9bcdb2cad826 | /07_function_objects.py | 384c804218176b869e4385cd2bf0caf45d48041d | [] | no_license | sonicbrcm/dive-into-cpython | eef9ab16bb10aa82555c0221a17b28c578b34afd | 1e17fbaacdfb0d546fda1bbc6d79361de417caf0 | refs/heads/master | 2021-12-22T04:13:05.784313 | 2017-10-10T08:34:09 | 2017-10-10T08:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | PyCodeObject
cpython/Include/code.h
cpython/Objects/codeobject.c
PyFunctionObject
cpython/Include/funcobject.h
cpython/Objects/funcobject.c
| [
"hexiaowei91@163.com"
] | hexiaowei91@163.com |
f927f5c141a4938dceb50e383256385903c97b7d | 7e74dae3390c0dd2751c9353c6051350ab4f1d86 | /blog_python/blog/migrations/0014_auto_20201015_2350.py | 30bf6597ab55111815756e299f6211904d794eb1 | [] | no_license | Eugin-Paul/Blog | 70c327198fb93ef569246a02f9a56f2d57d0b53e | 7f562b3cd17f477e5ea97ba04132554262c3f9b5 | refs/heads/master | 2023-01-01T09:02:41.702171 | 2020-10-22T07:15:38 | 2020-10-22T07:15:38 | 305,793,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # Generated by Django 3.0.7 on 2020-10-15 18:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0013_auto_20201015_2341'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='content',
new_name='comment',
),
]
| [
"euginpaul1717@gmail.com"
] | euginpaul1717@gmail.com |
23ea96da8e03e7b4983c3084a4f0f423d4cca8a5 | 18f5c71436a22da1c3835b562f1538722114f2af | /backend/api/urls.py | a587b3e16148b0d73e2da279b169f27c4f01b4e0 | [
"MIT"
] | permissive | timakaryo/antrean | 54968fd960e8d886a6224d5d5a6f21c212f6acad | 8eab42a0a17092355adc9b56d2f29dbf53fa2a54 | refs/heads/master | 2021-01-20T21:09:17.098029 | 2017-10-23T13:00:42 | 2017-10-23T13:00:42 | 101,754,612 | 0 | 0 | null | 2017-10-23T13:00:43 | 2017-08-29T11:39:50 | Python | UTF-8 | Python | false | false | 144 | py | from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
url(r'^token/', obtain_jwt_token)
]
| [
"chiputera@gmail.com"
] | chiputera@gmail.com |
dd5fbc68c39d3c24641b9f746e2812d44fa78e62 | e6d4a87dcf98e93bab92faa03f1b16253b728ac9 | /algorithms/python/destinationCity/destinationCity.py | 1b55d8c23b19986d5f6d1359d7af30216a4080a4 | [] | no_license | MichelleZ/leetcode | b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f | a390adeeb71e997b3c1a56c479825d4adda07ef9 | refs/heads/main | 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/destination-city/
# Author: Miao Zhang
# Date: 2021-05-06
class Solution:
    def destCity(self, paths: List[List[str]]) -> str:
        """Return the terminal city of the path list.

        The terminal city is reached exactly once and has no outgoing
        path.  Returns '' when no such city exists.
        """
        arrivals = collections.Counter(dst for _, dst in paths)
        departures = collections.Counter(src for src, _ in paths)
        for city, times_reached in arrivals.items():
            if times_reached == 1 and departures[city] == 0:
                return city
        return ''
| [
"zhangdaxiaomiao@163.com"
] | zhangdaxiaomiao@163.com |
a516a1d9d6566da6c0e8403dbfd46b44eaa1bf43 | 5b3e4b9263c2fcbec1fc5890e4a6035aeb9637f7 | /case/test_cate_gory_second.py | ad6bc35d090a77b8d6c162699efc3033bd566863 | [] | no_license | cheng2020-G/fastapp | 68216bcba5860a51a765c64989b5e99dc53b2e3e | d55d7d3a53337a2bd1792093c39169b0e31f1cf8 | refs/heads/master | 2023-04-21T11:39:22.590349 | 2021-05-27T05:40:15 | 2021-05-27T05:40:15 | 365,464,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | import re
from basecase.basecase import BaseCase
class TestCateGorySecond(BaseCase):
    """Smoke test of the second-level category listing API."""

    def test_cate_gory_second(self):
        """Request the category listing and sanity-check the response shape.

        Verifies HTTP 200 / retCode 0 and that each documented field of the
        first book entry is present with a plausible value (digits, word
        characters, or an http URL).  Commented-out asserts cover fields
        that are apparently not always returned.
        """
        res = self.cate_gory_second.cate_gory_second()
        print('请求url:' + res.url)
        print('requestId:' + res.headers['requestId'])
        print(res.json())
        # print(res.json()['data']['books'])
        assert res.status_code == 200
        assert res.json()['retCode'] == 0
        assert re.search(r'\d', str(res.json()['data']['isMore']))
        # assert re.search(r'\d', str(res.json()['data']['sortMark'][0]['markId']))
        # assert re.search(r'\w+', str(res.json()['data']['sortMark'][0]['title']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['author']))
        assert re.search(r'\d', str(res.json()['data']['books'][0]['resFormat']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['iconDesc']))
        assert re.search(r'http://\w+', str(res.json()['data']['books'][0]['coverWap']))
        assert re.search(r'\d', str(res.json()['data']['books'][0]['iconType']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['clickNum']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['totalWordSize']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['bookName']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['introduction']))
        assert re.search(r'\d+', str(res.json()['data']['books'][0]['bookId']))
        assert re.search(r'\w+', str(res.json()['data']['books'][0]['status']))
        # assert re.search(r'\d', str(res.json()['data']['statusMark'][0]['markId']))
        # assert re.search(r'\w+', str(res.json()['data']['statusMark'][0]['title']))
| [
"1007884377@qq.com"
] | 1007884377@qq.com |
bbb6ba991cd66865d214bb9394d18bf5215976cd | 7bb1c8cabeda75bd7db913e2396da383a4e7ba83 | /smtpapi/send.py | 3299ba75e99343761e54794f47893946e7bb390d | [] | no_license | dlinsg/test | 273945d72a2d210e96ac441c1c8a60ab0bd5d7a0 | 4804381299797c4ff61bc23aa95b0cd306f93f5f | refs/heads/master | 2021-01-17T10:06:44.653743 | 2016-04-10T23:00:32 | 2016-04-10T23:00:32 | 23,331,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | #!/usr/bin/python
# coding: utf-8
# Python 2 script: sends one test message through SendGrid, exercising
# UTF-8 subject content and %tag% template substitutions.
import datetime

# Today's date as MM/DD/YYYY, substituted for %tag3% below.
today = str(datetime.date.today().strftime('%m/%d/%Y'))

import sendgrid

# NOTE(review): credentials are hard-coded; move to environment variables.
sg = sendgrid.SendGridClient('dlintestapi', 'testingapi123')
message = sendgrid.Mail()
message.set_from('Dave Lin <david.lin@sendgrid.com>')
message.add_to('David Lin <david.lin@sendgrid.com>')
message.set_subject("Hello %tag1% García, your résumé balance is %tag2% as of %tag3%. Thank you and 谢谢!!!")
message.set_text("test email")
message.add_substitution("%tag1%", "José")
message.add_substitution("%tag2%", "1.234£")
message.add_substitution("%tag3%", today)

status, msg = sg.send(message)
print str(status) + ' ' + msg
| [
"david.lin@sendgrid.com"
] | david.lin@sendgrid.com |
8aa6225c10b41ae45a7dcca40c4dab0d5f1bbe27 | 8652ad554a5fc6076ddae6a869576b6360438a0f | /boilerplate.py | f8d7b3708ef6a230941929700669295cddc6d494 | [] | no_license | dipamsen/Pygame-Intro-Code | b79bb2c554332275c5042efc44cffc32e1711da3 | f4316677d965f140c1ba38b6af26620bf2ddeae0 | refs/heads/main | 2023-06-17T11:01:34.632924 | 2021-07-16T07:17:19 | 2021-07-16T07:17:19 | 386,546,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from colors import *
import pygame
# unnecessary imports for vscode intellisense
import pygame.display
import pygame.time
import pygame.draw
import pygame.mouse
import pygame.event
import pygame.image
import pygame.transform
import math
pygame.init()

# Window / loop configuration.
FPS = 60
WIDTH = 400
HEIGHT = 400

pygame.display.set_caption("PyGame: Sketch Title")
pygame.display.set_icon(pygame.image.load("logo.png"))
win = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()


def redraw_game_window():
    # Flip the previous frame to the screen, then clear for the next one.
    pygame.display.update()
    win.fill(DARK_GREY)


# mainloop
run = True
while run:
    redraw_game_window()
    MOUSE = pygame.mouse.get_pos()
    # Filled grey circle with a white outline, following the cursor.
    pygame.draw.circle(win, (150, 150, 150), MOUSE, 20)
    pygame.draw.circle(win, WHITE, MOUSE, 20, 2)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    clock.tick(FPS)

pygame.quit()
| [
"dipamdiptam@gmail.com"
] | dipamdiptam@gmail.com |
5fa12fe4e4a40d5f551f4cbcfc107b8a901841e6 | db774c9a29620e8374740f7a30bd27df40cd8da3 | /pars/bin/easy_install | 2f05c1d74e3ab9b64838db9f13794f43ed9943bd | [] | no_license | Rakhimzhan312/Part2Task19Parsinglalafo | 7330b79912d96c05f9b32a8adf08a628df587378 | 1f7edf5ce3e65164799f0f2dfcc137e33a1b0c7d | refs/heads/master | 2020-09-15T22:54:34.115603 | 2019-11-23T11:17:30 | 2019-11-23T11:17:30 | 223,576,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | #!/home/rakhimzhan/Desktop/Tasks/Chapterparsing/parsingtest/pars/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"rakhimzhan312@gmail.com git config --global user.name Rakhimzhangit push -u origin mastergit statusgit push origin mastergit config --global user.email rakhimzhan312@gmail.com"
] | rakhimzhan312@gmail.com git config --global user.name Rakhimzhangit push -u origin mastergit statusgit push origin mastergit config --global user.email rakhimzhan312@gmail.com | |
441e3e75fd6b5ef8cc403e0b4b73843eb432393c | 62c6e50d148f1ccd51001abedbfe748fda94427e | /backend/cookieapp/views.py | 65b7b4217bfa0991bcd696807104284c0951ead4 | [] | no_license | i7-Ryzen/django-jwt-httponly-cookie | be27936d0d7111688a0b2d5811edd891c2b5c925 | bb21ae75b05f7b42e98da6a69f9280c51a1171fd | refs/heads/main | 2023-05-06T15:30:01.870387 | 2021-05-24T05:35:10 | 2021-05-24T05:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | from rest_framework_simplejwt.tokens import RefreshToken
from django.middleware import csrf
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth import authenticate
from django.conf import settings
from rest_framework import status
def get_tokens_for_user(user):
    """Issue a fresh JWT refresh/access token pair for *user*."""
    refresh_token = RefreshToken.for_user(user)
    return {
        'refresh': str(refresh_token),
        'access': str(refresh_token.access_token),
    }
class LoginView(APIView):
    """POST endpoint that authenticates a user and stores the JWT access
    token in an HttpOnly cookie (options come from settings.SIMPLE_JWT)."""

    def post(self, request, format=None):
        """Authenticate ``username``/``password`` from the request body.

        On success the access token is set as an HttpOnly cookie, a CSRF
        token is primed, and both tokens are also returned in the body.
        On failure a 404 is returned.  NOTE(review): 401/403 would be the
        conventional status codes for auth failures -- confirm clients
        before changing, as they may rely on 404.
        """
        data = request.data
        response = Response()
        username = data.get('username', None)
        password = data.get('password', None)
        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                data = get_tokens_for_user(user)
                # HttpOnly cookie: browser JS cannot read the token.
                response.set_cookie(
                    key = settings.SIMPLE_JWT['AUTH_COOKIE'],
                    value = data["access"],
                    expires = settings.SIMPLE_JWT['ACCESS_TOKEN_LIFETIME'],
                    secure = settings.SIMPLE_JWT['AUTH_COOKIE_SECURE'],
                    httponly = settings.SIMPLE_JWT['AUTH_COOKIE_HTTP_ONLY'],
                    samesite = settings.SIMPLE_JWT['AUTH_COOKIE_SAMESITE']
                )
                # Ensure a CSRF token cookie accompanies the session.
                csrf.get_token(request)
                response.data = {"Success" : "Login successfully","data":data}
                return response
            else:
                return Response({"No active" : "This account is not active!!"}, status=status.HTTP_404_NOT_FOUND)
        else:
            return Response({"Invalid" : "Invalid username or password!!"}, status=status.HTTP_404_NOT_FOUND)
"abhishekk580@gmail.com"
] | abhishekk580@gmail.com |
639425d836fa30470f16437a549f853bba95ed67 | b92d4895baa78683e328a32295114e70e47b7ff2 | /genetics/phasing/make_readbackPhasing_samplesheet.py | 6937323194973c13ce7b72758dc851164d21273d | [] | no_license | npklein/random_scripts | dcd367b465b6c32da90f7390372eee1da1d78c41 | 11eda8a4889ab533be7eff19e493fd66938ba3a3 | refs/heads/master | 2021-01-19T22:49:32.887873 | 2018-05-28T13:35:08 | 2018-05-28T13:35:08 | 88,863,994 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,528 | py | import os
# Build individual_bam_link.txt: a CSV (individualID,sampleName,bam) that
# links each sample to an existing masked BAM file, collected from three
# sources (diagnostic BAMs, freeze2 GTE samples, extra LLDeep samples).
# Samples whose BAM cannot be found are reported and skipped.
seen = []
with open('sample_individual_idLink.txt') as input_file, open('individual_bam_link.txt','w') as out:
    out.write('individualID,sampleName,bam\n')
    input_file.readline()
    for line in input_file:
        sample_id = line.split('\t')[0]
        individual_id = line.strip().split('\t')[1]
        # keep only the first occurrence of each individual
        if individual_id in seen:
            continue
        seen.append(individual_id)
        path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/diagnostic/mergeAndReheaderBAMs/results/'+individual_id+'.mdup.sorted.readGroupsAdded.bam'
        if os.path.exists(path):
            bam_file = path
        else:
            print(individual_id,' bam not found')
            print(path)
            continue
        out.write(individual_id+','+individual_id+','+bam_file+'\n')

# LLDeep ID -> sequencing run ID lookup used as a fallback below.
converter = {}
with open('/groups/umcg-bios/tmp03/projects/bbmriSampleInfo/sampleSheetDirectlyFromMdb26-01-2016.txt') as lldeepConverter:
    for line in lldeepConverter:
        line = line.split('\t')
        id = line[0]
        runID = line[1]
        converter[id] = runID

#LL-LLDeep_0043	BD1NR9ACXX-4-19
with open('freeze2_complete_GTE_Groningen_07092016.txt') as input_file,open('individual_bam_link.txt','a') as out:
    input_file.readline()
    for line in input_file:
        line = line.strip().split('\t')
        path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/BAMsReadGroupsAdded/'+line[1]+'.mdup.sorted.readGroupsAdded.bam'
        if os.path.exists(path):
            bam_file = path
        else:
            # fall back to the run ID from the LLDeep converter table
            lldeep_id = '_'.join(line[0].split('_')[1:])
            newID = converter['LL-'+lldeep_id]
            line[0] = newID
            path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/BAMsReadGroupsAdded/'+newID+'.mdup.sorted.readGroupsAdded.bam'
            if os.path.exists(path):
                bam_file = path
            else:
                print(path+' bam not found')
                continue
        out.write(line[0]+','+line[1]+','+bam_file+'\n')

with open('lldeepNotInBiosSamples.txt') as input_file, open('individual_bam_link.txt','a') as out:
    for line in input_file:
        line = line.strip().split('\t')
        sampleID = line[0]
        genotypeID = line[1]
        path = '/groups/umcg-bios/tmp03/projects/masked_BAMs/lldeepNotInBIOS/'+sampleID+'-lib1.bam'
        if os.path.exists(path):
            bam_file = path
        else:
            print(path+' bam not found')
            continue
        out.write(sampleID+','+genotypeID+','+bam_file+'\n')
| [
"niekdeklein@gmail.com"
] | niekdeklein@gmail.com |
46bbf9daf0b61574b23a2631b6a78bc7caa69495 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/documentdb/v20210515/sql_resource_sql_trigger.py | 61599bbe3cb634dfa2ed1f8cf1d6c22dcfb144dd | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,020 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SqlResourceSqlTriggerArgs', 'SqlResourceSqlTrigger']
@pulumi.input_type
class SqlResourceSqlTriggerArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
container_name: pulumi.Input[str],
database_name: pulumi.Input[str],
resource: pulumi.Input['SqlTriggerResourceArgs'],
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input['CreateUpdateOptionsArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
trigger_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SqlResourceSqlTrigger resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input['SqlTriggerResourceArgs'] resource: The standard JSON format of a trigger
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input['CreateUpdateOptionsArgs'] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
:param pulumi.Input[str] trigger_name: Cosmos DB trigger name.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "container_name", container_name)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "resource", resource)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if options is not None:
pulumi.set(__self__, "options", options)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if trigger_name is not None:
pulumi.set(__self__, "trigger_name", trigger_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> pulumi.Input[str]:
"""
Cosmos DB container name.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database name.
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "database_name", value)
@property
@pulumi.getter
def resource(self) -> pulumi.Input['SqlTriggerResourceArgs']:
"""
The standard JSON format of a trigger
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input['SqlTriggerResourceArgs']):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input['CreateUpdateOptionsArgs']]:
"""
A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: Optional[pulumi.Input['CreateUpdateOptionsArgs']]):
pulumi.set(self, "options", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="triggerName")
def trigger_name(self) -> Optional[pulumi.Input[str]]:
"""
Cosmos DB trigger name.
"""
return pulumi.get(self, "trigger_name")
@trigger_name.setter
def trigger_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "trigger_name", value)
class SqlResourceSqlTrigger(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
resource: Optional[pulumi.Input[pulumi.InputType['SqlTriggerResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
trigger_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An Azure Cosmos DB trigger.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[pulumi.InputType['SqlTriggerResourceArgs']] resource: The standard JSON format of a trigger
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
:param pulumi.Input[str] trigger_name: Cosmos DB trigger name.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SqlResourceSqlTriggerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An Azure Cosmos DB trigger.
:param str resource_name: The name of the resource.
:param SqlResourceSqlTriggerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SqlResourceSqlTriggerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
resource: Optional[pulumi.Input[pulumi.InputType['SqlTriggerResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
trigger_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SqlResourceSqlTriggerArgs.__new__(SqlResourceSqlTriggerArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
if container_name is None and not opts.urn:
raise TypeError("Missing required property 'container_name'")
__props__.__dict__["container_name"] = container_name
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__.__dict__["database_name"] = database_name
__props__.__dict__["location"] = location
__props__.__dict__["options"] = options
if resource is None and not opts.urn:
raise TypeError("Missing required property 'resource'")
__props__.__dict__["resource"] = resource
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["trigger_name"] = trigger_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20190801:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20191212:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200301:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200401:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20200901:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210115:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210315:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:SqlResourceSqlTrigger"), pulumi.Alias(type_="azure-native:documentdb/v20210415:SqlResourceSqlTrigger"), 
pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:SqlResourceSqlTrigger")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlResourceSqlTrigger, __self__).__init__(
'azure-native:documentdb/v20210515:SqlResourceSqlTrigger',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlTrigger':
"""
Get an existing SqlResourceSqlTrigger resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SqlResourceSqlTriggerArgs.__new__(SqlResourceSqlTriggerArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["resource"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return SqlResourceSqlTrigger(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> pulumi.Output[Optional['outputs.SqlTriggerGetPropertiesResponseResource']]:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | johnbirdau.noreply@github.com |
3ca302784c9b639fe76172e986949f7b16c5f686 | b5df9e66d292ed332d2164ad6e454e6c5b333968 | /fetch-text-gui.py | e816fd2902cd17b2da960c64d21cb6455f033786 | [] | no_license | ronandoolan2/python-gui | c3be55d34b9ca9e95451419acfa4cdda70bfdc90 | 84e935c1c06a22616224b1bab42a0c550502b467 | refs/heads/master | 2021-01-20T06:52:33.290336 | 2017-05-01T16:09:14 | 2017-05-01T16:09:14 | 89,939,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from Tkinter import *
# Minimal Tkinter demo: one entry box plus a button that prints the entry text.
root = Tk()
svalue = StringVar()  # holds the Entry widget's current text
w = Entry(root, textvariable=svalue)  # single-line text input
# Bug fix: Entry/Button widgets have no `.textbox` attribute — the original
# `w.textbox.grid(...)` raised AttributeError at startup. Calling .grid() on
# the widget itself is what places it in the window.
w.grid(column=0, row=0)

def act():
    # Button callback: echo the current entry contents to stdout.
    print("you entered")
    print('%s' % svalue.get())

foo = Button(root, text="Press Me", command=act)
foo.grid(column=0, row=1)
root.mainloop()
| [
"ronandoolan@gmail.com"
] | ronandoolan@gmail.com |
01e60493883cfefce15b71a07e7676b42da9e91e | aedded4974138c7e510337cd5dd99144a2a388a6 | /Sentimental Analysis using Elastic Search and Python/load_elasticsearch.py | 3913516136c143abc375b1f7e2cfd0c768ee99b8 | [] | no_license | dalalbhargav07/Data-Warehousing-to-Data-Analytics | a1304b4e606d776a48b7b815b945f615c5571ef9 | 96ed04416e3db9012cbb3e9a1a2cf591bbb4a7c8 | refs/heads/master | 2020-04-02T16:07:23.922297 | 2018-10-25T03:06:15 | 2018-10-25T03:06:15 | 154,599,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 06 15:13:45 2018
@author: Hardik Galiawala, Bhargav Dalal
"""
import csv
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Integer, Keyword, Text, connections
connections.create_connection(hosts=['team1.canadaeast.cloudapp.azure.com'])
class Tweet(DocType):
    """elasticsearch-dsl document mapping for one sentiment-scored tweet.

    Indexed into the 'tweet_sentiments' index (see Meta below).
    """
    #user = Text(analyzer='snowball', fields={'raw': Keyword()})
    tweet = Text(analyzer='snowball')       # raw tweet text, snowball-stemmed for search
    score = Text()                          # sentiment score (stored as text, not numeric)
    sentiment = Text(analyzer='snowball')   # sentiment label, e.g. positive/negative
    #hashtags = Keyword()
    created_at = Date()                     # timestamp set at ingestion time, not tweet time

    class Meta:
        index = 'tweet_sentiments'

    def save(self, ** kwargs):
        """Persist this document to Elasticsearch.

        Override kept as a hook for deriving extra fields (an earlier
        prototype computed a line count and extracted hashtags here).
        """
        return super(Tweet, self).save(** kwargs)
# Ingest sentimentAnalysis.csv (columns: tweet, sentiment, score) into
# Elasticsearch, one Tweet document per row, with sequential numeric ids.
# NOTE(review): the 'rb' mode is Python-2 csv usage; on Python 3 csv.reader
# needs a text-mode file (newline='') — confirm the target interpreter.
with open('sentimentAnalysis.csv', 'rb') as csvfile:
    id_number = 0
    sentiment = csv.reader(csvfile, delimiter=',')
    for i in sentiment:
        id_number = id_number + 1
        Tweet.init()  # ensures the index/mapping exists (re-run every row; idempotent)
        tweet = Tweet(meta={'id': id_number})
        tweet.tweet = i[0]
        tweet.sentiment = i[1]
        tweet.score = i[2]
        tweet.created_at = datetime.now()  # ingestion time, not the tweet's own timestamp
        tweet.save()
| [
"dalal.bhargav07@gmail.com"
] | dalal.bhargav07@gmail.com |
afa792b926c2ea3c9563b1ca60d34e69bc4fc2bc | b2ba78fb1e53f92efdc3b6e0be50c81e5dd036ed | /plot_f/plot_offline_mbl_5M_all.py | ef16bbcfd8943228d88a28c336263fa8c582ed91 | [
"MIT"
] | permissive | ShuoZ9379/Integration_SIL_and_MBL | 2dcfae10cb5929c4121a3a8bfceebae8c0b6ba08 | d7df6501a665d65eb791f7fd9b8e85fd660e6320 | refs/heads/master | 2020-07-23T20:04:17.304302 | 2019-09-23T18:58:57 | 2019-09-23T18:58:57 | 207,690,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | import os, argparse, subprocess
import matplotlib.pyplot as plt
import numpy as np
from baselines.common import plot_util as pu
def arg_parser():
    """Build an ArgumentParser that shows each argument's default in --help."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    return parser
def filt(results, name, name_2=''):
    """Return the results whose dirname contains both substrings.

    The second substring defaults to '' (always matches), so a single-name
    filter is just filt(results, name).
    """
    selected = []
    for run in results:
        if name in run.dirname and name_2 in run.dirname:
            selected.append(run)
    return selected
def filt_or(results, name, name_2):
    """Return the results whose dirname contains either substring."""
    return [run for run in results
            if any(tag in run.dirname for tag in (name, name_2))]
def filt_or_or_or(results, name, name_2, name_3, name_4):
    """Return the results whose dirname contains at least one of the four substrings."""
    wanted = (name, name_2, name_3, name_4)
    return [run for run in results
            if any(tag in run.dirname for tag in wanted)]
def main():
    """Load offline-MBL experiment logs for one env and save comparison plots.

    For each algorithm grouping, writes a best-average-return figure (and,
    for the MBL groups, an additional entropy figure) as PDFs under the
    thesis image directory. Paths are machine-specific (hardcoded).
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', type=str, default='HalfCheetah-v2')
    parser.add_argument('--dir', type=str, default='logs')
    parser.add_argument('--thesis', type=str, default='Offline_V0')
    args = parser.parse_args()
    # dirname = '~/Desktop/carla_sample_efficient/data/bk/bkup_EXP2_FINAL/'+args.extra_dir+args.env
    dirname = '~/Desktop/logs/'+args.dir+'/EXP_OFF_24_5M_V0/'+args.env
    results = pu.load_results(dirname)
    # Partition runs by directory-name tags: plain algorithms, +SIL variants,
    # MBL variants, and "comparison" sets (MBL vs SIL for each base algorithm).
    r_copos1_nosil,r_copos2_nosil,r_trpo_nosil,r_ppo_nosil=filt(results,'copos1-'),filt(results,'copos2-'),filt(results,'trpo-'),filt(results,'ppo-')
    r_copos1_sil,r_copos2_sil,r_trpo_sil,r_ppo_sil=filt(results,'copos1+sil-'),filt(results,'copos2+sil-'),filt(results,'trpo+sil-'),filt(results,'ppo+sil-')
    r_mbl_sil=filt(results,'mbl+','sil-')
    # r_mbl_nosil_tmp=[r for r in results if r not in r_mbl_sil]
    r_mbl_nosil=filt_or_or_or(results,'mbl+copos1-','mbl+copos2-','mbl+trpo-','mbl+ppo-')
    r_copos1_comp, r_copos2_comp, r_trpo_comp, r_ppo_comp=filt_or(results,'mbl+copos1','copos1+sil'),filt_or(results,'mbl+copos2','copos2+sil'),filt_or(results,'mbl+trpo','trpo+sil'),filt_or(results,'mbl+ppo','ppo+sil')
    dt={'copos1_nosil':r_copos1_nosil,'copos2_nosil':r_copos2_nosil, 'trpo_nosil':r_trpo_nosil, 'ppo_nosil':r_ppo_nosil,
        'copos1_sil':r_copos1_sil,'copos2_sil':r_copos2_sil, 'trpo_sil':r_trpo_sil, 'ppo_sil':r_ppo_sil,
        'mbl_nosil':r_mbl_nosil, 'mbl_sil':r_mbl_sil,
        'copos1_comp':r_copos1_comp,'copos2_comp':r_copos2_comp, 'trpo_comp':r_trpo_comp, 'ppo_comp':r_ppo_comp}
    for name in dt:
        # Best-average-return curve for this grouping.
        pu.plot_results(dt[name],xy_fn=pu.progress_mbl_vbest_xy_fn,average_group=True,name=name,split_fn=lambda _: '',shaded_err=True,shaded_std=False)
        plt.xlabel('Number of Timesteps [M]')
        plt.ylabel('Best Average Return [-]')
        plt.tight_layout()
        fig = plt.gcf()
        fig.set_size_inches(9, 7.5)
        # fig.savefig("/Users/zsbjltwjj/Desktop/carla_sample_efficient/plot_f/OFFLINE/"+args.extra_dir+args.env+'/'+name+'.pdf',format="pdf")
        fig.savefig("/Users/zsbjltwjj/Desktop/thesis/img/"+args.thesis+"/"+args.env+'/'+name+'.pdf', format="pdf")
        if name=='mbl_nosil' or name=='mbl_sil':
            # Extra entropy plot only for the MBL groupings.
            pu.plot_results(dt[name],xy_fn=pu.progress_default_entropy_xy_fn,average_group=True,name=name,split_fn=lambda _: '',shaded_err=True,shaded_std=False,legend_entropy=1)
            plt.xlabel('Number of Timesteps [M]')
            plt.ylabel('Entropy [-]')
            plt.tight_layout()
            fig = plt.gcf()
            fig.set_size_inches(9, 7.5)
            # fig.savefig("/Users/zsbjltwjj/Desktop/carla_sample_efficient/plot_f/OFFLINE/"+args.extra_dir+args.env+'/'+name+'_entropy.pdf',format="pdf")
            fig.savefig("/Users/zsbjltwjj/Desktop/thesis/img/"+args.thesis+"/"+args.env+'/'+name+'_entropy.pdf', format="pdf")
if __name__ == '__main__':
main()
| [
"zhangshuo19930709@gmail.com"
] | zhangshuo19930709@gmail.com |
0daaa4c643fd769852f1d89126fea5c9a7ff5325 | e4d441f9d9e743e685650b3aee1f09d09d7e4681 | /zad 1.py | d3e16f68a9089f76375a361dc0d1d5d4da03e0cd | [] | no_license | pstatkiewicz/lista-3 | b9f56f462365713dd49a908579f0315f31c49ee7 | d7133d7222928bcb35d7cfde1c004cc94d798ed7 | refs/heads/main | 2023-06-07T20:47:53.101174 | 2021-01-18T14:46:47 | 2021-01-18T14:46:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | import matplotlib.pyplot as plt
def how_many_letters(string):
    """Count the a-z letters in *string* case-insensitively, plot the ten most
    frequent as a bar chart, and return them as [[count, letter], ...] sorted
    by descending count (ties broken by descending letter, as before).

    Non-letter characters are ignored.
    """
    # Bug fix: str.lower() returns a NEW string; the original discarded the
    # result, so uppercase letters were never counted.
    string = string.lower()
    counter = [0] * 26
    for ch in string:
        if 'a' <= ch <= 'z':
            counter[ch.__len__() and ord(ch) - 97] += 1
    list1 = [[counter[i], chr(97 + i)] for i in range(26)]
    # Sorting [count, letter] pairs in reverse orders by count desc, letter desc.
    list1.sort(reverse=True)
    result = list1[0:10]
    letters1 = [pair[1] for pair in result]
    counter1 = [pair[0] for pair in result]
    plt.bar(letters1, counter1)
    plt.title("Częstotliwość występowania liter")
    plt.show()  # matplotlib
    return result
print("Podaj tekst: ")
text=input()
print(how_many_letters(text))
| [
"patrykstatkiewicz1012@gmail.com"
] | patrykstatkiewicz1012@gmail.com |
class Graph:
    """Directed weighted graph over vertices 0..V-1, stored as adjacency lists."""

    def __init__(self, V):
        # adj[u] lists the successors of u; weights maps "u-v" -> edge weight.
        # (Removed the dead, never-used class attribute `v = None` from the
        # original — it only risked shadowing confusion with the local `v`.)
        self.V = V
        self.adj = [[] for i in range(V)]
        self.weights = {}

    def addEdge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.adj[u].append(v)
        # The "u-v" string key format is relied on by Solution.networkDelayTime.
        self.weights[str(u) + "-" + str(v)] = w
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        """Dijkstra shortest paths from node K over N nodes (1-indexed input).

        times: directed edges as [u, v, w]. Returns the time for the signal to
        reach all nodes (max shortest distance), or -1 if some node is
        unreachable. Frontier selection scans linearly, so this is O(V^2 + E).
        """
        graph = Graph(N)
        # dist is 0-indexed; node ids are shifted down by one throughout.
        dist = [float("+inf") for i in range(N)]
        dist[K - 1] = 0
        visited = {}
        for uv in times:
            graph.addEdge(uv[0] - 1, uv[1] - 1, uv[2])
        # varr is the frontier (dict used as a set), seeded with the source.
        varr = {K - 1: 1}
        while varr:
            # Pop the frontier vertex with the smallest tentative distance.
            su = self.getLowestCostV(varr, dist)
            del varr[su]
            visited[su] = 1
            for v in graph.adj[su]:
                new_dist = dist[su] + graph.weights[str(su) + "-" + str(v)]
                if new_dist < dist[v]:
                    dist[v] = new_dist
                    if v not in visited:
                        varr[v] = 1
        largest = float("-inf")
        if len(visited) != N:
            return -1  # some node never settled -> unreachable
        for d in dist:
            largest = max(largest, d)
        return largest

    def getLowestCostV(self, varr, dist):
        """Return the frontier vertex in varr with the smallest dist value (linear scan)."""
        sw = float("inf")
        sv = None
        for v in varr:
            if sw > dist[v]:
                sw = dist[v]
                sv = v
        return sv
| [
"ashish@groomefy.com"
] | ashish@groomefy.com |
307ab6fcace65005c39968f593e93497733e5f09 | caaf7723580684886559dedba9a0cfa19036243d | /autofocus.py | 6372eb067440d35876fbc6f9048db2bbfc5c9398 | [] | no_license | mike-fang/led_micro | 27214b5d9e67abd3dbc85c2962be13bb82c83723 | c08105b1cd84836fed2dea11074e1d47d13f099a | refs/heads/master | 2022-11-28T10:46:09.647242 | 2020-08-02T19:44:22 | 2020-08-02T19:44:22 | 275,946,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | from elp_usb_cam import ELP_Camera
from capture_msi import init_rb, STD_EXPOSURE
import numpy as np
from asi_controller import AsiController
from time import time, sleep
from get_sharpness import grad_sharp
import cv2
import matplotlib.pylab as plt
def get_sharpness(img):
    """Return the mean gradient magnitude of *img*, used as a focus score."""
    grad_y, grad_x = np.gradient(img)
    magnitude = np.hypot(grad_x, grad_y)  # elementwise sqrt(gx^2 + gy^2)
    return np.average(magnitude)
class AutoFocus:
    """Coarse-to-fine autofocus over the stage's z axis.

    Drives the stage (control), captures frames from the camera (cam), and
    toggles the LED board (rb) while scoring focus with grad_sharp.
    """

    def __init__(self, cam, rb, control, rng=10, steps=20, maxrng=100):
        # rng: half-width of the first search window (stage units)
        # steps: number of z positions sampled per scan
        # maxrng: hard limit on how far from the starting z the search may go
        self.cam = cam
        self.rb = rb
        self.stage = control
        self.rng = rng
        self.steps = steps
        self.maxrng = maxrng

    def scan(self, z0, rng, steps):
        """Sample sharpness at `steps` z positions in [z0-rng, z0+rng].

        The window is clamped to [self.z_low, self.z_high] (set by step()).
        Returns (z_scan, sharp_scan) arrays.
        """
        z_low = max(z0-rng, self.z_low)
        z_high = min(z0+rng, self.z_high)
        z_scan = np.linspace(z_low, z_high, steps)
        sharp_scan = np.zeros_like(z_scan)
        for n, z in enumerate(z_scan):
            self.stage.goto_z(z)
            sleep(1)  # let the stage settle before imaging
            # Grab several frames; presumably flushes stale buffered frames
            # so the last one reflects the new z — TODO confirm camera behavior.
            for _ in range(5):
                frame = self.cam.capture_img()
            sharp = grad_sharp(frame)
            sharp_scan[n] = sharp
        return z_scan, sharp_scan

    def step(self, iter=2):
        """Run `iter` scan passes, shrinking the window around the best z each time,
        then move the stage to the sharpest position found."""
        # turn on white led
        state = np.zeros(8)
        state[4] = 1  # channel 4 assumed to be the white LED — TODO confirm wiring
        self.rb.set_state(state)
        rng = self.rng
        steps = self.steps
        z0 = self.stage.where_z()
        # Absolute search bounds, centered on the starting position.
        self.z_low = z0 - self.maxrng
        self.z_high = z0 + self.maxrng
        for i in range(iter):
            print(f'Searching in range {z0-rng, z0+rng}')
            Z, S = self.scan(z0, rng, steps)
            z0 = Z[S.argmax()]  # recenter on the sharpest sample
            rng /= (steps / 3)  # shrink the window for the next, finer pass
        self.stage.goto_z(z0)
        print(f'Best depth -- z={z0:.4f}')
        sleep(1.)
        # turn off leds
        state = np.zeros(8)
        self.rb.set_state(state)
self.rb.set_state(state)
# Manual hardware test: run a two-pass autofocus, then display the focused frame.
if __name__ == '__main__':
    # init
    cam = ELP_Camera(0)  # first attached ELP USB camera
    rb = init_rb()       # LED board controller
    control = AsiController(config_file='./asi_config.yml', init_xy=False)
    rng=20
    steps=10
    af = AutoFocus(cam, rb, control, rng=rng, steps=steps)
    af.step(iter=2)
    plt.imshow(cam.capture_img())
    plt.show()
| [
"1michaelfang@gmail.com"
] | 1michaelfang@gmail.com |
3ad1be3f4021991b8dff98164ef3af62fb67b912 | d0fe3d0316aa90ef68c9a39f0335d53602d2be44 | /node_modules/socket.io-servicebus/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/ws/build/config.gypi | 11810bdab21d746e4e7e0b083659e41b147c5e81 | [
"MIT",
"Apache-2.0"
] | permissive | bitchwhocodes/photobooth | 98184bc8eb1ed17ad37176ff216b30ee42d471e6 | a8f8182d9d971333899b3c427f9182b607b3992a | refs/heads/master | 2020-12-24T17:17:49.726329 | 2015-04-11T06:46:31 | 2015-04-11T06:46:31 | 33,573,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,839 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_has_winsdk": "true",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "ia32",
"uv_library": "static_library",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"visibility": "",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\stmulcah\\.node-gyp\\0.12.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"access": "",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cafile": "",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm\\etc\\npmignore",
"group": "",
"heading": "npm",
"https_proxy": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\stmulcah\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "0.12.1",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"prefix": "C:\\Users\\stmulcah\\AppData\\Roaming\\npm",
"production": "",
"proprietary_attribs": "true",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"scope": "",
"searchexclude": "",
"searchopts": "",
"searchsort": "name",
"shell": "C:\\windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"spin": "true",
"strict_ssl": "true",
"tag": "latest",
"tmp": "C:\\Users\\stmulcah\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\stmulcah\\.npmrc",
"user_agent": "npm/2.5.1 node/v0.12.1 win32 ia32",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| [
"stacey.mulcahy@gmail.com"
] | stacey.mulcahy@gmail.com |
35501df946cd308d781d7730ff743efd5f3dc66a | fbb16a594e43cf57690c2ada793e8a8a9386caa7 | /okane.py | d18ccb6d6d37df30b2e5e262be14172fb0b0faff | [] | no_license | voyager42/okane | 20315a8bdb1ce923bd0a547d095b6b11d4e6913d | 7472399d2ab66f0962e8faf4d6768fb810727d61 | HEAD | 2016-09-01T16:54:51.881820 | 2014-01-12T16:11:37 | 2014-01-12T16:11:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,248 | py | '''
Created on Feb 8, 2012
@author: johan
'''
import sys
import os
import transaction
import wx
import random
import wx.html
import Controller
import Model
import View
import TransactionView
import TotalsView
import csv
import Shapes
import logging
import logging.config
import math
logging.basicConfig(level=logging.WARN)
motionlog=logging.getLogger('motion')
motionlog.setLevel("WARN")
eventlog = logging.getLogger('event')
eventlog.setLevel("INFO")
wildcard = "CSV files (*.csv)|*.csv|" \
"All files (*.*)|*.*"
def calcMouseVelocity(posOld, posNew):
"Computes the velocity of the mouse"
dX = posNew[0] - posOld[0]
dY = posNew[1] - posOld[1]
motionlog.debug("dX = %s" % (dX))
motionlog.debug("dY = %s" % (dY))
angle = math.atan2(dY, dX) + 0.5*math.pi
speed = 0.05* math.hypot(dX, dY)
motionlog.debug("Speed = %s, Angle = %s deg" % (speed, math.degrees(angle)))
if speed*speed < 10:
return (speed, angle)
else:
return (0,0)
class Frame(wx.Frame):
def __init__(self, parent, title, size=wx.DefaultSize):
wx.Frame.__init__(self, parent, wx.ID_ANY, title, wx.DefaultPosition, size)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_TIMER, self.OnTimer)
self.Bind(wx.EVT_LEFT_DOWN, self.OnClick)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnClick)
self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
self.Bind(wx.EVT_RIGHT_UP, self.OnRelease)
self.Bind(wx.EVT_MOTION, self.OnMotion)
menuBar = wx.MenuBar()
menu = wx.Menu()
m_open = menu.Append(wx.ID_FILE, "O&pen\tAlt-O", "Open file")
m_exit = menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Close window and exit program.")
menuBar.Append(menu, "&File")
self.SetMenuBar(menuBar)
self.statusbar = self.CreateStatusBar()
# MVC
# self.controller = Controller.CController()
# self.model = Model.CModel()
# self.controller.setModel(self.model)
# self.totalsView = TotalsView.TotalsView(self, "Category/Totals View", (400, 300))
# self.totalsView.setModel(self.model)
# self.controller.setView(self.totalsView)
#self.transactionView = TransactionView.TransactionView(self, "Transaction View", (400,200))
#self.transactionView.setModel(self.model)
#self.controller.setView(self.transactionView)
# events
self.Bind(wx.EVT_MENU, self.OnOpen, m_open)
self.Bind(wx.EVT_MENU, self.OnClose, m_exit)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_SIZE, self.OnSize)
#self.Bind(wx.EVT_LEFT_UP, self.OnClick)
self.transactionList = []
self.transactionDict = {}
self.transactionTotals = {}
self.modelChangedCallbacks = []
self.shapes = list()
self.clickedShapes = list()
self.rightClickedShapes = list()
self.timer = wx.Timer(self)
self.timer.Start(100)
self.shapes = list()
self.clickedShapes = list()
self.rightClickedShapes = list()
self.frameState="NORMAL"
#self.generateShapes()
t = transaction.Bucket(pos=(10,10), size=(70,70), amt=0, desc="Bucket", cat="test", droptarget=True)
#t = Shapes.RandomRect()
self.shapes.append(t)
# t = Rect((0,0), (30,30))
#t = Shapes.RandomRect()
#self.shapes.append(t)
self.lastMovePosition = (0,0)
self.selectedShape = None
# layout
#box = wx.BoxSizer(wx.VERTICAL)
#box.Add(self.totalsView, wx.EXPAND)
#box.Add(self.transactionView, wx.EXPAND)
#self.SetSizer(box)
#self.Layout()
def dumpTransactionList(self):
for t in self.shapes:
print t
def openFile(self, fn):
ifile = open(fn, "rb")
reader = csv.reader(ifile)
for row in reader:
if len(row) > 0 and row[0] == "HIST":
self.shapes.append(transaction.DrawableTransaction(date=row[1], amt=row[3], cat=row[4], desc=row[5]))
ifile.close()
#self.dumpTransactionList()
#self.createDicts()
#self.dumpDicts()
#self.notifyModelChanged()
def OnClose(self, event):
dlg = wx.MessageDialog(self,
"Do you really want to close this application?",
"Confirm Exit", wx.OK|wx.CANCEL|wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_OK:
self.Destroy()
def OnOpen(self, event):
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR)
dlg.ShowModal()
self.openFile(dlg.GetFilename())
#self.totalsView.start()
dlg.Destroy()
self.Refresh()
def OnSize(self, event):
print "ON SIZE"
hsize = event.GetSize()[0] * 0.75
self.SetSizeHints(minW=-1, minH=hsize, maxH=hsize)
self.SetTitle(str(event.GetSize()))
self.Refresh()
def isLeftClick(self, e):
return (e.GetButton() == wx.MOUSE_BTN_LEFT)
def isRightClick(self, e):
return (e.GetButton() == wx.MOUSE_BTN_RIGHT)
def guessSelectedShape(self, e):
x, y = e.GetPosition()
if self.isLeftClick(e):
self.clickedShapes = [s for s in self.shapes if s.contains(x, y)]
self.clickedShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
eventlog.info("clickedShapes: %s" % (self.clickedShapes))
if e.ShiftDown():
if len(self.clickedShapes) > 1:
return self.clickedShapes[1]
else:
try:
clicked = self.clickedShapes[0]
return clicked
except:
for s in self.shapes:
s.state = "NORMAL"
return None
if self.isRightClick(e):
self.rightClickedShapes = [s for s in self.shapes if s.contains(x, y)]
self.rightClickedShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
eventlog.info("rightClickedShapes: %s" % (self.rightClickedShapes))
if e.ShiftDown():
if len(self.rightClickedShapes) > 1:
return self.rightClickedShapes[1]
else:
try:
clicked = self.rightClickedShapes[0]
return clicked
except:
for s in self.shapes:
s.state = "NORMAL"
return None
def add(self, s):
self.shapes.append(t)
def OnMotion(self, e):
newX, newY = e.GetPosition()
if self.frameState == "POSSIBLE_LEFT_DRAG" and e.LeftIsDown():
self.frameState = self.selectedShape.state = "LEFT_DRAGGING"
elif self.frameState == "LEFT_DRAGGING" and e.LeftIsDown():
# or ((self.frameState == "RIGHT_DRAGGING" and e.RightIsDown()):
oldX, oldY = self.lastMovePosition
deltaX = newX - oldX
deltaY = newY - oldY
self.selectedShape.moveBy(deltaX, deltaY)
else:
self.frameState = "MOTION"
calcMouseVelocity(self.lastMovePosition, (newX, newY))
self.lastMovePosition = e.GetPosition()
self.Refresh()
def OnClick(self, e):
x, y = e.GetPosition()
if self.isLeftClick(e):
eventlog.info("LEFT CLICK")
for s in self.clickedShapes:
s.isClicked=False
s.state="NORMAL"
del self.clickedShapes[:]
# motionlog.debug("OnClick (%s, %s)" % (x, y)
self.selectedShape = self.guessSelectedShape(e)
eventlog.info("%s", self.selectedShape)
self.statusbar.SetStatusText("%r" %(self.selectedShape))
try:
self.selectedShape.isClicked=True
self.frameState = self.selectedShape.state = "POSSIBLE_LEFT_DRAG"
self.selectedShape.velocity =(0,0)
except:
pass
elif self.isRightClick(e):
eventlog.info("RIGHT CLICK")
for s in self.rightClickedShapes:
s.isRightClicked=False
del self.rightClickedShapes[:]
try:
self.selectedShape.isRightClicked=True
self.frameState = self.selectedShape.state = "POSSIBLE_RIGHT_DRAG"
except:
pass
self.lastPosition = (x, y)
# motionlog.debug("Shape %s has a hit" % (self.clickedShapes[0])
self.Refresh()
e.Skip() # recommended practice
def OnRelease(self,e):
if self.frameState=="LEFT_DRAGGING":
newX, newY = e.GetPosition()
oldX, oldY = self.lastPosition
deltaX = newX - oldX
deltaY = newY - oldY
self.Refresh()
def OnTimer(self, e):
for i in self.shapes:
i.updatePosition()
(x,y) = i.position
self.targetShapes = [s for s in self.shapes if s.contains(x, y) and s is not i and i.isVisible()]
if len(self.targetShapes) > 0:
self.targetShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
s=self.targetShapes[0]
eventlog.info("TARGET SHAPE : %s", s)
# try:
if s.isDropTarget:
eventlog.info("DO SOMETHING WITH THE COLLISION EVENT")
s.add(i.amt)
i.container=s
i.hide()
else:
eventlog.info("COLLISION BUT %s is not a drop target ", s)
motionlog.debug("DROPPED")
# except:
# eventlog.info("NO TARGET SHAPE")
self.Refresh()
def OnPaint(self, e):
dc = wx.PaintDC(self)
for i in self.shapes:
i.drawself(dc)
# if self.selectedShape != None:
# (x,y) = self.selectedShape.position
# self.targetShapes = [s for s in self.shapes if s.contains(x, y) and s is not self.selectedShape]
# if len(self.targetShapes) > 0:
# self.targetShapes.sort(key=lambda shape: shape.zOrder, reverse=True)
# s=self.targetShapes[0]
# eventlog.info("TARGET SHAPE : %s", s)
# try:
# if s.isDropTarget:
# eventlog.info("DO SOMETHING WITH THE COLLISION EVENT")
# s.add(self.selectedShape.amt)
# self.selectedShape.container=s
# else:
# eventlog.info("COLLISION BUT %s is not a drop target ", s)
# motionlog.debug("DROPPED")
# except:
# eventlog.info("NO TARGET SHAPE")
def main():
app = wx.App(redirect=False) # Error messages go to popup window
top = Frame(None, "Okane", size=(620, 620))
top.Show()
app.MainLoop()
if __name__ == "__main__":
random.seed()
main()
| [
"johan.kohler@gmail.com"
] | johan.kohler@gmail.com |
e1d4c5491ac53c61a1a27e3e470c44305f3885f3 | 56bcef6090d53b43afb60240079d6c71ed01f808 | /kalakriti/customer_block/views.py | 1bdd9cf807b7f6d63fd9e418f0f3a454f3d6a9c3 | [] | no_license | hrs2203/soad_project_2020 | 1091da3b167c863a28229ddc95859fdd9396f637 | 067853dd52676062358b5841fdcca0feefaabfc4 | refs/heads/main | 2023-02-04T20:44:23.771516 | 2020-12-22T06:50:11 | 2020-12-22T06:50:11 | 308,274,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,119 | py | from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.models import User, AnonymousUser
from customer_block.forms import (
user_login_form,
business_login_form,
user_signup_form,
business_signup_form,
)
from customer_block.models import CustomerModel, BusinessModel, OrderModel
import os, random, string
from pathlib import Path
from image_model.models import Product
from image_model.forms import upload_product_form
from django.core.files.storage import default_storage
def home_page(request):
return render(request=request, template_name="homepage.html", context={})
def login_user_page(request):
context = dict()
if request.method == "POST":
form = user_login_form(request.POST)
if form.is_valid():
user = authenticate(
request,
username=request.POST["userName"],
password=request.POST["password"],
)
if user:
login(request, user)
else:
form = user_login_form()
context["user"] = request.user
context["form"] = form
if request.user.is_authenticated:
return redirect("/user")
print("INNNN")
return render(request=request, template_name="login_user.html", context=context)
def login_business_page(request):
context = dict()
if request.method == "POST":
form = business_login_form(request.POST)
if form.is_valid():
user = authenticate(
request,
username=request.POST["userName"],
password=request.POST["password"],
)
if user:
login(request, user)
else:
form = business_login_form()
context["user"] = request.user
context["form"] = form
if request.user.is_authenticated:
return redirect("/business")
return render(request=request, template_name="login_business.html", context=context)
def logout_page(request):
logout(request)
return redirect("/")
def signup_user_page(request):
context = {}
if request.method == "POST":
form = user_signup_form(request.POST)
if form.is_valid():
try:
newUser = User.objects.create_user(
email=form.cleaned_data["userEmail"],
username=form.cleaned_data["userName"],
password=form.cleaned_data["password"],
)
newUser.is_staff = False
newUser.save()
except:
return redirect("/signup/user")
newUserDetail = CustomerModel(userModel=newUser)
newUserDetail.save()
user = authenticate(
request,
username=request.POST["userName"],
password=request.POST["password"],
)
if user:
login(request, user)
return redirect("/")
else:
print("not auth")
else:
form = user_signup_form()
context["form"] = form
return render(request=request, template_name="signup_user.html", context=context)
def signup_business_page(request):
context = {}
if request.method == "POST":
form = business_signup_form(request.POST)
if form.is_valid():
try:
newUser = User.objects.create_user(
email=form.cleaned_data["businessEmail"],
username=form.cleaned_data["businessName"],
password=form.cleaned_data["password"],
)
newUser.is_staff = True
newUser.save()
except:
print("some error")
return redirect("/signup/business")
newBusinessDetail = BusinessModel(
userModel=newUser,
serviceCharge=form.cleaned_data["serviceCharge"],
businessDescription=form.cleaned_data["businessDescription"],
)
newBusinessDetail.save()
user = authenticate(
request,
username=request.POST["businessName"],
password=request.POST["password"],
)
if user:
login(request, user)
return redirect("/")
else:
print("not auth")
else:
form = business_signup_form()
context["form"] = form
return render(
request=request, template_name="signup_business.html", context=context
)
def user_page(request):
context = dict()
try:
context["customerDetail"] = CustomerModel.objects.filter(
userModel=request.user
)[0]
context["orderHistory"] = OrderModel.objects.filter(
userModelLink=context["customerDetail"]
)
except:
context = dict()
return render(request=request, template_name="user_page.html", context=context)
def business_page(request):
context = dict()
try:
context["businessDetail"] = BusinessModel.objects.filter(
userModel=request.user
)[0]
context["orderHistory"] = OrderModel.objects.filter(
businessModelLink=context["businessDetail"]
)
except:
context = dict()
return render(request=request, template_name="business_page.html", context=context)
def choice_page(request):
context = dict()
context["productList"] = Product.objects.all()[::-1]
context["dealerList"] = BusinessModel.objects.all()
return render(
request=request, template_name="design_list_page.html", context=context
)
def confirm_payment_page(request):
if request.method == "POST":
paymentAmount = int(request.POST["totalAmount"])
tempProductModel = Product.objects.filter(id=request.POST["productModelId"])[0]
tempCustomerModel = CustomerModel.objects.filter(
id=request.POST["userModelId"]
)[0]
tempCustomerModel.balance -= paymentAmount
tempCustomerModel.save()
tempbusinessModel = BusinessModel.objects.filter(
id=request.POST["businessModelId"]
)[0]
tempbusinessModel.balance += paymentAmount
tempbusinessModel.save()
tempOrder = OrderModel(
productModelLink=tempProductModel,
userModelLink=tempCustomerModel,
businessModelLink=tempbusinessModel,
paymentStatus=True,
deliveryStatus=False,
totalAmount=request.POST["totalAmount"],
)
tempOrder.save()
return redirect("/user")
def add_money_to_user(request):
if not request.user.is_authenticated:
return('/login/user')
if request.user.is_staff:
return redirect('/business')
context = dict()
if request.method == 'POST':
return redirect('/user')
return render(request=request, template_name="add_money_page.html", context=context )
def deliver_custom_product(request):
if request.method == 'POST':
orderId = request.POST['orderId']
tempOrderObj = OrderModel.objects.filter(id=orderId)[0]
try:
tempOrderObj.deliveryStatus = True
tempOrderObj.save()
except:
pass
if request.user.is_staff:
return redirect('/business')
else:
return redirect('/user')
def payment_page(request):
context = dict()
context["businessId"] = None
context["productId"] = None
context["businessObject"] = None
context["productObject"] = None
context["customerDetail"] = None
context["canUserPay"] = False
context["totalPaymentAmount"] = 0
if not request.user.is_staff:
if request.method == "POST":
try:
context["businessId"] = request.POST["selectedBusinessId"]
context["productId"] = request.POST["selectedProductId"]
context["businessObject"] = BusinessModel.objects.filter(
id=context["businessId"]
)[0]
context["productObject"] = Product.objects.filter(
id=context["productId"]
)[0]
context["totalPaymentAmount"] = (
context["businessObject"].serviceCharge
+ context["productObject"].ProductPrice
)
context["customerDetail"] = CustomerModel.objects.filter(
userModel=request.user
)[0]
context["canUserPay"] = (
context["customerDetail"].balance >= context["totalPaymentAmount"]
)
except:
context["businessId"] = None
context["productId"] = None
context["businessObject"] = None
context["productObject"] = None
context["customerDetail"] = None
context["canUserPay"] = False
context["totalPaymentAmount"] = 0
return render(request=request, template_name="make_payment.html", context=context)
def genRandomName(fileName):
"""Generate unique Name for images
Args:
fileName (str): fileName to get its extention
Returns:
str: unique file name
"""
fileExt = fileName.split(".")[-1]
randName = "".join([random.choice(string.ascii_lowercase) for i in range(20)])
resp = f"{randName}.{fileExt}"
BASE_FILE = Path(__file__).resolve().parent.parent
FILE_PATH = os.path.join(BASE_FILE, "image_model", "images", resp)
while os.path.isfile(FILE_PATH):
randName = "".join([random.choice(string.ascii_lowercase) for i in range(20)])
resp = f"{randName}.{fileExt}"
BASE_FILE = Path(__file__).resolve().parent.parent
FILE_PATH = os.path.join(BASE_FILE, "image_model", "images", resp)
return resp
def upload_custom_product(request):
""" Web interface to upload image """
if not request.user.is_authenticated:
return redirect("/login/business")
if not request.user.is_staff:
return redirect("/user")
context = {}
if request.method == "POST":
form = upload_product_form(request.POST, request.FILES)
if form.is_valid:
temp_file = request.FILES["ProductImage"]
tempFileName = genRandomName(temp_file.name)
file_name = default_storage.save(tempFileName, temp_file)
tempProduct = Product(
ProductName=request.POST["ProductName"],
ProductUrl=f"/static/{tempFileName}",
ProductDescription=request.POST["ProductDescription"],
ProductPrice=request.POST["ProductPrice"],
)
tempProduct.save()
return redirect("/make_choice")
form = upload_product_form()
context["form"] = form
return render(request=request, template_name="upload_product.html", context=context)
| [
"hrishabh2203@gmail.com"
] | hrishabh2203@gmail.com |
1cd6b2cde90dae2214ce5243b597fbe6393438cf | 15da36fb301d51425ce38e7af6e77a64e05e62d5 | /test_iterator.py | b0ef7cea978b72ad37b7c2383065723f86ad077b | [] | no_license | bassemhossam/Video-Summarization | 7375681d5d875d2644ad1f05d5ab66936db5a019 | 321cfb659e742a96407c57c6bcac101062c3cff0 | refs/heads/master | 2020-12-05T11:14:08.537642 | 2020-07-19T09:57:57 | 2020-07-19T09:57:57 | 232,091,949 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # Helper code used for testing and seeing the outputs from the iterator.
from data_iterator import *
import torch
device = torch.device("cpu")
iterator = SSIterator(64, 15, 20,"test", device,max_videos=200)
iterator.start()
batch = iterator.next()
counter = 0
while batch != None:
counter+=1
batch = iterator.next()
print(counter)
print(counter)
| [
"noreply@github.com"
] | bassemhossam.noreply@github.com |
e04c0bf21ef6ef4a8ce6e6a89f934139e335a5d8 | f098c361ee79bb8b7a8402fcf20b37f17fb36983 | /Back-End/Python/Basics/Part -1 - Functional/04 - First-Class-Functions/send_email_partial.py | f536c40a3c3798957ca6c45af1bfb96feb7036ee | [
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | rnsdoodi/Programming-CookBook | 4d619537a6875ffbcb42cbdaf01d80db1feba9b4 | 9bd9c105fdd823aea1c3f391f5018fd1f8f37182 | refs/heads/master | 2023-09-05T22:09:08.282385 | 2021-10-31T11:57:40 | 2021-10-31T11:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | from functools import partial
def sendmail(to, subject, body):
# code to send email
print('To:{0}, Subject:{1}, Body:{2}'.format(to, subject, body))
email_admin = 'palin@python.edu'
email_devteam = 'idle@python.edu;cleese@python.edu'
# Now when we want to send emails we would have to write things like:
# Email 1
sendmail(email_admin, 'My App Notification', 'the parrot is dead.')
# Email 2
sendmail(';'.join((email_admin, email_devteam)), 'My App Notification',
'the ministry is closed until further notice.')
# Email 1
# To:palin@python.edu,
# Subject:My App Notification,
# Body:the parrot is dead.
# Email 2
# To:palin@python.edu;idle@python.edu;cleese@python.edu,
# Subject:My App Notification,
# Body:the ministry is closed until further notice.
# Partial
# Email 1
send_admin = partial(sendmail, email_admin, 'For you eyes only')
# Email 2
send_dev = partial(sendmail, email_devteam, 'Dear IT:')
# Email 3
send_all = partial(sendmail, ';'.join((email_admin, email_devteam)), 'Loyal Subjects')
send_admin('the parrot is dead.')
send_all('the ministry is closed until further notice.')
def sendmail(to, subject, body, *, cc=None, bcc=email_devteam):
# code to send email
print('To:{0}, Subject:{1}, Body:{2}, CC:{3}, BCC:{4}'.format(to,
subject,
body,
cc,
bcc))
# Email 1
send_admin = partial(sendmail, email_admin, 'General Admin')
# Email 2
send_admin_secret = partial(sendmail, email_admin, 'For your eyes only', cc=None, bcc=None)
send_admin('and now for something completely different')
#To:palin@python.edu,
# Subject:General Admin,
# Body:and now for something completely different,
# CC:None,
# BCC:idle@python.edu;cleese@python.edu
send_admin_secret('the parrot is dead!')
#To:palin@python.edu,
# Subject:For your eyes only,
# Body:the parrot is dead!,
# CC:None,
# BCC:None
send_admin_secret('the parrot is no more!', bcc=email_devteam)
# To:palin@python.edu,
# Subject:For your eyes only,
# Body:the parrot is no more!,
# CC:None,
# BCC:idle@python.edu;cleese@python.edu | [
"58447627+Koubae@users.noreply.github.com"
] | 58447627+Koubae@users.noreply.github.com |
32d28d9915732158ab3594e0d09aa170fcda9791 | 8127f4197870cda55bdb064deef6412415844a10 | /manage.py | cc31fe6a73af46a194a920f177b85ce51bb70a2f | [] | no_license | mbaiye/django | a5480d529ef4e8abcd43e90587dcd1bb96d143e6 | ec665b577635285724c0ce5dca5f92ea344994a1 | refs/heads/main | 2023-08-12T23:50:03.200932 | 2021-09-13T20:30:42 | 2021-09-13T20:30:42 | 357,112,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoP.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"moyosorebaiye@gmail.com"
] | moyosorebaiye@gmail.com |
3f01613eeb26d292392466fcaaaf61554c04e567 | 2b488c48c2d5a6996ead7ba34c99d8cd24c25052 | /api/getCreatedDate.py | 98d071724bae6717ba5239699d2de9bfcb9248ab | [
"MIT"
] | permissive | yashrastogi16/steemapi-django | 84f527ea918a48e7e7b6de2cd17d0b2e43109659 | 716455da839686e0305461e735119da9560e0a2d | refs/heads/master | 2020-03-17T13:38:36.653144 | 2018-05-05T17:51:05 | 2018-05-05T17:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from django.http import HttpResponse
from steem import Steem
def index(request):
try:
s = Steem(nodes=["https://api.steemit.com"])
try:
created = s.get_account(request.GET['a'])['created']
except:
created = None
return HttpResponse(created, content_type='text/plain')
except:
return HttpResponse('To use this API call, please supply param a=accountname, substituting accountname with the account to see its creation date.\n\n'
'Example: https://api.steem.place/getCreatedDate/?a=moisesmcardona\n\n'
'Returns: Created date', content_type='text/plain') | [
"moises@moises-studios.com"
] | moises@moises-studios.com |
bd373d5d4ad4b354c760abc092fd061bfaab15e3 | 777fa9edef9c1a88423762c85adaf6716244bcd5 | /app/api/v2/views/user_views.py | bf6b3b4d81eeea36ab3f84041b9bc1eeefa157af | [] | no_license | ansarisan/vigilant-spoon | 1f12c05a408ee36ab8c2371d595f48f4ec3f27ce | 50eaeac7e3f1748a8cbc2ae8fa0e4e4619a851aa | refs/heads/master | 2020-04-16T12:43:42.428369 | 2019-01-14T05:21:00 | 2019-01-14T05:21:00 | 165,593,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from flask import Flask, request, jsonify
from .. import version2
from .. models.user_model import UserModels
from werkzeug.security import check_password_hash
from werkzeug.exceptions import BadRequest
@version2.route("/users", methods=["GET"])
def hello():
""" List of all registered users """
resp = UserModels().fetch_users()
return jsonify(resp)
@version2.route("/auth/signup", methods=["POST"])
def register_user():
""" Registers a user given details """
data = request.get_json()
resp = UserModels(data).register_user()
return jsonify(resp), 201
@version2.route("/auth/login", methods=["POST"])
def login_user():
""" Logs in a registered user """
data = request.get_json()
try:
password = data["password"]
username = data["username"]
except KeyError as p:
raise BadRequest(
"{} should be present in the provided data".format(p))
resp = UserModels([username, password]).login_user()
return jsonify(resp), resp["status"]
| [
"Leewelkarani@gmail.com"
] | Leewelkarani@gmail.com |
54d30e07523c4b8a72948a31db878f4c25809cfc | 7b48cfecdf478bfffbf1cc9cb20c62100898eb6a | /mordred/mordred.py | b1a214096714b6e219f614269a6e2b424fbaef4c | [] | no_license | albertinisg/mordred | cc2cd17806cac208857f3e04b48b1cd13095e9b6 | e141e1c6ad09ea89f194c410285903542b6b557e | refs/heads/master | 2021-01-13T15:50:36.193316 | 2017-02-05T15:08:32 | 2017-02-05T15:08:32 | 76,866,269 | 0 | 0 | null | 2016-12-19T13:52:35 | 2016-12-19T13:52:35 | null | UTF-8 | Python | false | false | 12,992 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Luis Cañas-Díaz <lcanas@bitergia.com>
# Alvaro del Castillo <acs@bitergia.com>
#
import configparser
import logging
import time
import json
import sys
import requests
import threading
from datetime import datetime, timedelta
from grimoire_elk.utils import get_connectors
from mordred.task_collection import TaskRawDataCollection
from mordred.task_enrich import TaskEnrich
from mordred.task_identities import TaskIdentitiesCollection, TaskIdentitiesInit, TaskIdentitiesMerge
from mordred.task_manager import TasksManager
from mordred.task_panels import TaskPanels, TaskPanelsMenu
SLEEPFOR_ERROR = """Error: You may be Arthur, King of the Britons. But you still """ + \
"""need the 'sleep_for' variable in sortinghat section\n - Mordred said."""
ES_ERROR = "Before starting to seek the Holy Grail, make sure your ElasticSearch " + \
"at '%(uri)s' is available!!\n - Mordred said."
logger = logging.getLogger(__name__)
class ElasticSearchError(Exception):
"""Exception raised for errors in the list of backends
"""
def __init__(self, expression):
self.expression = expression
class Mordred:
def __init__(self, conf_file):
self.conf_file = conf_file
self.conf = None
def update_conf(self, conf):
self.conf = conf
def read_conf_files(self):
conf = {}
logger.debug("Reading conf files")
config = configparser.ConfigParser()
config.read(self.conf_file)
logger.debug(config.sections())
if 'min_update_delay' in config['general'].keys():
conf['min_update_delay'] = config.getint('general','min_update_delay')
else:
# if no parameter is included, the update won't be performed more
# than once every minute
conf['min_update_delay'] = 60
# FIXME: Read all options in a generic way
conf['es_collection'] = config.get('es_collection', 'url')
conf['es_enrichment'] = config.get('es_enrichment', 'url')
conf['autorefresh_on'] = config.getboolean('es_enrichment', 'autorefresh')
conf['studies_on'] = config.getboolean('es_enrichment', 'studies')
projects_file = config.get('projects','projects_file')
conf['projects_file'] = projects_file
with open(projects_file,'r') as fd:
projects = json.load(fd)
conf['projects'] = projects
conf['collection_on'] = config.getboolean('phases','collection')
conf['identities_on'] = config.getboolean('phases','identities')
conf['enrichment_on'] = config.getboolean('phases','enrichment')
conf['panels_on'] = config.getboolean('phases','panels')
conf['update'] = config.getboolean('general','update')
try:
conf['kibana'] = config.get('general','kibana')
except configparser.NoOptionError:
pass
conf['sh_bots_names'] = config.get('sortinghat', 'bots_names').split(',')
# Optional config params
try:
conf['sh_no_bots_names'] = config.get('sortinghat', 'no_bots_names').split(',')
except configparser.NoOptionError:
pass
conf['sh_database'] = config.get('sortinghat', 'database')
conf['sh_host'] = config.get('sortinghat', 'host')
conf['sh_user'] = config.get('sortinghat', 'user')
conf['sh_password'] = config.get('sortinghat', 'password')
aux_matching = config.get('sortinghat', 'matching')
conf['sh_matching'] = aux_matching.replace(' ','').split(',')
aux_autoprofile = config.get('sortinghat', 'autoprofile')
conf['sh_autoprofile'] = aux_autoprofile.replace(' ','').split(',')
conf['sh_orgs_file'] = config.get('sortinghat', 'orgs_file')
conf['sh_load_orgs'] = config.getboolean('sortinghat', 'load_orgs')
try:
conf['sh_sleep_for'] = config.getint('sortinghat','sleep_for')
except configparser.NoOptionError:
if conf['identities_on'] and conf['update']:
logging.error(SLEEPFOR_ERROR)
sys.exit(1)
try:
conf['sh_ids_file'] = config.get('sortinghat', 'identities_file')
except configparser.NoOptionError:
logger.info("No identities files")
for backend in self.__get_backends():
try:
raw = config.get(backend, 'raw_index')
enriched = config.get(backend, 'enriched_index')
conf[backend] = {'raw_index':raw, 'enriched_index':enriched}
for p in config[backend]:
try:
conf[backend][p] = config.getboolean(backend, p)
except ValueError:
conf[backend][p] = config.get(backend, p)
except configparser.NoSectionError:
pass
return conf
def check_es_access(self):
##
## So far there is no way to distinguish between read and write permission
##
def _ofuscate_server_uri(uri):
if uri.rfind('@') > 0:
pre, post = uri.split('@')
char_from = pre.rfind(':')
result = uri[0:char_from + 1] + '****@' + post
return result
else:
return uri
es = self.conf['es_collection']
try:
r = requests.get(es, verify=False)
if r.status_code != 200:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
except:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
if self.conf['enrichment_on'] or self.conf['studies_on']:
es = self.conf['es_enrichment']
try:
r = requests.get(es, verify=False)
if r.status_code != 200:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
except:
raise ElasticSearchError(ES_ERROR % {'uri' : _ofuscate_server_uri(es)})
def __get_backends(self):
gelk_backends = list(get_connectors().keys())
extra_backends = ["google_hits"]
return gelk_backends + extra_backends
def __get_repos_by_backend(self):
#
# return dict with backend and list of repositories
#
output = {}
projects = self.conf['projects']
for backend in self.__get_backends():
for pro in projects:
if backend in projects[pro]:
if not backend in output:
output[backend] = projects[pro][backend]
else:
output[backend] = output[backend] + projects[pro][backend]
# backend could be in project/repo file but not enabled in
# mordred conf file
enabled = {}
for k in output:
if k in self.conf:
enabled[k] = output[k]
# logger.debug('repos to be retrieved: %s ', enabled)
return enabled
def execute_tasks (self, tasks_cls):
"""
Just a wrapper to the execute_batch_tasks method
"""
self.execute_batch_tasks(tasks_cls)
def execute_nonstop_tasks(self, tasks_cls):
"""
Just a wrapper to the execute_batch_tasks method
"""
self.execute_batch_tasks(tasks_cls, self.conf['sh_sleep_for'], self.conf['min_update_delay'], False)
def execute_batch_tasks(self, tasks_cls, big_delay=0, small_delay=0, wait_for_threads=True):
    """
    Start a task manager per backend to complete the tasks.

    :param task_cls: list of tasks classes to be executed
    :param big_delay: seconds before global tasks are executed, should be days usually
    :param small_delay: seconds before blackend tasks are executed, should be minutes
    :param wait_for_threads: boolean to set when threads are infinite or
    should be synchronized in a meeting point
    """

    def _split_tasks(tasks_cls):
        """
        we internally distinguish between tasks executed by backend
        and tasks executed with no specific backend. """
        backend_t = []
        global_t = []
        for t in tasks_cls:
            if t.is_backend_task(t):
                backend_t.append(t)
            else:
                global_t.append(t)
        return backend_t, global_t

    logger.debug(' Task Manager starting .. ')
    backend_tasks, global_tasks = _split_tasks(tasks_cls)
    logger.debug('backend_tasks = %s' % (backend_tasks))
    logger.debug('global_tasks = %s' % (global_tasks))
    threads = []
    # stopper won't be set unless wait_for_threads is True
    stopper = threading.Event()
    # launching threads for tasks by backend: one TasksManager per backend
    # that has repositories configured.
    if len(backend_tasks) > 0:
        repos_backend = self.__get_repos_by_backend()
        for backend in repos_backend:
            # Start new Threads and add them to the threads list to complete
            t = TasksManager(backend_tasks, backend, repos_backend[backend],
                             stopper, self.conf, small_delay)
            threads.append(t)
            t.start()
    # launch thread for global tasks
    if len(global_tasks) > 0:
        # FIXME timer is applied to all global_tasks, does it make sense?
        gt = TasksManager(global_tasks, None, None, stopper, self.conf, big_delay)
        threads.append(gt)
        gt.start()
        if big_delay > 0:
            # NOTE(review): datetime.now() is naive, so the '%Z' timezone
            # field renders empty in this log line — confirm if intended.
            when = datetime.now() + timedelta(seconds=big_delay)
            when_str = when.strftime('%a, %d %b %Y %H:%M:%S %Z')
            logger.info("%s will be executed on %s" % (global_tasks, when_str))
    if wait_for_threads:
        time.sleep(1)  # Give enough time create and run all threads
        stopper.set()  # All threads must stop in the next iteration
        logger.debug(" Waiting for all threads to complete. This could take a while ..")
        # Wait for all threads to complete
        for t in threads:
            t.join()
    logger.debug(" Task manager and all its tasks (threads) finished!")
def run(self):
    """Top-level engine loop: collect raw items, merge identities, enrich,
    build panels, then (optionally) keep updating forever.

    Each phase only runs when its *_on flag is set in the configuration.
    """
    # logger.debug("Starting Mordred engine ...")
    logger.info("")
    logger.info("----------------------------")
    logger.info("Starting Mordred engine ...")
    logger.info("- - - - - - - - - - - - - - ")
    self.update_conf(self.read_conf_files())
    # check we have access to the needed ES
    self.check_es_access()
    # do we need ad-hoc scripts?
    tasks_cls = []
    # all_tasks_cls accumulates the repeatable phases replayed by the
    # nonstop update loop at the bottom (init/panel tasks run only once).
    all_tasks_cls = []
    # phase one
    # we get all the items with Perceval + identites browsing the
    # raw items
    if self.conf['identities_on']:
        tasks_cls = [TaskIdentitiesInit]
        self.execute_tasks(tasks_cls)
    if self.conf['collection_on']:
        tasks_cls = [TaskRawDataCollection]
        # self.execute_tasks(tasks_cls)
        if self.conf['identities_on']:
            # Collect identities together with the raw data.
            tasks_cls.append(TaskIdentitiesCollection)
        all_tasks_cls += tasks_cls
        self.execute_tasks(tasks_cls)
    if self.conf['identities_on']:
        tasks_cls = [TaskIdentitiesMerge]
        all_tasks_cls += tasks_cls
        self.execute_tasks(tasks_cls)
    if self.conf['enrichment_on']:
        # raw items + sh database with merged identities + affiliations
        # will used to produce a enriched index
        tasks_cls = [TaskEnrich]
        all_tasks_cls += tasks_cls
        self.execute_tasks(tasks_cls)
    if self.conf['panels_on']:
        # Remove first the dashboard menu
        tasks_cls = [TaskPanels, TaskPanelsMenu]
        self.execute_tasks(tasks_cls)
    logger.debug(' - - ')
    logger.debug('Meeting point 0 reached')
    time.sleep(1)
    # Continuous mode: replay the collection/identities/enrichment phases
    # until the 'update' flag is cleared.
    while self.conf['update']:
        self.execute_nonstop_tasks(all_tasks_cls)
    logger.info("Finished Mordred engine ...")
| [
"acs@bitergia.com"
] | acs@bitergia.com |
300682d48f2cb716193d184532e5d3018b6188db | 8e69eee9b474587925e22413717eb82e4b024360 | /v2.5.7/toontown/shtiker/HtmlView.py | 6f65c5d143302ce38dc6ebb01cf2b6f26205dff4 | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 11,473 | py | import array, sys
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from panda3d.core import Texture
from panda3d.core import CardMaker
from panda3d.core import NodePath
from panda3d.core import Point3, Vec3, Vec4, VBase4D, Point2
from panda3d.core import PNMImage
from panda3d.core import TextureStage
from panda3d.core import Texture
from panda3d.core import WindowProperties
from direct.interval.IntervalGlobal import *
from panda3d.core import AwWebView
from panda3d.core import AwWebCore
# Size in pixels of the browser page actually rendered by the web view.
WEB_WIDTH_PIXELS = 784
WEB_HEIGHT_PIXELS = 451
# Texture dimensions: powers of two large enough to hold the page.
WEB_WIDTH = 1024
WEB_HEIGHT = 512
WEB_HALF_WIDTH = WEB_WIDTH / 2
# Game window size assumed by the aspect/scale calculations below.
WIN_WIDTH = 800
WIN_HEIGHT = 600
# Process-wide AwWebCore instance, created lazily by the first HtmlView.
GlobalWebcore = None
class HtmlView(DirectObject):
    """Renders an Awesomium web page into a Panda3D texture on a 2-D quad.

    A single shared AwWebCore (GlobalWebcore) drives one AwWebView per
    HtmlView instance; the rendered BGRA pixels are copied into a texture
    applied to a CardMaker quad parented under aspect2d. Mouse events are
    translated into page coordinates and injected into the browser.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('HtmlView')
    # When true, the page is additionally split across two half-width textures.
    useHalfTexture = config.GetBool('news-half-texture', 0)

    def __init__(self, parent=aspect2d):
        global GlobalWebcore
        self.parent = parent
        # Last mouse position forwarded to the web view, in page pixels.
        self.mx = 0
        self.my = 0
        self.htmlFile = 'index.html'
        self.transparency = False
        if GlobalWebcore:
            pass
        else:
            # Lazily create the process-wide web core on first use and map
            # every HTTP error status to a local error page.
            GlobalWebcore = AwWebCore(AwWebCore.LOGVERBOSE, True, AwWebCore.PFBGRA)
            GlobalWebcore.setBaseDirectory('.')
            for errResponse in xrange(400, 600):
                GlobalWebcore.setCustomResponsePage(errResponse, 'error.html')
        self.webView = GlobalWebcore.createWebView(WEB_WIDTH, WEB_HEIGHT, self.transparency, False, 70)
        frameName = ''
        inGameNewsUrl = self.getInGameNewsUrl()
        # Full-page BGRA pixel buffer, initialized to opaque black.
        self.imgBuffer = array.array('B')
        for i in xrange(WEB_WIDTH * WEB_HEIGHT):
            self.imgBuffer.append(0)
            self.imgBuffer.append(0)
            self.imgBuffer.append(0)
            self.imgBuffer.append(255)
        if self.useHalfTexture:
            # Separate buffers for the left/right half-width textures.
            self.leftBuffer = array.array('B')
            for i in xrange(WEB_HALF_WIDTH * WEB_HEIGHT):
                self.leftBuffer.append(0)
                self.leftBuffer.append(0)
                self.leftBuffer.append(0)
                self.leftBuffer.append(255)
            self.rightBuffer = array.array('B')
            for i in xrange(WEB_HALF_WIDTH * WEB_HEIGHT):
                self.rightBuffer.append(0)
                self.rightBuffer.append(0)
                self.rightBuffer.append(0)
                self.rightBuffer.append(255)
        self.setupTexture()
        if self.useHalfTexture:
            self.setupHalfTextures()
        # Forward mouse buttons to the embedded browser.
        self.accept('mouse1', self.mouseDown, [AwWebView.LEFTMOUSEBTN])
        self.accept('mouse3', self.mouseDown, [AwWebView.RIGHTMOUSEBTN])
        self.accept('mouse1-up', self.mouseUp, [AwWebView.LEFTMOUSEBTN])
        self.accept('mouse3-up', self.mouseUp, [AwWebView.RIGHTMOUSEBTN])

    def getInGameNewsUrl(self):
        """Return the in-game news URL.

        Priority: the 'in-game-news-url' config override, then the
        launcher's GAME_IN_GAME_NEWS_URL value, then the fallback URL.
        """
        result = config.GetString('fallback-news-url', 'http://cdn.toontown.disney.go.com/toontown/en/gamenews/')
        override = config.GetString('in-game-news-url', '')
        if override:
            self.notify.info('got an override url, using %s for in a game news' % override)
            result = override
        else:
            try:
                launcherUrl = base.launcher.getValue('GAME_IN_GAME_NEWS_URL', '')
                if launcherUrl:
                    result = launcherUrl
                    self.notify.info('got GAME_IN_GAME_NEWS_URL from launcher using %s' % result)
                else:
                    self.notify.info('blank GAME_IN_GAME_NEWS_URL from launcher, using %s' % result)
            except:
                self.notify.warning('got exception getting GAME_IN_GAME_NEWS_URL from launcher, using %s' % result)
        return result

    def setupTexture(self):
        """Create the full-page quad and bind the browser output texture to it."""
        cm = CardMaker('quadMaker')
        cm.setColor(1.0, 1.0, 1.0, 1.0)
        aspect = base.camLens.getAspectRatio()
        htmlWidth = 2.0 * aspect * WEB_WIDTH_PIXELS / float(WIN_WIDTH)
        htmlHeight = 2.0 * float(WEB_HEIGHT_PIXELS) / float(WIN_HEIGHT)
        cm.setFrame(-htmlWidth / 2.0, htmlWidth / 2.0, -htmlHeight / 2.0, htmlHeight / 2.0)
        # Only the WEB_*_PIXELS region of the power-of-two texture holds the
        # page, so clamp the UV range to that region.
        bottomRightX = WEB_WIDTH_PIXELS / float(WEB_WIDTH + 1)
        bottomRightY = WEB_HEIGHT_PIXELS / float(WEB_HEIGHT + 1)
        cm.setUvRange(Point2(0, 1 - bottomRightY), Point2(bottomRightX, 1))
        card = cm.generate()
        self.quad = NodePath(card)
        self.quad.reparentTo(self.parent)
        self.guiTex = Texture('guiTex')
        self.guiTex.setupTexture(Texture.TT2dTexture, WEB_WIDTH, WEB_HEIGHT, 1, Texture.TUnsignedByte, Texture.FRgba)
        self.guiTex.setMinfilter(Texture.FTLinear)
        self.guiTex.setKeepRamImage(True)
        self.guiTex.makeRamImage()
        self.guiTex.setWrapU(Texture.WMRepeat)
        self.guiTex.setWrapV(Texture.WMRepeat)
        ts = TextureStage('webTS')
        self.quad.setTexture(ts, self.guiTex)
        self.quad.setTexScale(ts, 1.0, -1.0)  # browser rows are top-down; flip V
        self.quad.setTransparency(0)
        self.quad.setTwoSided(True)
        self.quad.setColor(1.0, 1.0, 1.0, 1.0)
        self.calcMouseLimits()

    def setupHalfTextures(self):
        """Create the two half-width quads/textures and scratch PNMImages."""
        self.setupLeftTexture()
        self.setupRightTexture()
        self.fullPnmImage = PNMImage(WEB_WIDTH, WEB_HEIGHT, 4)
        self.leftPnmImage = PNMImage(WEB_HALF_WIDTH, WEB_HEIGHT, 4)
        self.rightPnmImage = PNMImage(WEB_HALF_WIDTH, WEB_HEIGHT, 4)

    def setupLeftTexture(self):
        """Create the quad/texture showing the left half of the page."""
        cm = CardMaker('quadMaker')
        cm.setColor(1.0, 1.0, 1.0, 1.0)
        aspect = base.camLens.getAspectRatio()
        htmlWidth = 2.0 * aspect * WEB_WIDTH / float(WIN_WIDTH)
        htmlHeight = 2.0 * float(WEB_HEIGHT) / float(WIN_HEIGHT)
        cm.setFrame(-htmlWidth / 2.0, 0, -htmlHeight / 2.0, htmlHeight / 2.0)
        card = cm.generate()
        self.leftQuad = NodePath(card)
        self.leftQuad.reparentTo(self.parent)
        self.leftGuiTex = Texture('guiTex')
        self.leftGuiTex.setupTexture(Texture.TT2dTexture, WEB_HALF_WIDTH, WEB_HEIGHT, 1, Texture.TUnsignedByte, Texture.FRgba)
        self.leftGuiTex.setKeepRamImage(True)
        self.leftGuiTex.makeRamImage()
        self.leftGuiTex.setWrapU(Texture.WMClamp)
        self.leftGuiTex.setWrapV(Texture.WMClamp)
        ts = TextureStage('leftWebTS')
        self.leftQuad.setTexture(ts, self.leftGuiTex)
        self.leftQuad.setTexScale(ts, 1.0, -1.0)
        self.leftQuad.setTransparency(0)
        self.leftQuad.setTwoSided(True)
        self.leftQuad.setColor(1.0, 1.0, 1.0, 1.0)

    def setupRightTexture(self):
        """Create the quad/texture showing the right half of the page."""
        cm = CardMaker('quadMaker')
        cm.setColor(1.0, 1.0, 1.0, 1.0)
        aspect = base.camLens.getAspectRatio()
        htmlWidth = 2.0 * aspect * WEB_WIDTH / float(WIN_WIDTH)
        htmlHeight = 2.0 * float(WEB_HEIGHT) / float(WIN_HEIGHT)
        cm.setFrame(0, htmlWidth / 2.0, -htmlHeight / 2.0, htmlHeight / 2.0)
        card = cm.generate()
        self.rightQuad = NodePath(card)
        self.rightQuad.reparentTo(self.parent)
        self.rightGuiTex = Texture('guiTex')
        self.rightGuiTex.setupTexture(Texture.TT2dTexture, WEB_HALF_WIDTH, WEB_HEIGHT, 1, Texture.TUnsignedByte, Texture.FRgba)
        self.rightGuiTex.setKeepRamImage(True)
        self.rightGuiTex.makeRamImage()
        self.rightGuiTex.setWrapU(Texture.WMClamp)
        self.rightGuiTex.setWrapV(Texture.WMClamp)
        ts = TextureStage('rightWebTS')
        self.rightQuad.setTexture(ts, self.rightGuiTex)
        self.rightQuad.setTexScale(ts, 1.0, -1.0)
        self.rightQuad.setTransparency(0)
        self.rightQuad.setTwoSided(True)
        self.rightQuad.setColor(1.0, 1.0, 1.0, 1.0)

    def calcMouseLimits(self):
        """Compute the quad's bounds in aspect2d space for mouse mapping."""
        ll = Point3()
        ur = Point3()
        self.quad.calcTightBounds(ll, ur)
        self.notify.debug('ll=%s ur=%s' % (ll, ur))
        offset = self.quad.getPos(aspect2d)
        self.notify.debug('offset = %s ' % offset)
        ll.setZ(ll.getZ() + offset.getZ())
        ur.setZ(ur.getZ() + offset.getZ())
        self.notify.debug('new LL=%s, UR=%s' % (ll, ur))
        relPointll = self.quad.getRelativePoint(aspect2d, ll)
        self.notify.debug('relPoint = %s' % relPointll)
        self.mouseLL = (aspect2d.getScale()[0] * ll[0], aspect2d.getScale()[2] * ll[2])
        self.mouseUR = (aspect2d.getScale()[0] * ur[0], aspect2d.getScale()[2] * ur[2])
        self.notify.debug('original mouseLL=%s, mouseUR=%s' % (self.mouseLL, self.mouseUR))

    def writeTex(self, filename='guiText.png'):
        """Debug helper: dump the current browser texture to disk."""
        self.notify.debug('writing texture')
        self.guiTex.generateRamMipmapImages()
        self.guiTex.write(filename)

    def toggleRotation(self):
        # NOTE(review): self.interval is never created in this class, so this
        # raises AttributeError unless a subclass defines it — confirm.
        if self.interval.isPlaying():
            self.interval.finish()
        else:
            self.interval.loop()

    def mouseDown(self, button):
        # 'wakeup' keeps the client from going into sleep/AFK handling.
        messenger.send('wakeup')
        self.webView.injectMouseDown(button)

    def mouseUp(self, button):
        self.webView.injectMouseUp(button)

    def reload(self):
        # Intentionally a no-op in this implementation.
        pass

    def zoomIn(self):
        self.webView.zoomIn()

    def zoomOut(self):
        self.webView.zoomOut()

    def toggleTransparency(self):
        self.transparency = not self.transparency
        self.webView.setTransparent(self.transparency)

    def update(self, task):
        """Per-frame task: forward mouse moves and re-upload dirty pixels."""
        if base.mouseWatcherNode.hasMouse():
            x, y = self._translateRelativeCoordinates(base.mouseWatcherNode.getMouseX(), base.mouseWatcherNode.getMouseY())
            # Only inject a move event when the pixel position changed.
            if self.mx - x != 0 or self.my - y != 0:
                self.webView.injectMouseMove(x, y)
                self.mx, self.my = x, y
        if self.webView.isDirty():
            # Render straight into our BGRA buffer, then copy into the texture.
            self.webView.render(self.imgBuffer.buffer_info()[0], WEB_WIDTH * 4, 4)
            Texture.setTexturesPower2(2)
            textureBuffer = self.guiTex.modifyRamImage()
            textureBuffer.setData(self.imgBuffer.tostring())
            if self.useHalfTexture:
                # Split the full image into the two half textures and show
                # those instead of the full quad.
                self.guiTex.store(self.fullPnmImage)
                self.leftPnmImage.copySubImage(self.fullPnmImage, 0, 0, 0, 0, WEB_HALF_WIDTH, WEB_HEIGHT)
                self.rightPnmImage.copySubImage(self.fullPnmImage, 0, 0, WEB_HALF_WIDTH, 0, WEB_HALF_WIDTH, WEB_HEIGHT)
                self.leftGuiTex.load(self.leftPnmImage)
                self.rightGuiTex.load(self.rightPnmImage)
                self.quad.hide()
            Texture.setTexturesPower2(1)
        GlobalWebcore.update()
        return Task.cont

    def _translateRelativeCoordinates(self, x, y):
        """Map render2d mouse coords to browser pixel coords (y inverted)."""
        sx = int((x - self.mouseLL[0]) / (self.mouseUR[0] - self.mouseLL[0]) * WEB_WIDTH_PIXELS)
        sy = WEB_HEIGHT_PIXELS - int((y - self.mouseLL[1]) / (self.mouseUR[1] - self.mouseLL[1]) * WEB_HEIGHT_PIXELS)
        return (sx, sy)

    def unload(self):
        """Tear down event hooks and destroy the web view."""
        self.ignoreAll()
        self.webView.destroy()
        self.webView = None
        return

    # --- Awesomium browser callbacks (mostly unused hooks) ---

    def onCallback(self, name, args):
        if name == 'requestFPS':
            pass

    def onBeginNavigation(self, url, frameName):
        pass

    def onBeginLoading(self, url, frameName, statusCode, mimeType):
        pass

    def onFinishLoading(self):
        self.notify.debug('finished loading')

    def onReceiveTitle(self, title, frameName):
        pass

    def onChangeTooltip(self, tooltip):
        pass

    def onChangeCursor(self, cursor):
        pass

    def onChangeKeyboardFocus(self, isFocused):
        pass

    def onChangeTargetURL(self, url):
        pass
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
e9f959214d9f66ad9770deeda8fcf6daf9801267 | 11c3dc3f51ec2cab15ce0b8a3be8e8aa06f4686c | /backend/api/product/migrations/0008_auto_20200418_1644.py | bd446b559c6e9eead0c451222bd95f86e3b53465 | [
"MIT"
] | permissive | 0mri/GStore | b3797986473a211ab0581cd80948cfbec4a8a0f3 | 232cf03a6deab15ae4178933210a7431496d9dd0 | refs/heads/master | 2022-11-26T11:47:58.825764 | 2020-08-01T10:01:01 | 2020-08-01T10:01:01 | 259,165,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 2.2 on 2020-04-18 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change Product.price to DecimalField(7, 2)."""

    dependencies = [
        ('product', '0007_auto_20200410_1743'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='price',
            # 7 total digits, 2 after the decimal point (max 99999.99).
            field=models.DecimalField(decimal_places=2, max_digits=7),
        ),
    ]
| [
"omri@efra.im"
] | omri@efra.im |
8864c5625cee7be5cd7ac66b57768f555f562984 | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /chrome/browser/resources/PRESUBMIT.py | e7a3e430986f0a2d2062a15497b6b5e3e4784501 | [
"BSD-3-Clause"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 5,423 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for files in chrome/browser/resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
ACTION_XML_PATH = '../../../tools/metrics/actions/actions.xml'
def CheckUserActionUpdate(input_api, output_api, action_xml_path):
  """Checks if any new user action has been added.

  Scans changed lines of affected .html files for metric="..." attributes
  and warns when the referenced user action is not yet registered in
  tools/metrics/actions/actions.xml.
  """
  if any('actions.xml' == input_api.os_path.basename(f) for f in
         input_api.change.LocalPaths()):
    # If actions.xml is already included in the changelist, the PRESUBMIT
    # for actions.xml will do a more complete presubmit check.
    return []

  file_filter = lambda f: f.LocalPath().endswith('.html')
  action_re = r'(^|\s+)metric\s*=\s*"([^ ]*)"'
  current_actions = None
  for f in input_api.AffectedFiles(file_filter=file_filter):
    for line_num, line in f.ChangedContents():
      match = input_api.re.search(action_re, line)
      if match:
        # Loads contents in tools/metrics/actions/actions.xml to memory. It's
        # loaded only once.
        if not current_actions:
          with open(action_xml_path) as actions_f:
            current_actions = actions_f.read()

        metric_name = match.group(2)
        # Boolean controls register as <name>_Enable / <name>_Disable pairs.
        is_boolean = IsBoolean(f.NewContents(), metric_name, input_api)

        # Search for the matched user action name in |current_actions|.
        if not IsActionPresent(current_actions, metric_name, is_boolean):
          return [output_api.PresubmitPromptWarning(
              'File %s line %d: %s is missing in '
              'tools/metrics/actions/actions.xml. Please run '
              'tools/metrics/actions/extract_actions.py to update.'
              % (f.LocalPath(), line_num, metric_name), [])]
  return []
def IsActionPresent(current_actions, metric_name, is_boolean):
  """Checks if metric_name is defined in the actions file.

  For a non-boolean action a single name="<metric>" entry must exist; for a
  boolean action both the _Disable and _Enable variants must be present.

  Args:
    current_actions: The content of the actions.xml file.
    metric_name: The name for which the check should be done.
    is_boolean: Whether the action comes from a boolean control.
  """
  if is_boolean:
    variants = ('name="%s_Disable"' % metric_name,
                'name="%s_Enable"' % metric_name)
    return all(v in current_actions for v in variants)
  return 'name="%s"' % metric_name in current_actions
def IsBoolean(new_content_lines, metric_name, input_api):
  """Check whether the action defined in the changed code is boolean or not.

  Determines whether the action comes from a boolean control based on the
  HTML element's attributes: a boolean datatype, a checkbox, or a radio
  input with a true/false value.

  Args:
    new_content_lines: List of changed lines.
    metric_name: The name for which the check should be done.
    input_api: The presubmit InputApi; only its `re` module is used.

  Returns:
    bool: True if the metric's element looks like a boolean control.
  """
  new_content = '\n'.join(new_content_lines)
  html_element_re = r'<(.*?)(^|\s+)metric\s*=\s*"%s"(.*?)>' % (metric_name)
  # Both halves are raw strings: the second half previously used '\s' inside
  # a plain string literal, which is an invalid escape sequence.
  type_re = (r'datatype\s*=\s*"boolean"|type\s*=\s*"checkbox"|'
             r'type\s*=\s*"radio".*?value\s*=\s*("true"|"false")')

  match = input_api.re.search(html_element_re, new_content, input_api.re.DOTALL)
  # Look for the boolean-control attributes before or after metric="...".
  return bool(match and
              any(input_api.re.search(type_re, match.group(i)) for i in (1, 3)))
def CheckHtml(input_api, output_api):
  """Enforce the 80-column line-length limit on affected .html files."""
  is_html = lambda f: f.LocalPath().endswith('.html')
  return input_api.canned_checks.CheckLongLines(
      input_api, output_api, 80, is_html)
def RunOptimizeWebUiTests(input_api, output_api):
  """Run the unit tests for the optimize_webui script in this directory."""
  test_file = input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'optimize_webui_test.py')
  return input_api.canned_checks.RunUnitTests(input_api, output_api,
                                              [test_file])
def _CheckWebDevStyle(input_api, output_api):
  """Run the shared web_dev_style checks from //tools on this change."""
  results = []
  try:
    import sys
    old_sys_path = sys.path[:]
    cwd = input_api.PresubmitLocalPath()
    # web_dev_style lives under //tools, three directories up from here.
    sys.path += [input_api.os_path.join(cwd, '..', '..', '..', 'tools')]
    import web_dev_style.presubmit_support
    results += web_dev_style.presubmit_support.CheckStyle(input_api, output_api)
  finally:
    # Always restore sys.path so the import hack doesn't leak into other
    # presubmit checks.
    sys.path = old_sys_path
  return results
def _CheckChangeOnUploadOrCommit(input_api, output_api):
  """Shared presubmit body for both upload and commit.

  Runs the user-action check, conditional HTML/optimize_webui checks,
  web-dev style checks, and patch formatting (including JS).
  """
  results = CheckUserActionUpdate(input_api, output_api, ACTION_XML_PATH)
  affected = input_api.AffectedFiles()
  # Only run the extra checks when the relevant file types were touched.
  if any(f for f in affected if f.LocalPath().endswith('.html')):
    results += CheckHtml(input_api, output_api)
  if any(f for f in affected if f.LocalPath().endswith('optimize_webui.py')):
    results += RunOptimizeWebUiTests(input_api, output_api)
  results += _CheckWebDevStyle(input_api, output_api)
  results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api,
                                                         check_js=True)
  return results
def CheckChangeOnUpload(input_api, output_api):
  # Identical checks run on upload and on commit.
  return _CheckChangeOnUploadOrCommit(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  return _CheckChangeOnUploadOrCommit(input_api, output_api)
def PostUploadHook(cl, change, output_api):
  """After upload, opt this CL into the optional Closure compilation trybot."""
  return output_api.EnsureCQIncludeTrybotsAreAdded(
      cl,
      [
          'master.tryserver.chromium.linux:closure_compilation',
      ],
      'Automatically added optional Closure bots to run on CQ.')
| [
"jacob-chen@iotwrt.com"
] | jacob-chen@iotwrt.com |
5be4e98dfe3ad8ffc97705d8d5603a2acd95de51 | 81b384655e970623333971ed063d85ebfe940ed5 | /hallo/test/modules/random/test_eight_ball.py | 09873ea43e78dbc223dba1ce75f8e3d48e17082f | [] | no_license | wirenic/Hallo | c3c8a3f11dd1f03729385f2761e0a6b216c6e1d2 | 68595816fd146c4af35e3f1bc91c58cdc6fa741c | refs/heads/master | 2023-03-18T06:50:14.111226 | 2021-02-26T07:59:33 | 2021-02-26T07:59:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | import unittest
from hallo.events import EventMessage
from hallo.inc.commons import Commons
from hallo.modules.random.eight_ball import EightBall
from hallo.test.test_base import TestBase
from hallo.test.modules.random.mock_chooser import MockChooser
class EightBallTest(TestBase, unittest.TestCase):
    """Tests the magic eight ball chat function with a deterministic chooser."""

    def setUp(self):
        super().setUp()
        # Swap Commons.get_random_choice for a mock so responses are predictable.
        self.chooser = MockChooser()
        self.old_choice_method = Commons.get_random_choice
        Commons.get_random_choice = self.chooser.choose

    def tearDown(self):
        super().tearDown()
        # Restore the real RNG so other tests are unaffected.
        Commons.get_random_choice = self.old_choice_method

    def test_eightball(self):
        # The bot's reply must be one of the known responses (plus a period).
        all_responses = (
            EightBall.RESPONSES_YES_TOTALLY
            + EightBall.RESPONSES_YES_PROBABLY
            + EightBall.RESPONSES_MAYBE
            + EightBall.RESPONSES_NO
        )
        self.function_dispatcher.dispatch(
            EventMessage(self.server, None, self.test_user, "eight ball")
        )
        data = self.server.get_send_data(1, self.test_user, EventMessage)
        assert data[0].text.lower() in [
            "{}.".format(x.lower()) for x in all_responses
        ], "Response isn't valid."

    def test_eightball_with_message(self):
        # Same check, but with a trailing question after the command name.
        all_responses = (
            EightBall.RESPONSES_YES_TOTALLY
            + EightBall.RESPONSES_YES_PROBABLY
            + EightBall.RESPONSES_MAYBE
            + EightBall.RESPONSES_NO
        )
        self.function_dispatcher.dispatch(
            EventMessage(
                self.server,
                None,
                self.test_user,
                "magic eightball will this test pass?",
            )
        )
        data = self.server.get_send_data(1, self.test_user, EventMessage)
        assert data[0].text.lower() in [
            "{}.".format(x.lower()) for x in all_responses
        ], "Response isn't valid."

    def test_all_responses(self):
        # Drive the mocked chooser through every index and verify each
        # possible response is produced exactly once.
        all_responses = (
            EightBall.RESPONSES_YES_TOTALLY
            + EightBall.RESPONSES_YES_PROBABLY
            + EightBall.RESPONSES_MAYBE
            + EightBall.RESPONSES_NO
        )
        responses = []
        for x in range(len(all_responses)):
            # Set RNG
            self.chooser.choice = x
            # Shake magic eight ball
            self.function_dispatcher.dispatch(
                EventMessage(self.server, None, self.test_user, "magic8-ball")
            )
            data = self.server.get_send_data(1, self.test_user, EventMessage)
            responses.append(data[0].text.lower()[:-1])
            assert data[0].text.lower() in [
                "{}.".format(x.lower()) for x in all_responses
            ], "Response isn't valid."
        # Check all responses given
        assert len(responses) == len(
            all_responses
        ), "Not the same number of responses as possible responses"
        assert set(responses) == set(
            [x.lower() for x in all_responses]
        ), "Not all responses are given"
| [
"joshua@coales.co.uk"
] | joshua@coales.co.uk |
d698a20dedf519aa173880d68d7f6e8ed00066a9 | 87e424de1cb55b221b2b5f7c239850ae81db1e5e | /venv/bin/pip | 1ae730144a641a60e28ae5ee1596bd9f20af2f23 | [] | no_license | aliu917/NetworkDesign | ac6029c6dd4a1843d946ed7f9636bf5f41aaff54 | 75441ffa27a6aab1ce5a8d20469d059dac8709c0 | refs/heads/master | 2022-11-23T10:36:35.036412 | 2020-05-02T21:19:02 | 2020-05-02T21:19:30 | 281,579,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | #!/Users/angelaliu/PycharmProjects/Angela-s-Friends-Become-Network-Designers/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools console-script wrapper that launches pip 19.0.3.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0]: strip the '-script.py'/'.exe' suffix that setuptools
    # Windows launchers append, so pip sees its plain program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve and invoke the 'pip' console entry point; exit with its status.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"aliu917@berkeley.edu"
] | aliu917@berkeley.edu | |
9a4f50681591049e468dde5df12c8247abf21f49 | 2e6cc4c6f5e3d532a83bc4ad2960b9ed6d9c6e5a | /releasenotes/source/conf.py | 4c6a1c415f03062d5860803d58bc5c5aef064c71 | [
"Apache-2.0"
] | permissive | Nexenta/manila | dba8cc9f18bf4ed54f2671fe8dc747bb4b7c2e38 | c7a044733b0be8b4aafd962f04a1a781b16a580b | refs/heads/master | 2023-07-22T09:14:13.728972 | 2023-07-10T16:14:47 | 2023-07-10T16:14:47 | 50,369,420 | 1 | 3 | Apache-2.0 | 2023-07-10T16:14:48 | 2016-01-25T17:55:21 | Python | UTF-8 | Python | false | false | 9,080 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Manila Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# -- General configuration ------------------------------------------------

# Sphinx extension modules enabled for this release-notes build.
extensions = [
    'oslosphinx',
    'reno.sphinxext',
]

# Paths, relative to this directory, that contain templates.
templates_path = ['_templates']

# Suffix of source filenames and the root document of the toctree.
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'Manila Release Notes'
copyright = u'2015, Manila Developers'

# Version information is pulled from the manila package itself so the
# rendered notes always match the code they were built from.
from manila.version import version_info as manila_version  # noqa

# The full version, including alpha/beta/rc tags.
release = manila_version.version_string_with_vcs()
# The short X.Y version.
version = manila_version.canonical_version_string()

# Patterns, relative to the source directory, of files and directories
# to ignore when looking for sources.
exclude_patterns = []

# Pygments style used for syntax highlighting.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# Builtin theme used for HTML and HTML Help pages.
html_theme = 'default'

# Paths containing custom static files (style sheets, etc.).  These are
# copied after the builtin static files, so a file named "default.css"
# here would override the builtin one.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'ManilaReleaseNotesdoc'

# -- Options for LaTeX output ---------------------------------------------

# LaTeX customization knobs (papersize, pointsize, preamble); the
# Sphinx defaults are fine for this document.
latex_elements = {}

# Grouping of the document tree into LaTeX files: tuples of
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'ManilaReleaseNotes.tex',
     u'Manila Release Notes Documentation',
     u'Manila Developers', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page: tuples of
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'manilareleasenotes',
     u'Manila Release Notes Documentation',
     [u'Manila Developers'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping of the document tree into Texinfo files: tuples of
# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    ('index', 'ManilaReleaseNotes',
     u'Manila Release Notes Documentation',
     u'Manila Developers', 'ManilaReleaseNotes',
     'One line description of project.',
     'Miscellaneous'),
]
| [
"tbechtold@suse.com"
] | tbechtold@suse.com |
7b0d198edd0ab71fba7c49944e970931a0dbc404 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/F5-BIGIP-APM-MIB.py | f32fb6c2ec66d59f08f2b797478be0cf5a1eb2d9 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 117,788 | py | #
# PySNMP MIB module F5-BIGIP-APM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/F5-BIGIP-APM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:57:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: this module is pysmi-generated (see the header comment above) and is
# meant to be executed by a pysnmp MIB loader: `mibBuilder` is never defined
# or imported here, so it is presumably injected into the module namespace by
# the loading environment — this file is not directly importable on its own.
# Symbol imports from other compiled MIB modules:
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
bigipCompliances, LongDisplayString, bigipGroups, bigipTrafficMgmt = mibBuilder.importSymbols("F5-BIGIP-COMMON-MIB", "bigipCompliances", "LongDisplayString", "bigipGroups", "bigipTrafficMgmt")
InetAddressType, InetAddress, InetPortNumber = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress", "InetPortNumber")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
NotificationType, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Opaque, Bits, ObjectIdentity, Unsigned32, TimeTicks, IpAddress, MibIdentifier, Integer32, iso, ModuleIdentity, Counter64, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "Opaque", "Bits", "ObjectIdentity", "Unsigned32", "TimeTicks", "IpAddress", "MibIdentifier", "Integer32", "iso", "ModuleIdentity", "Counter64", "Gauge32")
MacAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "TextualConvention", "DisplayString")
# Root of the APM MIB: enterprise subtree .1.3.6.1.4.1.3375.2.6
# (F5 Networks, per the setOrganization call below).
bigipApm = ModuleIdentity((1, 3, 6, 1, 4, 1, 3375, 2, 6))
if mibBuilder.loadTexts: bigipApm.setLastUpdated('201507231521Z')
if mibBuilder.loadTexts: bigipApm.setOrganization('F5 Networks, Inc.')
# Sub-identifier layout beneath bigipApm: profile-level statistics (.1),
# lease-pool statistics (.2) and ACL statistics (.3) branches.
apmProfiles = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1))
apmProfileAccessStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1))
apmProfileConnectivityStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2))
apmProfileRewriteStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3))
apmAccessStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4))
apmGlobalConnectivityStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5))
apmGlobalRewriteStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6))
apmProfileAccessAgentStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7))
apmProfileAccessMiscStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8))
apmLeasepool = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2))
apmLeasepoolStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1))
apmAcl = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3))
apmAclStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1))
# Access-profile statistics group (.2.6.1.1).  The pattern below — an
# object definition followed by `if mibBuilder.loadTexts: ...setStatus(...)`
# — repeats mechanically for every column in this generated module.
# Writable scalar; writing it presumably resets the access-profile
# statistics counters (name suggests it; not verifiable from this file).
apmPaStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmPaStatResetStats.setStatus('current')
# Read-only scalar: number of rows in apmPaStatTable.
apmPaStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatNumber.setStatus('current')
# Per-profile statistics table; rows are indexed by the profile name and
# the virtual-server name (see setIndexNames below).
apmPaStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3), )
if mibBuilder.loadTexts: apmPaStatTable.setStatus('current')
apmPaStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmPaStatName"), (0, "F5-BIGIP-APM-MIB", "apmPaStatVsName"))
if mibBuilder.loadTexts: apmPaStatEntry.setStatus('current')
# Column 1: access-profile name (also the first table index).
apmPaStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatName.setStatus('current')
apmPaStatConfigSyncState = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatConfigSyncState.setStatus('deprecated')
apmPaStatTotalSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatTotalSessions.setStatus('current')
apmPaStatTotalEstablishedStateSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatTotalEstablishedStateSessions.setStatus('current')
apmPaStatCurrentActiveSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatCurrentActiveSessions.setStatus('current')
apmPaStatCurrentPendingSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatCurrentPendingSessions.setStatus('current')
apmPaStatCurrentCompletedSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatCurrentCompletedSessions.setStatus('current')
apmPaStatUserLoggedoutSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatUserLoggedoutSessions.setStatus('current')
apmPaStatAdminTerminatedSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdminTerminatedSessions.setStatus('current')
apmPaStatMiscTerminatedSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMiscTerminatedSessions.setStatus('current')
apmPaStatAccessPolicyResultAllow = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAccessPolicyResultAllow.setStatus('current')
apmPaStatAccessPolicyResultDeny = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAccessPolicyResultDeny.setStatus('current')
apmPaStatAccessPolicyResultRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAccessPolicyResultRedirect.setStatus('current')
apmPaStatAccessPolicyResultRedirectWithSession = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAccessPolicyResultRedirectWithSession.setStatus('current')
apmPaStatEndingDenyAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingDenyAgentTotalInstances.setStatus('deprecated')
apmPaStatEndingDenyAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingDenyAgentTotalUsages.setStatus('deprecated')
apmPaStatEndingDenyAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingDenyAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEndingDenyAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingDenyAgentTotalFailures.setStatus('deprecated')
apmPaStatEndingDenyAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingDenyAgentTotalErrors.setStatus('deprecated')
apmPaStatEndingDenyAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 20), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingDenyAgentTotalSessVars.setStatus('deprecated')
apmPaStatEndingRedirectAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 21), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingRedirectAgentTotalInstances.setStatus('deprecated')
apmPaStatEndingRedirectAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 22), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingRedirectAgentTotalUsages.setStatus('deprecated')
apmPaStatEndingRedirectAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 23), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingRedirectAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEndingRedirectAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 24), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingRedirectAgentTotalFailures.setStatus('deprecated')
apmPaStatEndingRedirectAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 25), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingRedirectAgentTotalErrors.setStatus('deprecated')
apmPaStatEndingRedirectAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 26), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingRedirectAgentTotalSessVars.setStatus('deprecated')
apmPaStatEndingAllowAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 27), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingAllowAgentTotalInstances.setStatus('deprecated')
apmPaStatEndingAllowAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 28), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingAllowAgentTotalUsages.setStatus('deprecated')
apmPaStatEndingAllowAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 29), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingAllowAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEndingAllowAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 30), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingAllowAgentTotalFailures.setStatus('deprecated')
apmPaStatEndingAllowAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 31), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingAllowAgentTotalErrors.setStatus('deprecated')
apmPaStatEndingAllowAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 32), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEndingAllowAgentTotalSessVars.setStatus('deprecated')
apmPaStatAdAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 33), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdAgentTotalInstances.setStatus('deprecated')
apmPaStatAdAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 34), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdAgentTotalUsages.setStatus('deprecated')
apmPaStatAdAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 35), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdAgentTotalSuccesses.setStatus('deprecated')
apmPaStatAdAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 36), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdAgentTotalFailures.setStatus('deprecated')
apmPaStatAdAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 37), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdAgentTotalErrors.setStatus('deprecated')
apmPaStatAdAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 38), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAdAgentTotalSessVars.setStatus('deprecated')
apmPaStatClientCertAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 39), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatClientCertAgentTotalInstances.setStatus('deprecated')
apmPaStatClientCertAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 40), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatClientCertAgentTotalUsages.setStatus('deprecated')
apmPaStatClientCertAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 41), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatClientCertAgentTotalSuccesses.setStatus('deprecated')
apmPaStatClientCertAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 42), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatClientCertAgentTotalFailures.setStatus('deprecated')
apmPaStatClientCertAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 43), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatClientCertAgentTotalErrors.setStatus('deprecated')
apmPaStatClientCertAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 44), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatClientCertAgentTotalSessVars.setStatus('deprecated')
apmPaStatHttpAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 45), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatHttpAgentTotalInstances.setStatus('deprecated')
apmPaStatHttpAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 46), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatHttpAgentTotalUsages.setStatus('deprecated')
apmPaStatHttpAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 47), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatHttpAgentTotalSuccesses.setStatus('deprecated')
apmPaStatHttpAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 48), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatHttpAgentTotalFailures.setStatus('deprecated')
apmPaStatHttpAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 49), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatHttpAgentTotalErrors.setStatus('deprecated')
apmPaStatHttpAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 50), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatHttpAgentTotalSessVars.setStatus('deprecated')
apmPaStatLdapAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 51), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLdapAgentTotalInstances.setStatus('deprecated')
apmPaStatLdapAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 52), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLdapAgentTotalUsages.setStatus('deprecated')
apmPaStatLdapAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 53), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLdapAgentTotalSuccesses.setStatus('deprecated')
apmPaStatLdapAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 54), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLdapAgentTotalFailures.setStatus('deprecated')
apmPaStatLdapAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 55), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLdapAgentTotalErrors.setStatus('deprecated')
apmPaStatLdapAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 56), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLdapAgentTotalSessVars.setStatus('deprecated')
apmPaStatRadiusAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 57), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAgentTotalInstances.setStatus('deprecated')
apmPaStatRadiusAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 58), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAgentTotalUsages.setStatus('deprecated')
apmPaStatRadiusAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 59), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAgentTotalSuccesses.setStatus('deprecated')
apmPaStatRadiusAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 60), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAgentTotalFailures.setStatus('deprecated')
apmPaStatRadiusAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 61), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAgentTotalErrors.setStatus('deprecated')
apmPaStatRadiusAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 62), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAgentTotalSessVars.setStatus('deprecated')
apmPaStatSecuridAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 63), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatSecuridAgentTotalInstances.setStatus('deprecated')
apmPaStatSecuridAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 64), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatSecuridAgentTotalUsages.setStatus('deprecated')
apmPaStatSecuridAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 65), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatSecuridAgentTotalSuccesses.setStatus('deprecated')
apmPaStatSecuridAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 66), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatSecuridAgentTotalFailures.setStatus('deprecated')
apmPaStatSecuridAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 67), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatSecuridAgentTotalErrors.setStatus('deprecated')
apmPaStatSecuridAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 68), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatSecuridAgentTotalSessVars.setStatus('deprecated')
apmPaStatRadiusAcctAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 69), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAcctAgentTotalInstances.setStatus('deprecated')
apmPaStatRadiusAcctAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 70), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAcctAgentTotalUsages.setStatus('deprecated')
apmPaStatRadiusAcctAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 71), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAcctAgentTotalSuccesses.setStatus('deprecated')
apmPaStatRadiusAcctAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 72), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAcctAgentTotalFailures.setStatus('deprecated')
apmPaStatRadiusAcctAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 73), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAcctAgentTotalErrors.setStatus('deprecated')
apmPaStatRadiusAcctAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 74), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRadiusAcctAgentTotalSessVars.setStatus('deprecated')
apmPaStatEpsLinuxFcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 75), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxFcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsLinuxFcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 76), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxFcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsLinuxFcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 77), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxFcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsLinuxFcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 78), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxFcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsLinuxFcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 79), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxFcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsLinuxFcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 80), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxFcAgentTotalSessVars.setStatus('deprecated')
apmPaStatEpsLinuxPcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 81), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxPcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsLinuxPcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 82), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxPcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsLinuxPcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 83), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxPcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsLinuxPcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 84), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxPcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsLinuxPcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 85), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxPcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsLinuxPcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 86), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsLinuxPcAgentTotalSessVars.setStatus('deprecated')
apmPaStatEpsMacFcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 87), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacFcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsMacFcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 88), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacFcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsMacFcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 89), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacFcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsMacFcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 90), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacFcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsMacFcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 91), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacFcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsMacFcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 92), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacFcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 93-98 for the "EpsMacPc" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsMacPcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 93), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacPcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsMacPcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 94), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacPcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsMacPcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 95), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacPcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsMacPcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 96), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacPcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsMacPcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 97), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacPcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsMacPcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 98), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsMacPcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 99-104 for the "EpsWinCc" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinCcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 99), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinCcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsWinCcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 100), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinCcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsWinCcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 101), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinCcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinCcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 102), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinCcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsWinCcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 103), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinCcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsWinCcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 104), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinCcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 105-110 for the "EpsAv" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsAvAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 105), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsAvAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsAvAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 106), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsAvAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsAvAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 107), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsAvAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsAvAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 108), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsAvAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsAvAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 109), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsAvAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsAvAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 110), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsAvAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 111-116 for the "EpsWinOsInfo"
# agent (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinOsInfoAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 111), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinOsInfoAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsWinOsInfoAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 112), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinOsInfoAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsWinOsInfoAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 113), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinOsInfoAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinOsInfoAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 114), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinOsInfoAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsWinOsInfoAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 115), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinOsInfoAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsWinOsInfoAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 116), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinOsInfoAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 117-122 for the "EpsWinFc" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinFcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 117), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinFcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsWinFcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 118), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinFcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsWinFcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 119), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinFcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinFcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 120), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinFcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsWinFcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 121), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinFcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsWinFcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 122), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinFcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 123-128 for the "EpsWinMc" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinMcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 123), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinMcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsWinMcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 124), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinMcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsWinMcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 125), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinMcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinMcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 126), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinMcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsWinMcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 127), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinMcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsWinMcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 128), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinMcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 129-134 for the "EpsFwc" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsFwcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 129), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsFwcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsFwcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 130), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsFwcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsFwcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 131), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsFwcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsFwcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 132), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsFwcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsFwcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 133), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsFwcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsFwcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 134), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsFwcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 135-140 for the "EpsWinPc" group
# (per the object names; note no "Agent" suffix here, unlike neighbouring
# groups); all marked deprecated in the MIB.
apmPaStatEpsWinPcTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 135), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPcTotalInstances.setStatus('deprecated')
apmPaStatEpsWinPcTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 136), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPcTotalUsages.setStatus('deprecated')
apmPaStatEpsWinPcTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 137), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPcTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinPcTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 138), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPcTotalFailures.setStatus('deprecated')
apmPaStatEpsWinPcTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 139), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPcTotalErrors.setStatus('deprecated')
apmPaStatEpsWinPcTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 140), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPcTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 141-146 for the "EpsWinPw" group
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinPwTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 141), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPwTotalInstances.setStatus('deprecated')
apmPaStatEpsWinPwTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 142), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPwTotalUsages.setStatus('deprecated')
apmPaStatEpsWinPwTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 143), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPwTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinPwTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 144), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPwTotalFailures.setStatus('deprecated')
apmPaStatEpsWinPwTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 145), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPwTotalErrors.setStatus('deprecated')
apmPaStatEpsWinPwTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 146), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinPwTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 147-152 for the "EpsWinRc" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinRcAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 147), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinRcAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsWinRcAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 148), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinRcAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsWinRcAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 149), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinRcAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinRcAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 150), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinRcAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsWinRcAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 151), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinRcAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsWinRcAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 152), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinRcAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 153-158 for the "EpsWinGp" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatEpsWinGpAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 153), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinGpAgentTotalInstances.setStatus('deprecated')
apmPaStatEpsWinGpAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 154), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinGpAgentTotalUsages.setStatus('deprecated')
apmPaStatEpsWinGpAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 155), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinGpAgentTotalSuccesses.setStatus('deprecated')
apmPaStatEpsWinGpAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 156), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinGpAgentTotalFailures.setStatus('deprecated')
apmPaStatEpsWinGpAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 157), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinGpAgentTotalErrors.setStatus('deprecated')
apmPaStatEpsWinGpAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 158), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatEpsWinGpAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 159-164 for the "ExternalLogon"
# agent (per the object names); all marked deprecated in the MIB.
apmPaStatExternalLogonAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 159), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatExternalLogonAgentTotalInstances.setStatus('deprecated')
apmPaStatExternalLogonAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 160), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatExternalLogonAgentTotalUsages.setStatus('deprecated')
apmPaStatExternalLogonAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 161), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatExternalLogonAgentTotalSuccesses.setStatus('deprecated')
apmPaStatExternalLogonAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 162), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatExternalLogonAgentTotalFailures.setStatus('deprecated')
apmPaStatExternalLogonAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 163), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatExternalLogonAgentTotalErrors.setStatus('deprecated')
apmPaStatExternalLogonAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 164), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatExternalLogonAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 165-170 for the "Logon" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatLogonAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 165), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLogonAgentTotalInstances.setStatus('deprecated')
apmPaStatLogonAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 166), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLogonAgentTotalUsages.setStatus('deprecated')
apmPaStatLogonAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 167), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLogonAgentTotalSuccesses.setStatus('deprecated')
apmPaStatLogonAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 168), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLogonAgentTotalFailures.setStatus('deprecated')
apmPaStatLogonAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 169), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLogonAgentTotalErrors.setStatus('deprecated')
apmPaStatLogonAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 170), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLogonAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 171-176 for the "Ra" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatRaAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 171), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRaAgentTotalInstances.setStatus('deprecated')
apmPaStatRaAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 172), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRaAgentTotalUsages.setStatus('deprecated')
apmPaStatRaAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 173), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRaAgentTotalSuccesses.setStatus('deprecated')
apmPaStatRaAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 174), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRaAgentTotalFailures.setStatus('deprecated')
apmPaStatRaAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 175), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRaAgentTotalErrors.setStatus('deprecated')
apmPaStatRaAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 176), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRaAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 177-182 for the "Rds" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatRdsAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 177), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRdsAgentTotalInstances.setStatus('deprecated')
apmPaStatRdsAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 178), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRdsAgentTotalUsages.setStatus('deprecated')
apmPaStatRdsAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 179), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRdsAgentTotalSuccesses.setStatus('deprecated')
apmPaStatRdsAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 180), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRdsAgentTotalFailures.setStatus('deprecated')
apmPaStatRdsAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 181), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRdsAgentTotalErrors.setStatus('deprecated')
apmPaStatRdsAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 182), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatRdsAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 183-188 for the "Va" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatVaAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 183), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVaAgentTotalInstances.setStatus('deprecated')
apmPaStatVaAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 184), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVaAgentTotalUsages.setStatus('deprecated')
apmPaStatVaAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 185), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVaAgentTotalSuccesses.setStatus('deprecated')
apmPaStatVaAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 186), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVaAgentTotalFailures.setStatus('deprecated')
apmPaStatVaAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 187), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVaAgentTotalErrors.setStatus('deprecated')
apmPaStatVaAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 188), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVaAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 189-194 for the "Ie" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatIeAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 189), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatIeAgentTotalInstances.setStatus('deprecated')
apmPaStatIeAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 190), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatIeAgentTotalUsages.setStatus('deprecated')
apmPaStatIeAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 191), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatIeAgentTotalSuccesses.setStatus('deprecated')
apmPaStatIeAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 192), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatIeAgentTotalFailures.setStatus('deprecated')
apmPaStatIeAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 193), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatIeAgentTotalErrors.setStatus('deprecated')
apmPaStatIeAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 194), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatIeAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 195-200 for the "Logging" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatLoggingAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 195), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLoggingAgentTotalInstances.setStatus('deprecated')
apmPaStatLoggingAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 196), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLoggingAgentTotalUsages.setStatus('deprecated')
apmPaStatLoggingAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 197), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLoggingAgentTotalSuccesses.setStatus('deprecated')
apmPaStatLoggingAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 198), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLoggingAgentTotalFailures.setStatus('deprecated')
apmPaStatLoggingAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 199), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLoggingAgentTotalErrors.setStatus('deprecated')
apmPaStatLoggingAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 200), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatLoggingAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 201-206 for the "DecnBox" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatDecnBoxAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 201), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDecnBoxAgentTotalInstances.setStatus('deprecated')
apmPaStatDecnBoxAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 202), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDecnBoxAgentTotalUsages.setStatus('deprecated')
apmPaStatDecnBoxAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 203), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDecnBoxAgentTotalSuccesses.setStatus('deprecated')
apmPaStatDecnBoxAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 204), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDecnBoxAgentTotalFailures.setStatus('deprecated')
apmPaStatDecnBoxAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 205), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDecnBoxAgentTotalErrors.setStatus('deprecated')
apmPaStatDecnBoxAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 206), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDecnBoxAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 columns 207-212 for the "MesgBox" agent
# (per the object names); all marked deprecated in the MIB.
apmPaStatMesgBoxAgentTotalInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 207), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMesgBoxAgentTotalInstances.setStatus('deprecated')
apmPaStatMesgBoxAgentTotalUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 208), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMesgBoxAgentTotalUsages.setStatus('deprecated')
apmPaStatMesgBoxAgentTotalSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 209), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMesgBoxAgentTotalSuccesses.setStatus('deprecated')
apmPaStatMesgBoxAgentTotalFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 210), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMesgBoxAgentTotalFailures.setStatus('deprecated')
apmPaStatMesgBoxAgentTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 211), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMesgBoxAgentTotalErrors.setStatus('deprecated')
apmPaStatMesgBoxAgentTotalSessVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 212), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatMesgBoxAgentTotalSessVars.setStatus('deprecated')
# apmPaStat table: read-only Counter64 error counters for the "Apd" group
# (per the object names), columns 213-224; all marked deprecated in the MIB.
apmPaStatApdNoResultErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 213), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdNoResultErrors.setStatus('deprecated')
apmPaStatApdNoSessionErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 214), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdNoSessionErrors.setStatus('deprecated')
apmPaStatApdNoDeviceInfoErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 215), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdNoDeviceInfoErrors.setStatus('deprecated')
apmPaStatApdNoTokenErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 216), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdNoTokenErrors.setStatus('deprecated')
apmPaStatApdNoSigErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 217), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdNoSigErrors.setStatus('deprecated')
apmPaStatApdTotalMismatchErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 218), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdTotalMismatchErrors.setStatus('deprecated')
apmPaStatApdInvalidSigErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 219), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdInvalidSigErrors.setStatus('deprecated')
apmPaStatApdMcPipelineInitErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 220), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdMcPipelineInitErrors.setStatus('deprecated')
apmPaStatApdMcSetSessVarErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 221), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdMcSetSessVarErrors.setStatus('deprecated')
apmPaStatApdMcPipelineCloseErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 222), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdMcPipelineCloseErrors.setStatus('deprecated')
apmPaStatApdApResultErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 223), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdApResultErrors.setStatus('deprecated')
apmPaStatApdApInternalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 224), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatApdApInternalErrors.setStatus('deprecated')
# apmPaStat table: columns 225-227 — allowed/denied request counters
# (Counter64) and the virtual-server name (LongDisplayString). Unlike the
# preceding columns, these carry status 'current'. All read-only.
apmPaStatAllowedRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 225), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatAllowedRequests.setStatus('current')
apmPaStatDeniedRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 226), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatDeniedRequests.setStatus('current')
apmPaStatVsName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 1, 3, 1, 227), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPaStatVsName.setStatus('current')
# apmPcStat group (OID ...3375.2.6.1.2): a writable reset scalar, an entry-count
# scalar, and a statistics table indexed by apmPcStatName with connection and
# raw/compressed byte counters. All objects have status 'current'.
apmPcStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmPcStatResetStats.setStatus('current')
apmPcStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatNumber.setStatus('current')
apmPcStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3), )
if mibBuilder.loadTexts: apmPcStatTable.setStatus('current')
# Row is indexed by the (implied-length 0) apmPcStatName column.
apmPcStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmPcStatName"))
if mibBuilder.loadTexts: apmPcStatEntry.setStatus('current')
apmPcStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatName.setStatus('current')
apmPcStatTotConns = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatTotConns.setStatus('current')
apmPcStatCurConns = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatCurConns.setStatus('current')
apmPcStatMaxConns = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatMaxConns.setStatus('current')
apmPcStatIngressRaw = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatIngressRaw.setStatus('current')
apmPcStatEgressRaw = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatEgressRaw.setStatus('current')
apmPcStatIngressCompressed = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatIngressCompressed.setStatus('current')
apmPcStatEgressCompressed = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 2, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPcStatEgressCompressed.setStatus('current')
# apmPrStat group (OID ...3375.2.6.1.3): a writable reset scalar, an entry-count
# scalar, and a statistics table indexed by apmPrStatName with client/server
# request/response byte and message counters. All objects have status 'current'.
apmPrStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmPrStatResetStats.setStatus('current')
apmPrStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatNumber.setStatus('current')
apmPrStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3), )
if mibBuilder.loadTexts: apmPrStatTable.setStatus('current')
# Row is indexed by the (implied-length 0) apmPrStatName column.
apmPrStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmPrStatName"))
if mibBuilder.loadTexts: apmPrStatEntry.setStatus('current')
apmPrStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatName.setStatus('current')
apmPrStatClientReqBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatClientReqBytes.setStatus('current')
apmPrStatClientRespBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatClientRespBytes.setStatus('current')
apmPrStatServerReqBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatServerReqBytes.setStatus('current')
apmPrStatServerRespBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatServerRespBytes.setStatus('current')
apmPrStatClientReqs = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatClientReqs.setStatus('current')
apmPrStatClientResps = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatClientResps.setStatus('current')
apmPrStatServerReqs = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatServerReqs.setStatus('current')
apmPrStatServerResps = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 3, 3, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPrStatServerResps.setStatus('current')
# --- apmPgStat: APM per-agent statistics (generated code) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.1.7 plus a table (.7.3) with a
# TWO-part index (apmPgStatName, apmPgStatAgentName); all data columns
# are read-only Counter64.
apmPgStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmPgStatResetStats.setStatus('current')
apmPgStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatNumber.setStatus('current')
apmPgStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3), )
if mibBuilder.loadTexts: apmPgStatTable.setStatus('current')
# Compound index: entries are keyed by both the name and the agent name.
apmPgStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmPgStatName"), (0, "F5-BIGIP-APM-MIB", "apmPgStatAgentName"))
if mibBuilder.loadTexts: apmPgStatEntry.setStatus('current')
apmPgStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatName.setStatus('current')
apmPgStatAgentName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 2), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatAgentName.setStatus('current')
apmPgStatInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatInstances.setStatus('current')
apmPgStatUsages = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatUsages.setStatus('current')
apmPgStatSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatSuccesses.setStatus('current')
apmPgStatFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatFailures.setStatus('current')
apmPgStatErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatErrors.setStatus('current')
apmPgStatSessionVars = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 7, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPgStatSessionVars.setStatus('current')
# --- apmPmStat: APM error/inspection statistics (generated code) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.1.8 plus a table (.8.3) indexed by
# apmPmStatName. Columns count various error conditions (inspection,
# memory, result, internal) per the column names; all are read-only
# Counter64.
apmPmStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmPmStatResetStats.setStatus('current')
apmPmStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatNumber.setStatus('current')
apmPmStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3), )
if mibBuilder.loadTexts: apmPmStatTable.setStatus('current')
apmPmStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmPmStatName"))
if mibBuilder.loadTexts: apmPmStatEntry.setStatus('current')
apmPmStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatName.setStatus('current')
apmPmStatConfigSyncState = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatConfigSyncState.setStatus('current')
apmPmStatInspResultError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspResultError.setStatus('current')
apmPmStatInspSessionError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspSessionError.setStatus('current')
apmPmStatInspDeviceInfoError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspDeviceInfoError.setStatus('current')
apmPmStatInspTokenError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspTokenError.setStatus('current')
apmPmStatInspSignatureError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspSignatureError.setStatus('current')
apmPmStatInspDataMsmtchError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspDataMsmtchError.setStatus('current')
apmPmStatInspClientSignError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInspClientSignError.setStatus('current')
apmPmStatMemInitError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatMemInitError.setStatus('current')
apmPmStatMemSessionVarError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatMemSessionVarError.setStatus('current')
apmPmStatMemCloseError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatMemCloseError.setStatus('current')
apmPmStatResultError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatResultError.setStatus('current')
apmPmStatInternalError = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 8, 3, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmPmStatInternalError.setStatus('current')
# --- apmAccessStat: global APM access/session scalars (generated code) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.1.4. ResetStats is read-write;
# TotalSessions is Integer32 while the remaining counters are Counter64
# (as generated — the mixed syntaxes come from the MIB definition).
apmAccessStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmAccessStatResetStats.setStatus('current')
apmAccessStatTotalSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatTotalSessions.setStatus('current')
apmAccessStatCurrentActiveSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatCurrentActiveSessions.setStatus('current')
apmAccessStatCurrentPendingSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatCurrentPendingSessions.setStatus('current')
apmAccessStatCurrentEndedSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatCurrentEndedSessions.setStatus('current')
apmAccessStatUserLoggedoutSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatUserLoggedoutSessions.setStatus('current')
apmAccessStatAdminTerminatedSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatAdminTerminatedSessions.setStatus('current')
apmAccessStatMiscTerminatedSessions = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatMiscTerminatedSessions.setStatus('current')
apmAccessStatResultAllow = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatResultAllow.setStatus('current')
apmAccessStatResultDeny = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatResultDeny.setStatus('current')
apmAccessStatResultRedirect = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatResultRedirect.setStatus('current')
apmAccessStatResultRedirectWithSession = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 4, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAccessStatResultRedirectWithSession.setStatus('current')
# --- apmGlobalConnectivityStat: global connectivity scalars (generated) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.1.5; same column layout as the
# per-profile apmPcStat table above (conns + raw/compressed byte counters),
# but as device-wide totals.
apmGlobalConnectivityStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmGlobalConnectivityStatResetStats.setStatus('current')
apmGlobalConnectivityStatTotConns = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatTotConns.setStatus('current')
apmGlobalConnectivityStatCurConns = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatCurConns.setStatus('current')
apmGlobalConnectivityStatMaxConns = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatMaxConns.setStatus('current')
apmGlobalConnectivityStatIngressRaw = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatIngressRaw.setStatus('current')
apmGlobalConnectivityStatEgressRaw = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatEgressRaw.setStatus('current')
apmGlobalConnectivityStatIngressCompressed = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatIngressCompressed.setStatus('current')
apmGlobalConnectivityStatEgressCompressed = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 5, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalConnectivityStatEgressCompressed.setStatus('current')
# --- apmGlobalRewriteStat: global rewrite scalars (generated code) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.1.6; device-wide equivalents of the
# per-profile apmPrStat table columns (client/server request/response
# bytes and counts).
apmGlobalRewriteStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmGlobalRewriteStatResetStats.setStatus('current')
apmGlobalRewriteStatClientReqBytes = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatClientReqBytes.setStatus('current')
apmGlobalRewriteStatClientRespBytes = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatClientRespBytes.setStatus('current')
apmGlobalRewriteStatServerReqBytes = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatServerReqBytes.setStatus('current')
apmGlobalRewriteStatServerRespBytes = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatServerRespBytes.setStatus('current')
apmGlobalRewriteStatClientReqs = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatClientReqs.setStatus('current')
apmGlobalRewriteStatClientResps = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatClientResps.setStatus('current')
apmGlobalRewriteStatServerReqs = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatServerReqs.setStatus('current')
apmGlobalRewriteStatServerResps = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 1, 6, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmGlobalRewriteStatServerResps.setStatus('current')
# --- apmLeasepoolStat: lease-pool statistics (generated code) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.2.1 plus a table (.1.3) indexed by
# apmLeasepoolStatName. Current/max member gauges are Integer32; the
# pick/reserve/release request+failure totals are Counter64.
apmLeasepoolStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmLeasepoolStatResetStats.setStatus('current')
apmLeasepoolStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatNumber.setStatus('current')
apmLeasepoolStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3), )
if mibBuilder.loadTexts: apmLeasepoolStatTable.setStatus('current')
apmLeasepoolStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmLeasepoolStatName"))
if mibBuilder.loadTexts: apmLeasepoolStatEntry.setStatus('current')
apmLeasepoolStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatName.setStatus('current')
apmLeasepoolStatCurMembers = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatCurMembers.setStatus('current')
apmLeasepoolStatCurAssigned = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatCurAssigned.setStatus('current')
apmLeasepoolStatCurFree = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatCurFree.setStatus('current')
apmLeasepoolStatMaxAssigned = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatMaxAssigned.setStatus('current')
apmLeasepoolStatTotPickRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatTotPickRequests.setStatus('current')
apmLeasepoolStatTotPickFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatTotPickFailure.setStatus('current')
apmLeasepoolStatTotReserveRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatTotReserveRequests.setStatus('current')
apmLeasepoolStatTotReserveFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatTotReserveFailure.setStatus('current')
apmLeasepoolStatTotReleaseRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatTotReleaseRequests.setStatus('current')
apmLeasepoolStatTotReleaseFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 2, 1, 3, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmLeasepoolStatTotReleaseFailure.setStatus('current')
# --- apmAclStat: ACL action statistics (generated code) ---
# Scalars under 1.3.6.1.4.1.3375.2.6.3.1 plus a table (.1.3) indexed by
# apmAclStatName, with Counter64 columns per ACL action
# (allow / continue / discard / reject).
apmAclStatResetStats = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apmAclStatResetStats.setStatus('current')
apmAclStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAclStatNumber.setStatus('current')
apmAclStatTable = MibTable((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3), )
if mibBuilder.loadTexts: apmAclStatTable.setStatus('current')
apmAclStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3, 1), ).setIndexNames((0, "F5-BIGIP-APM-MIB", "apmAclStatName"))
if mibBuilder.loadTexts: apmAclStatEntry.setStatus('current')
apmAclStatName = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3, 1, 1), LongDisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAclStatName.setStatus('current')
apmAclStatActionAllow = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAclStatActionAllow.setStatus('current')
apmAclStatActionContinue = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAclStatActionContinue.setStatus('current')
apmAclStatActionDiscard = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAclStatActionDiscard.setStatus('current')
apmAclStatActionReject = MibTableColumn((1, 3, 6, 1, 4, 1, 3375, 2, 6, 3, 1, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: apmAclStatActionReject.setStatus('current')
# --- Conformance section (generated code) ---
# SMIv2 MODULE-COMPLIANCE statement referencing the bigipApmGroups subtree,
# plus the identifier node the ObjectGroups below hang off of. The version
# guard only calls setStatus() on pysnmp/pysmi runtimes newer than 4.4.0,
# where the method is available/required.
bigipApmCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 3375, 2, 5, 1, 6)).setObjects(("F5-BIGIP-APM-MIB", "bigipApmGroups"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    bigipApmCompliance = bigipApmCompliance.setStatus('current')
bigipApmGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6))
# OBJECT-GROUP listing every apmPaStat* object (access-policy statistics).
# Single generated statement; the object list is wrapped by the MIB
# compiler across several physical lines and must not be hand-edited.
apmPaStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 1)).setObjects(("F5-BIGIP-APM-MIB", "apmPaStatResetStats"), ("F5-BIGIP-APM-MIB", "apmPaStatNumber"), ("F5-BIGIP-APM-MIB", "apmPaStatName"), ("F5-BIGIP-APM-MIB", "apmPaStatConfigSyncState"), ("F5-BIGIP-APM-MIB", "apmPaStatTotalSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatTotalEstablishedStateSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatCurrentActiveSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatCurrentPendingSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatCurrentCompletedSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatUserLoggedoutSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatAdminTerminatedSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatMiscTerminatedSessions"), ("F5-BIGIP-APM-MIB", "apmPaStatAccessPolicyResultAllow"), ("F5-BIGIP-APM-MIB", "apmPaStatAccessPolicyResultDeny"), ("F5-BIGIP-APM-MIB", "apmPaStatAccessPolicyResultRedirect"), ("F5-BIGIP-APM-MIB", "apmPaStatAccessPolicyResultRedirectWithSession"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingDenyAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingDenyAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingDenyAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingDenyAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingDenyAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingDenyAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingRedirectAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingRedirectAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingRedirectAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingRedirectAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingRedirectAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingRedirectAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingAllowAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingAllowAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingAllowAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingAllowAgentTotalFailures"), 
("F5-BIGIP-APM-MIB", "apmPaStatEndingAllowAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEndingAllowAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatAdAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatAdAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatAdAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatAdAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatAdAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatAdAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatClientCertAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatClientCertAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatClientCertAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatClientCertAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatClientCertAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatClientCertAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatHttpAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatHttpAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatHttpAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatHttpAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatHttpAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatHttpAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatLdapAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatLdapAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatLdapAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatLdapAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatLdapAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatLdapAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatSecuridAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatSecuridAgentTotalUsages"), 
("F5-BIGIP-APM-MIB", "apmPaStatSecuridAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatSecuridAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatSecuridAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatSecuridAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAcctAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAcctAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAcctAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAcctAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAcctAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatRadiusAcctAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxFcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxFcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxFcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxFcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxFcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxFcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxPcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxPcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxPcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxPcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxPcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsLinuxPcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacFcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacFcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacFcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacFcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacFcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacFcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacPcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacPcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacPcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacPcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", 
"apmPaStatEpsMacPcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsMacPcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinCcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinCcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinCcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinCcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinCcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinCcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsAvAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsAvAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsAvAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsAvAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsAvAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsAvAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinOsInfoAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinOsInfoAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinOsInfoAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinOsInfoAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinOsInfoAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinOsInfoAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinFcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinFcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinFcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinFcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinFcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinFcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinMcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinMcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinMcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinMcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinMcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinMcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsFwcAgentTotalInstances"), 
("F5-BIGIP-APM-MIB", "apmPaStatEpsFwcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsFwcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsFwcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsFwcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsFwcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPcTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPcTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPcTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPcTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPcTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPcTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPwTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPwTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPwTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPwTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPwTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinPwTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinRcAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinRcAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinRcAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinRcAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinRcAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinRcAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinGpAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinGpAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinGpAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinGpAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinGpAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatEpsWinGpAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatExternalLogonAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatExternalLogonAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatExternalLogonAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatExternalLogonAgentTotalFailures"), ("F5-BIGIP-APM-MIB", 
"apmPaStatExternalLogonAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatExternalLogonAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatLogonAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatLogonAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatLogonAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatLogonAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatLogonAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatLogonAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatRaAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatRaAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatRaAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatRaAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatRaAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatRaAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatRdsAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatRdsAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatRdsAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatRdsAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatRdsAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatRdsAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatVaAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatVaAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatVaAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatVaAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatVaAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatVaAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatIeAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatIeAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatIeAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatIeAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatIeAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatIeAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatLoggingAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatLoggingAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatLoggingAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", 
"apmPaStatLoggingAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatLoggingAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatLoggingAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatDecnBoxAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatDecnBoxAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatDecnBoxAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatDecnBoxAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatDecnBoxAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatDecnBoxAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatMesgBoxAgentTotalInstances"), ("F5-BIGIP-APM-MIB", "apmPaStatMesgBoxAgentTotalUsages"), ("F5-BIGIP-APM-MIB", "apmPaStatMesgBoxAgentTotalSuccesses"), ("F5-BIGIP-APM-MIB", "apmPaStatMesgBoxAgentTotalFailures"), ("F5-BIGIP-APM-MIB", "apmPaStatMesgBoxAgentTotalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatMesgBoxAgentTotalSessVars"), ("F5-BIGIP-APM-MIB", "apmPaStatApdNoResultErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdNoSessionErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdNoDeviceInfoErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdNoTokenErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdNoSigErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdTotalMismatchErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdInvalidSigErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdMcPipelineInitErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdMcSetSessVarErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdMcPipelineCloseErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdApResultErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatApdApInternalErrors"), ("F5-BIGIP-APM-MIB", "apmPaStatAllowedRequests"), ("F5-BIGIP-APM-MIB", "apmPaStatDeniedRequests"), ("F5-BIGIP-APM-MIB", "apmPaStatVsName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    apmPaStatGroup = apmPaStatGroup.setStatus('current')
# OBJECT-GROUP for the apmPcStat* connectivity-profile objects.
apmPcStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 2)).setObjects(("F5-BIGIP-APM-MIB", "apmPcStatResetStats"), ("F5-BIGIP-APM-MIB", "apmPcStatNumber"), ("F5-BIGIP-APM-MIB", "apmPcStatName"), ("F5-BIGIP-APM-MIB", "apmPcStatTotConns"), ("F5-BIGIP-APM-MIB", "apmPcStatCurConns"), ("F5-BIGIP-APM-MIB", "apmPcStatMaxConns"), ("F5-BIGIP-APM-MIB", "apmPcStatIngressRaw"), ("F5-BIGIP-APM-MIB", "apmPcStatEgressRaw"), ("F5-BIGIP-APM-MIB", "apmPcStatIngressCompressed"), ("F5-BIGIP-APM-MIB", "apmPcStatEgressCompressed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    apmPcStatGroup = apmPcStatGroup.setStatus('current')
apmPrStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 3)).setObjects(("F5-BIGIP-APM-MIB", "apmPrStatResetStats"), ("F5-BIGIP-APM-MIB", "apmPrStatNumber"), ("F5-BIGIP-APM-MIB", "apmPrStatName"), ("F5-BIGIP-APM-MIB", "apmPrStatClientReqBytes"), ("F5-BIGIP-APM-MIB", "apmPrStatClientRespBytes"), ("F5-BIGIP-APM-MIB", "apmPrStatServerReqBytes"), ("F5-BIGIP-APM-MIB", "apmPrStatServerRespBytes"), ("F5-BIGIP-APM-MIB", "apmPrStatClientReqs"), ("F5-BIGIP-APM-MIB", "apmPrStatClientResps"), ("F5-BIGIP-APM-MIB", "apmPrStatServerReqs"), ("F5-BIGIP-APM-MIB", "apmPrStatServerResps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmPrStatGroup = apmPrStatGroup.setStatus('current')
apmPgStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 4)).setObjects(("F5-BIGIP-APM-MIB", "apmPgStatResetStats"), ("F5-BIGIP-APM-MIB", "apmPgStatNumber"), ("F5-BIGIP-APM-MIB", "apmPgStatName"), ("F5-BIGIP-APM-MIB", "apmPgStatAgentName"), ("F5-BIGIP-APM-MIB", "apmPgStatInstances"), ("F5-BIGIP-APM-MIB", "apmPgStatUsages"), ("F5-BIGIP-APM-MIB", "apmPgStatSuccesses"), ("F5-BIGIP-APM-MIB", "apmPgStatFailures"), ("F5-BIGIP-APM-MIB", "apmPgStatErrors"), ("F5-BIGIP-APM-MIB", "apmPgStatSessionVars"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmPgStatGroup = apmPgStatGroup.setStatus('current')
apmPmStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 5)).setObjects(("F5-BIGIP-APM-MIB", "apmPmStatResetStats"), ("F5-BIGIP-APM-MIB", "apmPmStatNumber"), ("F5-BIGIP-APM-MIB", "apmPmStatName"), ("F5-BIGIP-APM-MIB", "apmPmStatConfigSyncState"), ("F5-BIGIP-APM-MIB", "apmPmStatInspResultError"), ("F5-BIGIP-APM-MIB", "apmPmStatInspSessionError"), ("F5-BIGIP-APM-MIB", "apmPmStatInspDeviceInfoError"), ("F5-BIGIP-APM-MIB", "apmPmStatInspTokenError"), ("F5-BIGIP-APM-MIB", "apmPmStatInspSignatureError"), ("F5-BIGIP-APM-MIB", "apmPmStatInspDataMsmtchError"), ("F5-BIGIP-APM-MIB", "apmPmStatInspClientSignError"), ("F5-BIGIP-APM-MIB", "apmPmStatMemInitError"), ("F5-BIGIP-APM-MIB", "apmPmStatMemSessionVarError"), ("F5-BIGIP-APM-MIB", "apmPmStatMemCloseError"), ("F5-BIGIP-APM-MIB", "apmPmStatResultError"), ("F5-BIGIP-APM-MIB", "apmPmStatInternalError"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmPmStatGroup = apmPmStatGroup.setStatus('current')
apmAccessStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 6)).setObjects(("F5-BIGIP-APM-MIB", "apmAccessStatResetStats"), ("F5-BIGIP-APM-MIB", "apmAccessStatTotalSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatCurrentActiveSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatCurrentPendingSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatCurrentEndedSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatUserLoggedoutSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatAdminTerminatedSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatMiscTerminatedSessions"), ("F5-BIGIP-APM-MIB", "apmAccessStatResultAllow"), ("F5-BIGIP-APM-MIB", "apmAccessStatResultDeny"), ("F5-BIGIP-APM-MIB", "apmAccessStatResultRedirect"), ("F5-BIGIP-APM-MIB", "apmAccessStatResultRedirectWithSession"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmAccessStatGroup = apmAccessStatGroup.setStatus('current')
apmGlobalConnectivityStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 7)).setObjects(("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatResetStats"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatTotConns"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatCurConns"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatMaxConns"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatIngressRaw"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatEgressRaw"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatIngressCompressed"), ("F5-BIGIP-APM-MIB", "apmGlobalConnectivityStatEgressCompressed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmGlobalConnectivityStatGroup = apmGlobalConnectivityStatGroup.setStatus('current')
apmGlobalRewriteStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 8)).setObjects(("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatResetStats"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatClientReqBytes"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatClientRespBytes"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatServerReqBytes"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatServerRespBytes"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatClientReqs"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatClientResps"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatServerReqs"), ("F5-BIGIP-APM-MIB", "apmGlobalRewriteStatServerResps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmGlobalRewriteStatGroup = apmGlobalRewriteStatGroup.setStatus('current')
apmLeasepoolStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 9)).setObjects(("F5-BIGIP-APM-MIB", "apmLeasepoolStatResetStats"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatNumber"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatName"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatCurMembers"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatCurAssigned"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatCurFree"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatMaxAssigned"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatTotPickRequests"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatTotPickFailure"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatTotReserveRequests"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatTotReserveFailure"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatTotReleaseRequests"), ("F5-BIGIP-APM-MIB", "apmLeasepoolStatTotReleaseFailure"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmLeasepoolStatGroup = apmLeasepoolStatGroup.setStatus('current')
apmAclStatGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3375, 2, 5, 2, 6, 10)).setObjects(("F5-BIGIP-APM-MIB", "apmAclStatResetStats"), ("F5-BIGIP-APM-MIB", "apmAclStatNumber"), ("F5-BIGIP-APM-MIB", "apmAclStatName"), ("F5-BIGIP-APM-MIB", "apmAclStatActionAllow"), ("F5-BIGIP-APM-MIB", "apmAclStatActionContinue"), ("F5-BIGIP-APM-MIB", "apmAclStatActionDiscard"), ("F5-BIGIP-APM-MIB", "apmAclStatActionReject"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apmAclStatGroup = apmAclStatGroup.setStatus('current')
mibBuilder.exportSymbols("F5-BIGIP-APM-MIB", apmPaStatApdMcSetSessVarErrors=apmPaStatApdMcSetSessVarErrors, apmPaStatExternalLogonAgentTotalUsages=apmPaStatExternalLogonAgentTotalUsages, apmPaStatRadiusAgentTotalErrors=apmPaStatRadiusAgentTotalErrors, apmPaStatLogonAgentTotalUsages=apmPaStatLogonAgentTotalUsages, apmPcStatNumber=apmPcStatNumber, apmPrStatServerReqs=apmPrStatServerReqs, apmPaStatApdMcPipelineCloseErrors=apmPaStatApdMcPipelineCloseErrors, apmPaStatRadiusAcctAgentTotalFailures=apmPaStatRadiusAcctAgentTotalFailures, apmAccessStatResultDeny=apmAccessStatResultDeny, apmPaStatEpsWinCcAgentTotalFailures=apmPaStatEpsWinCcAgentTotalFailures, apmPaStatEpsMacFcAgentTotalErrors=apmPaStatEpsMacFcAgentTotalErrors, apmPaStatEpsFwcAgentTotalErrors=apmPaStatEpsFwcAgentTotalErrors, apmPaStatEpsWinRcAgentTotalSessVars=apmPaStatEpsWinRcAgentTotalSessVars, apmPaStatEpsWinPwTotalFailures=apmPaStatEpsWinPwTotalFailures, apmPcStatIngressRaw=apmPcStatIngressRaw, apmPaStatEpsWinGpAgentTotalUsages=apmPaStatEpsWinGpAgentTotalUsages, apmLeasepoolStatTotPickFailure=apmLeasepoolStatTotPickFailure, apmPaStatEpsFwcAgentTotalInstances=apmPaStatEpsFwcAgentTotalInstances, apmPaStatLoggingAgentTotalInstances=apmPaStatLoggingAgentTotalInstances, bigipApm=bigipApm, apmPmStatConfigSyncState=apmPmStatConfigSyncState, apmAccessStatGroup=apmAccessStatGroup, apmLeasepoolStatCurMembers=apmLeasepoolStatCurMembers, apmPmStatInspSessionError=apmPmStatInspSessionError, apmPaStatEndingAllowAgentTotalUsages=apmPaStatEndingAllowAgentTotalUsages, apmPaStatEpsWinGpAgentTotalFailures=apmPaStatEpsWinGpAgentTotalFailures, apmPaStatSecuridAgentTotalFailures=apmPaStatSecuridAgentTotalFailures, apmLeasepoolStatTotReserveRequests=apmLeasepoolStatTotReserveRequests, apmProfileAccessStat=apmProfileAccessStat, apmPaStatAccessPolicyResultRedirect=apmPaStatAccessPolicyResultRedirect, apmAclStatNumber=apmAclStatNumber, apmAclStatActionDiscard=apmAclStatActionDiscard, 
apmPaStatEndingAllowAgentTotalErrors=apmPaStatEndingAllowAgentTotalErrors, apmLeasepoolStatTable=apmLeasepoolStatTable, apmPgStatResetStats=apmPgStatResetStats, apmAccessStatResultRedirectWithSession=apmAccessStatResultRedirectWithSession, apmPaStatEndingAllowAgentTotalInstances=apmPaStatEndingAllowAgentTotalInstances, bigipApmCompliance=bigipApmCompliance, apmPaStatDecnBoxAgentTotalUsages=apmPaStatDecnBoxAgentTotalUsages, apmPaStatApdNoTokenErrors=apmPaStatApdNoTokenErrors, apmAclStatResetStats=apmAclStatResetStats, apmPaStatEpsWinRcAgentTotalUsages=apmPaStatEpsWinRcAgentTotalUsages, apmProfileAccessMiscStat=apmProfileAccessMiscStat, apmPaStatLogonAgentTotalSuccesses=apmPaStatLogonAgentTotalSuccesses, apmLeasepoolStatTotReserveFailure=apmLeasepoolStatTotReserveFailure, apmPrStatTable=apmPrStatTable, apmAclStatActionAllow=apmAclStatActionAllow, apmPaStatMesgBoxAgentTotalFailures=apmPaStatMesgBoxAgentTotalFailures, apmPaStatVaAgentTotalSuccesses=apmPaStatVaAgentTotalSuccesses, apmPaStatAdAgentTotalFailures=apmPaStatAdAgentTotalFailures, apmPaStatLoggingAgentTotalUsages=apmPaStatLoggingAgentTotalUsages, apmPgStatFailures=apmPgStatFailures, apmPaStatRdsAgentTotalSuccesses=apmPaStatRdsAgentTotalSuccesses, apmPcStatIngressCompressed=apmPcStatIngressCompressed, apmPaStatVaAgentTotalErrors=apmPaStatVaAgentTotalErrors, apmPaStatExternalLogonAgentTotalSessVars=apmPaStatExternalLogonAgentTotalSessVars, apmPaStatVaAgentTotalUsages=apmPaStatVaAgentTotalUsages, apmPcStatEntry=apmPcStatEntry, apmPaStatEpsLinuxPcAgentTotalSuccesses=apmPaStatEpsLinuxPcAgentTotalSuccesses, apmAccessStatResetStats=apmAccessStatResetStats, apmPaStatDecnBoxAgentTotalSuccesses=apmPaStatDecnBoxAgentTotalSuccesses, apmPaStatLdapAgentTotalFailures=apmPaStatLdapAgentTotalFailures, apmAclStatName=apmAclStatName, apmPaStatEpsWinPwTotalSuccesses=apmPaStatEpsWinPwTotalSuccesses, apmPaStatEpsMacPcAgentTotalSuccesses=apmPaStatEpsMacPcAgentTotalSuccesses, 
apmPaStatEndingDenyAgentTotalErrors=apmPaStatEndingDenyAgentTotalErrors, apmPaStatTotalSessions=apmPaStatTotalSessions, apmPaStatAdAgentTotalInstances=apmPaStatAdAgentTotalInstances, apmPaStatDecnBoxAgentTotalErrors=apmPaStatDecnBoxAgentTotalErrors, apmPaStatCurrentPendingSessions=apmPaStatCurrentPendingSessions, apmPaStatRaAgentTotalInstances=apmPaStatRaAgentTotalInstances, apmPaStatLdapAgentTotalSessVars=apmPaStatLdapAgentTotalSessVars, apmGlobalRewriteStatClientResps=apmGlobalRewriteStatClientResps, apmPaStatEpsWinGpAgentTotalErrors=apmPaStatEpsWinGpAgentTotalErrors, apmPaStatEntry=apmPaStatEntry, apmPaStatCurrentCompletedSessions=apmPaStatCurrentCompletedSessions, apmPaStatIeAgentTotalUsages=apmPaStatIeAgentTotalUsages, apmPaStatEpsWinRcAgentTotalSuccesses=apmPaStatEpsWinRcAgentTotalSuccesses, apmPaStatEpsWinFcAgentTotalUsages=apmPaStatEpsWinFcAgentTotalUsages, apmAclStatTable=apmAclStatTable, apmPgStatNumber=apmPgStatNumber, apmPaStatEpsLinuxFcAgentTotalSuccesses=apmPaStatEpsLinuxFcAgentTotalSuccesses, apmGlobalRewriteStatServerReqs=apmGlobalRewriteStatServerReqs, apmPaStatSecuridAgentTotalSessVars=apmPaStatSecuridAgentTotalSessVars, apmPaStatExternalLogonAgentTotalInstances=apmPaStatExternalLogonAgentTotalInstances, apmPaStatEpsLinuxPcAgentTotalFailures=apmPaStatEpsLinuxPcAgentTotalFailures, apmPaStatEndingRedirectAgentTotalSuccesses=apmPaStatEndingRedirectAgentTotalSuccesses, apmPaStatEndingAllowAgentTotalSuccesses=apmPaStatEndingAllowAgentTotalSuccesses, apmPaStatEpsMacPcAgentTotalErrors=apmPaStatEpsMacPcAgentTotalErrors, apmAccessStatMiscTerminatedSessions=apmAccessStatMiscTerminatedSessions, apmPaStatHttpAgentTotalInstances=apmPaStatHttpAgentTotalInstances, apmPaStatEpsWinCcAgentTotalUsages=apmPaStatEpsWinCcAgentTotalUsages, apmPaStatAdminTerminatedSessions=apmPaStatAdminTerminatedSessions, apmPaStatIeAgentTotalInstances=apmPaStatIeAgentTotalInstances, apmPaStatEpsMacPcAgentTotalFailures=apmPaStatEpsMacPcAgentTotalFailures, 
apmLeasepoolStatCurAssigned=apmLeasepoolStatCurAssigned, apmGlobalConnectivityStatMaxConns=apmGlobalConnectivityStatMaxConns, apmPrStatEntry=apmPrStatEntry, apmPaStatSecuridAgentTotalSuccesses=apmPaStatSecuridAgentTotalSuccesses, apmPmStatMemInitError=apmPmStatMemInitError, apmPaStatApdNoSigErrors=apmPaStatApdNoSigErrors, apmAccessStatCurrentEndedSessions=apmAccessStatCurrentEndedSessions, apmPaStatEpsWinCcAgentTotalErrors=apmPaStatEpsWinCcAgentTotalErrors, apmPrStatResetStats=apmPrStatResetStats, apmPrStatNumber=apmPrStatNumber, apmLeasepoolStat=apmLeasepoolStat, apmAclStatGroup=apmAclStatGroup, apmPaStatRadiusAgentTotalFailures=apmPaStatRadiusAgentTotalFailures, apmPaStatApdApResultErrors=apmPaStatApdApResultErrors, apmPrStatClientReqBytes=apmPrStatClientReqBytes, apmGlobalConnectivityStatResetStats=apmGlobalConnectivityStatResetStats, apmPaStatMesgBoxAgentTotalErrors=apmPaStatMesgBoxAgentTotalErrors, apmPmStatInternalError=apmPmStatInternalError, apmPaStatAdAgentTotalErrors=apmPaStatAdAgentTotalErrors, apmPaStatEpsMacPcAgentTotalUsages=apmPaStatEpsMacPcAgentTotalUsages, apmLeasepool=apmLeasepool, apmPaStatEpsWinPcTotalInstances=apmPaStatEpsWinPcTotalInstances, apmPaStatEpsFwcAgentTotalSuccesses=apmPaStatEpsFwcAgentTotalSuccesses, apmPaStatClientCertAgentTotalUsages=apmPaStatClientCertAgentTotalUsages, apmPaStatEpsWinCcAgentTotalSuccesses=apmPaStatEpsWinCcAgentTotalSuccesses, apmPaStatRdsAgentTotalFailures=apmPaStatRdsAgentTotalFailures, apmPaStatVaAgentTotalInstances=apmPaStatVaAgentTotalInstances, apmPaStatRadiusAcctAgentTotalUsages=apmPaStatRadiusAcctAgentTotalUsages, apmPcStatEgressRaw=apmPcStatEgressRaw, apmPrStatServerRespBytes=apmPrStatServerRespBytes, apmPaStatEpsWinRcAgentTotalInstances=apmPaStatEpsWinRcAgentTotalInstances, apmPaStatEndingRedirectAgentTotalSessVars=apmPaStatEndingRedirectAgentTotalSessVars, apmAcl=apmAcl, apmPaStatEndingDenyAgentTotalFailures=apmPaStatEndingDenyAgentTotalFailures, 
apmPaStatEpsWinPwTotalUsages=apmPaStatEpsWinPwTotalUsages, apmAccessStatUserLoggedoutSessions=apmAccessStatUserLoggedoutSessions, apmPrStatName=apmPrStatName, apmGlobalConnectivityStatEgressRaw=apmGlobalConnectivityStatEgressRaw, apmAccessStatCurrentPendingSessions=apmAccessStatCurrentPendingSessions, apmPmStatMemCloseError=apmPmStatMemCloseError, apmPgStatErrors=apmPgStatErrors, apmPaStatEpsAvAgentTotalFailures=apmPaStatEpsAvAgentTotalFailures, apmPaStatEpsWinFcAgentTotalInstances=apmPaStatEpsWinFcAgentTotalInstances, apmPaStatApdNoResultErrors=apmPaStatApdNoResultErrors, apmPaStatSecuridAgentTotalErrors=apmPaStatSecuridAgentTotalErrors, apmGlobalConnectivityStatIngressRaw=apmGlobalConnectivityStatIngressRaw, apmPmStatResetStats=apmPmStatResetStats, apmPaStatRadiusAgentTotalSuccesses=apmPaStatRadiusAgentTotalSuccesses, apmPmStatGroup=apmPmStatGroup, apmGlobalRewriteStatClientRespBytes=apmGlobalRewriteStatClientRespBytes, apmPaStatEpsWinCcAgentTotalSessVars=apmPaStatEpsWinCcAgentTotalSessVars, apmPaStatLoggingAgentTotalErrors=apmPaStatLoggingAgentTotalErrors, apmPaStatIeAgentTotalSessVars=apmPaStatIeAgentTotalSessVars, apmPaStatRadiusAcctAgentTotalInstances=apmPaStatRadiusAcctAgentTotalInstances, apmPaStatVaAgentTotalFailures=apmPaStatVaAgentTotalFailures, apmPaStatEpsLinuxPcAgentTotalUsages=apmPaStatEpsLinuxPcAgentTotalUsages, apmPaStatRdsAgentTotalUsages=apmPaStatRdsAgentTotalUsages, apmPaStatRadiusAcctAgentTotalSessVars=apmPaStatRadiusAcctAgentTotalSessVars, apmPmStatInspDeviceInfoError=apmPmStatInspDeviceInfoError, apmPcStatTable=apmPcStatTable, apmPaStatEpsWinOsInfoAgentTotalSuccesses=apmPaStatEpsWinOsInfoAgentTotalSuccesses, PYSNMP_MODULE_ID=bigipApm, apmPaStatApdNoDeviceInfoErrors=apmPaStatApdNoDeviceInfoErrors, apmPaStatEpsMacFcAgentTotalFailures=apmPaStatEpsMacFcAgentTotalFailures, apmPaStatEpsLinuxFcAgentTotalUsages=apmPaStatEpsLinuxFcAgentTotalUsages, apmPaStatName=apmPaStatName, apmPaStatLdapAgentTotalSuccesses=apmPaStatLdapAgentTotalSuccesses, 
apmPaStatRaAgentTotalErrors=apmPaStatRaAgentTotalErrors, apmPaStatAdAgentTotalSuccesses=apmPaStatAdAgentTotalSuccesses, apmPrStatServerReqBytes=apmPrStatServerReqBytes, apmPgStatInstances=apmPgStatInstances, apmPaStatLogonAgentTotalErrors=apmPaStatLogonAgentTotalErrors, apmPaStatEpsMacFcAgentTotalSuccesses=apmPaStatEpsMacFcAgentTotalSuccesses, apmPaStatAdAgentTotalSessVars=apmPaStatAdAgentTotalSessVars, apmPaStatRdsAgentTotalErrors=apmPaStatRdsAgentTotalErrors, apmPaStatEpsLinuxFcAgentTotalSessVars=apmPaStatEpsLinuxFcAgentTotalSessVars, apmPmStatInspSignatureError=apmPmStatInspSignatureError, apmPaStatEndingRedirectAgentTotalErrors=apmPaStatEndingRedirectAgentTotalErrors, apmPmStatName=apmPmStatName, apmPaStatApdApInternalErrors=apmPaStatApdApInternalErrors, apmPmStatResultError=apmPmStatResultError, apmPrStatClientRespBytes=apmPrStatClientRespBytes, apmPaStatHttpAgentTotalSuccesses=apmPaStatHttpAgentTotalSuccesses, apmPaStatIeAgentTotalErrors=apmPaStatIeAgentTotalErrors, apmPaStatApdNoSessionErrors=apmPaStatApdNoSessionErrors, apmPaStatApdMcPipelineInitErrors=apmPaStatApdMcPipelineInitErrors, apmPaStatHttpAgentTotalFailures=apmPaStatHttpAgentTotalFailures, apmPaStatEpsWinRcAgentTotalFailures=apmPaStatEpsWinRcAgentTotalFailures, apmPaStatRadiusAcctAgentTotalErrors=apmPaStatRadiusAcctAgentTotalErrors, apmPaStatEpsWinMcAgentTotalSessVars=apmPaStatEpsWinMcAgentTotalSessVars, apmPaStatEpsAvAgentTotalInstances=apmPaStatEpsAvAgentTotalInstances, apmLeasepoolStatName=apmLeasepoolStatName, apmPaStatRaAgentTotalSuccesses=apmPaStatRaAgentTotalSuccesses, apmPaStatRadiusAgentTotalUsages=apmPaStatRadiusAgentTotalUsages, apmPaStatApdTotalMismatchErrors=apmPaStatApdTotalMismatchErrors, apmPrStatGroup=apmPrStatGroup, apmPcStatTotConns=apmPcStatTotConns, apmPaStatEpsWinFcAgentTotalSessVars=apmPaStatEpsWinFcAgentTotalSessVars, apmPaStatEpsFwcAgentTotalUsages=apmPaStatEpsFwcAgentTotalUsages, apmPaStatIeAgentTotalFailures=apmPaStatIeAgentTotalFailures, 
apmPmStatInspTokenError=apmPmStatInspTokenError, apmPaStatSecuridAgentTotalInstances=apmPaStatSecuridAgentTotalInstances, apmPaStatVsName=apmPaStatVsName, apmPaStatRaAgentTotalUsages=apmPaStatRaAgentTotalUsages, apmPaStatExternalLogonAgentTotalFailures=apmPaStatExternalLogonAgentTotalFailures, apmPaStatEpsWinOsInfoAgentTotalSessVars=apmPaStatEpsWinOsInfoAgentTotalSessVars, apmAccessStatTotalSessions=apmAccessStatTotalSessions, apmPaStatClientCertAgentTotalErrors=apmPaStatClientCertAgentTotalErrors, apmPaStatEpsWinOsInfoAgentTotalFailures=apmPaStatEpsWinOsInfoAgentTotalFailures, apmPaStatEpsWinOsInfoAgentTotalErrors=apmPaStatEpsWinOsInfoAgentTotalErrors, apmPaStatRdsAgentTotalInstances=apmPaStatRdsAgentTotalInstances, apmPmStatInspDataMsmtchError=apmPmStatInspDataMsmtchError, apmPaStatTotalEstablishedStateSessions=apmPaStatTotalEstablishedStateSessions, apmPaStatUserLoggedoutSessions=apmPaStatUserLoggedoutSessions, apmProfileRewriteStat=apmProfileRewriteStat, apmProfileAccessAgentStat=apmProfileAccessAgentStat, apmPaStatRadiusAgentTotalInstances=apmPaStatRadiusAgentTotalInstances, apmPaStatEpsWinMcAgentTotalSuccesses=apmPaStatEpsWinMcAgentTotalSuccesses, apmPaStatEndingAllowAgentTotalSessVars=apmPaStatEndingAllowAgentTotalSessVars, apmLeasepoolStatTotReleaseFailure=apmLeasepoolStatTotReleaseFailure, apmGlobalRewriteStatServerResps=apmGlobalRewriteStatServerResps, apmLeasepoolStatResetStats=apmLeasepoolStatResetStats, apmGlobalConnectivityStatGroup=apmGlobalConnectivityStatGroup, apmPrStatServerResps=apmPrStatServerResps, apmPaStatHttpAgentTotalErrors=apmPaStatHttpAgentTotalErrors, apmPaStatEndingAllowAgentTotalFailures=apmPaStatEndingAllowAgentTotalFailures, apmPaStatMesgBoxAgentTotalUsages=apmPaStatMesgBoxAgentTotalUsages, apmPmStatTable=apmPmStatTable, apmPaStatEpsWinGpAgentTotalInstances=apmPaStatEpsWinGpAgentTotalInstances, apmPaStatEndingRedirectAgentTotalUsages=apmPaStatEndingRedirectAgentTotalUsages, 
apmPaStatEpsAvAgentTotalSuccesses=apmPaStatEpsAvAgentTotalSuccesses, apmPaStatEpsWinPcTotalSessVars=apmPaStatEpsWinPcTotalSessVars, apmPaStatNumber=apmPaStatNumber, apmPaStatHttpAgentTotalSessVars=apmPaStatHttpAgentTotalSessVars, apmPaStatEpsWinRcAgentTotalErrors=apmPaStatEpsWinRcAgentTotalErrors, apmPaStatEpsWinPcTotalSuccesses=apmPaStatEpsWinPcTotalSuccesses, apmProfiles=apmProfiles, apmGlobalConnectivityStatTotConns=apmGlobalConnectivityStatTotConns, apmPaStatIeAgentTotalSuccesses=apmPaStatIeAgentTotalSuccesses, apmPaStatAccessPolicyResultDeny=apmPaStatAccessPolicyResultDeny, apmPaStatEpsLinuxPcAgentTotalErrors=apmPaStatEpsLinuxPcAgentTotalErrors, apmGlobalRewriteStatGroup=apmGlobalRewriteStatGroup, apmPgStatName=apmPgStatName, apmPaStatClientCertAgentTotalInstances=apmPaStatClientCertAgentTotalInstances, apmPaStatEpsMacFcAgentTotalSessVars=apmPaStatEpsMacFcAgentTotalSessVars, apmGlobalRewriteStatClientReqBytes=apmGlobalRewriteStatClientReqBytes, apmPaStatEpsMacFcAgentTotalUsages=apmPaStatEpsMacFcAgentTotalUsages, apmPaStatEpsWinGpAgentTotalSuccesses=apmPaStatEpsWinGpAgentTotalSuccesses, apmPaStatEpsAvAgentTotalUsages=apmPaStatEpsAvAgentTotalUsages, apmPaStatEpsWinMcAgentTotalInstances=apmPaStatEpsWinMcAgentTotalInstances, apmPaStatEpsLinuxFcAgentTotalFailures=apmPaStatEpsLinuxFcAgentTotalFailures, apmPaStatVaAgentTotalSessVars=apmPaStatVaAgentTotalSessVars, apmPaStatLoggingAgentTotalSessVars=apmPaStatLoggingAgentTotalSessVars, apmPaStatEpsWinPcTotalErrors=apmPaStatEpsWinPcTotalErrors, apmPaStatLogonAgentTotalInstances=apmPaStatLogonAgentTotalInstances, bigipApmGroups=bigipApmGroups, apmGlobalConnectivityStatIngressCompressed=apmGlobalConnectivityStatIngressCompressed, apmPaStatLdapAgentTotalInstances=apmPaStatLdapAgentTotalInstances, apmPaStatEndingRedirectAgentTotalInstances=apmPaStatEndingRedirectAgentTotalInstances)
mibBuilder.exportSymbols("F5-BIGIP-APM-MIB", apmPaStatEpsWinPwTotalErrors=apmPaStatEpsWinPwTotalErrors, apmLeasepoolStatMaxAssigned=apmLeasepoolStatMaxAssigned, apmPaStatExternalLogonAgentTotalErrors=apmPaStatExternalLogonAgentTotalErrors, apmPaStatClientCertAgentTotalSuccesses=apmPaStatClientCertAgentTotalSuccesses, apmPaStatEpsWinMcAgentTotalFailures=apmPaStatEpsWinMcAgentTotalFailures, apmPgStatUsages=apmPgStatUsages, apmAccessStatCurrentActiveSessions=apmAccessStatCurrentActiveSessions, apmPaStatEpsWinGpAgentTotalSessVars=apmPaStatEpsWinGpAgentTotalSessVars, apmPaStatAllowedRequests=apmPaStatAllowedRequests, apmPmStatNumber=apmPmStatNumber, apmPaStatLogonAgentTotalFailures=apmPaStatLogonAgentTotalFailures, apmPaStatGroup=apmPaStatGroup, apmGlobalRewriteStat=apmGlobalRewriteStat, apmPaStatRadiusAcctAgentTotalSuccesses=apmPaStatRadiusAcctAgentTotalSuccesses, apmPaStatEpsMacPcAgentTotalInstances=apmPaStatEpsMacPcAgentTotalInstances, apmGlobalConnectivityStatEgressCompressed=apmGlobalConnectivityStatEgressCompressed, apmGlobalRewriteStatServerReqBytes=apmGlobalRewriteStatServerReqBytes, apmPaStatEpsWinFcAgentTotalSuccesses=apmPaStatEpsWinFcAgentTotalSuccesses, apmPaStatEpsFwcAgentTotalSessVars=apmPaStatEpsFwcAgentTotalSessVars, apmLeasepoolStatEntry=apmLeasepoolStatEntry, apmLeasepoolStatTotPickRequests=apmLeasepoolStatTotPickRequests, apmPaStatSecuridAgentTotalUsages=apmPaStatSecuridAgentTotalUsages, apmPgStatSuccesses=apmPgStatSuccesses, apmPaStatClientCertAgentTotalSessVars=apmPaStatClientCertAgentTotalSessVars, apmPaStatEpsWinMcAgentTotalErrors=apmPaStatEpsWinMcAgentTotalErrors, apmPrStatClientResps=apmPrStatClientResps, apmGlobalConnectivityStatCurConns=apmGlobalConnectivityStatCurConns, apmPaStatEpsWinPwTotalSessVars=apmPaStatEpsWinPwTotalSessVars, apmPgStatSessionVars=apmPgStatSessionVars, apmPaStatMiscTerminatedSessions=apmPaStatMiscTerminatedSessions, apmPgStatEntry=apmPgStatEntry, 
apmPaStatAccessPolicyResultRedirectWithSession=apmPaStatAccessPolicyResultRedirectWithSession, apmPgStatAgentName=apmPgStatAgentName, apmPaStatDecnBoxAgentTotalInstances=apmPaStatDecnBoxAgentTotalInstances, apmPaStatClientCertAgentTotalFailures=apmPaStatClientCertAgentTotalFailures, apmPaStatEndingRedirectAgentTotalFailures=apmPaStatEndingRedirectAgentTotalFailures, apmPaStatEpsLinuxPcAgentTotalInstances=apmPaStatEpsLinuxPcAgentTotalInstances, apmPaStatDecnBoxAgentTotalFailures=apmPaStatDecnBoxAgentTotalFailures, apmPgStatGroup=apmPgStatGroup, apmPaStatEpsLinuxFcAgentTotalErrors=apmPaStatEpsLinuxFcAgentTotalErrors, apmPaStatEpsWinPcTotalUsages=apmPaStatEpsWinPcTotalUsages, apmPaStatHttpAgentTotalUsages=apmPaStatHttpAgentTotalUsages, apmPcStatName=apmPcStatName, apmPaStatEpsLinuxPcAgentTotalSessVars=apmPaStatEpsLinuxPcAgentTotalSessVars, apmAclStatActionReject=apmAclStatActionReject, apmPcStatMaxConns=apmPcStatMaxConns, apmPaStatRadiusAgentTotalSessVars=apmPaStatRadiusAgentTotalSessVars, apmPaStatEndingDenyAgentTotalSessVars=apmPaStatEndingDenyAgentTotalSessVars, apmPgStatTable=apmPgStatTable, apmPaStatRdsAgentTotalSessVars=apmPaStatRdsAgentTotalSessVars, apmPmStatInspResultError=apmPmStatInspResultError, apmProfileConnectivityStat=apmProfileConnectivityStat, apmPcStatGroup=apmPcStatGroup, apmPaStatAccessPolicyResultAllow=apmPaStatAccessPolicyResultAllow, apmPaStatEpsWinOsInfoAgentTotalUsages=apmPaStatEpsWinOsInfoAgentTotalUsages, apmPaStatEpsFwcAgentTotalFailures=apmPaStatEpsFwcAgentTotalFailures, apmPrStatClientReqs=apmPrStatClientReqs, apmGlobalRewriteStatResetStats=apmGlobalRewriteStatResetStats, apmPaStatLogonAgentTotalSessVars=apmPaStatLogonAgentTotalSessVars, apmPcStatCurConns=apmPcStatCurConns, apmPaStatConfigSyncState=apmPaStatConfigSyncState, apmLeasepoolStatTotReleaseRequests=apmLeasepoolStatTotReleaseRequests, apmPmStatMemSessionVarError=apmPmStatMemSessionVarError, apmPaStatEpsWinFcAgentTotalErrors=apmPaStatEpsWinFcAgentTotalErrors, 
apmAccessStatResultAllow=apmAccessStatResultAllow, apmPaStatLoggingAgentTotalFailures=apmPaStatLoggingAgentTotalFailures, apmPaStatDecnBoxAgentTotalSessVars=apmPaStatDecnBoxAgentTotalSessVars, apmPaStatDeniedRequests=apmPaStatDeniedRequests, apmGlobalRewriteStatClientReqs=apmGlobalRewriteStatClientReqs, apmAccessStatAdminTerminatedSessions=apmAccessStatAdminTerminatedSessions, apmPaStatEndingDenyAgentTotalUsages=apmPaStatEndingDenyAgentTotalUsages, apmPaStatEpsMacPcAgentTotalSessVars=apmPaStatEpsMacPcAgentTotalSessVars, apmLeasepoolStatCurFree=apmLeasepoolStatCurFree, apmPaStatMesgBoxAgentTotalSuccesses=apmPaStatMesgBoxAgentTotalSuccesses, apmPaStatEpsWinCcAgentTotalInstances=apmPaStatEpsWinCcAgentTotalInstances, apmLeasepoolStatGroup=apmLeasepoolStatGroup, apmPaStatEndingDenyAgentTotalInstances=apmPaStatEndingDenyAgentTotalInstances, apmGlobalConnectivityStat=apmGlobalConnectivityStat, apmPaStatTable=apmPaStatTable, apmGlobalRewriteStatServerRespBytes=apmGlobalRewriteStatServerRespBytes, apmPaStatEpsAvAgentTotalErrors=apmPaStatEpsAvAgentTotalErrors, apmPaStatEpsWinPwTotalInstances=apmPaStatEpsWinPwTotalInstances, apmAccessStat=apmAccessStat, apmPaStatEpsWinOsInfoAgentTotalInstances=apmPaStatEpsWinOsInfoAgentTotalInstances, apmPcStatEgressCompressed=apmPcStatEgressCompressed, apmPmStatInspClientSignError=apmPmStatInspClientSignError, apmPaStatRaAgentTotalSessVars=apmPaStatRaAgentTotalSessVars, apmPaStatResetStats=apmPaStatResetStats, apmPcStatResetStats=apmPcStatResetStats, apmPaStatMesgBoxAgentTotalSessVars=apmPaStatMesgBoxAgentTotalSessVars, apmPaStatEpsLinuxFcAgentTotalInstances=apmPaStatEpsLinuxFcAgentTotalInstances, apmLeasepoolStatNumber=apmLeasepoolStatNumber, apmPaStatCurrentActiveSessions=apmPaStatCurrentActiveSessions, apmPaStatEpsWinFcAgentTotalFailures=apmPaStatEpsWinFcAgentTotalFailures, apmAclStatActionContinue=apmAclStatActionContinue, apmPaStatEpsWinMcAgentTotalUsages=apmPaStatEpsWinMcAgentTotalUsages, 
apmPaStatLdapAgentTotalUsages=apmPaStatLdapAgentTotalUsages, apmPmStatEntry=apmPmStatEntry, apmAclStatEntry=apmAclStatEntry, apmPaStatMesgBoxAgentTotalInstances=apmPaStatMesgBoxAgentTotalInstances, apmAccessStatResultRedirect=apmAccessStatResultRedirect, apmPaStatEpsWinPcTotalFailures=apmPaStatEpsWinPcTotalFailures, apmAclStat=apmAclStat, apmPaStatExternalLogonAgentTotalSuccesses=apmPaStatExternalLogonAgentTotalSuccesses, apmPaStatRaAgentTotalFailures=apmPaStatRaAgentTotalFailures, apmPaStatApdInvalidSigErrors=apmPaStatApdInvalidSigErrors, apmPaStatEpsAvAgentTotalSessVars=apmPaStatEpsAvAgentTotalSessVars, apmPaStatLdapAgentTotalErrors=apmPaStatLdapAgentTotalErrors, apmPaStatLoggingAgentTotalSuccesses=apmPaStatLoggingAgentTotalSuccesses, apmPaStatEpsMacFcAgentTotalInstances=apmPaStatEpsMacFcAgentTotalInstances, apmPaStatAdAgentTotalUsages=apmPaStatAdAgentTotalUsages, apmPaStatEndingDenyAgentTotalSuccesses=apmPaStatEndingDenyAgentTotalSuccesses)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
35901516fe1969c53a2c4655c745d03c2b532bdb | 36aea5790cc01c652326f6f5a5722f13ee9d498b | /minihack/capname.py | 86b963c86d8403610ea7a9da222fde26542bfa71 | [] | no_license | kev158/NguyenTrongDuc-c4t | 2d9162a69dfa87e8ee24b93e4a72dc8811031cff | e1fba30f06c77bb7ab3271475d7ba6da9771ae09 | refs/heads/master | 2020-04-18T22:04:00.041524 | 2019-04-20T14:59:10 | 2019-04-20T14:59:10 | 167,783,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | x=input("Ho:")
y=input("Ten:")
print("ho va ten:", x,y)
| [
"trongduc811@gmail.com"
] | trongduc811@gmail.com |
c709879b1fee60eecdc644534c5f072428a76609 | 31eaed64b0caeda5c5fe3603609402034e6eb7be | /python_zumbi/py_functions/ler_e_gravar_arquivo_CSV.py | e8d6a926b35a01ceccefbd8155a6cdd818c3a912 | [] | no_license | RaphaelfsOliveira/workspace_python | 93657b581043176ecffb5783de208c0a00924832 | 90959697687b9398cc48146461750942802933b3 | refs/heads/master | 2021-01-11T17:39:49.574875 | 2017-06-28T20:55:43 | 2017-06-28T20:55:43 | 79,814,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | import csv
arq_name = "test"
title = 'perfumes'
name_prod = 'perfume-feminino'
url_prod = 'http://www.epocacosmeticos.com.br/perfumes/perfume-feminino'
rows = ['teste','teste']
def save_urls(arq_name, rows):
arq = csv.writer(open(arq_name + '.csv', "w"))
arq.writerow(rows)
print(rows)
#print(arq)
| [
"raphaelbrf@gmail.com"
] | raphaelbrf@gmail.com |
3be5e6031a6351f732e4aa3e3ecf6dc74d11eb6c | f5c62bab2e95bb2dc6986ba271662ade8cae4da0 | /docs/PythonSAI/LineProperties.py | c3e7401f40524540e570646be946433183820cfd | [] | no_license | Has3ong/X3DViewer | d211b159c29523e61158eddc015bb320e4ba7c9d | c629305c24b5c25fd41d3a46816efbf1f74d0092 | refs/heads/master | 2021-06-25T16:36:46.278469 | 2021-01-03T11:26:02 | 2021-01-03T11:26:02 | 180,564,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | from . import *
# LineProperties defines a concrete node interface that extends interface X3DAppearanceChildNode.
class CLineProperties(CX3DAppearanceChildNode):
    """Concrete X3D LineProperties node extending X3DAppearanceChildNode.

    All field accessors are generated stubs (they return/accept nothing yet).
    """
    m_strNodeName = "LineProperties"

    def __init__(self):
        """Initialise bookkeeping fields shared by all scene-graph nodes."""
        self.m_strNodeName = "LineProperties"
        self.m_Parent = [None]
        self.children = []
        self.DEF = ""
        self.USE = ""
        self.n_Count = -1
        self.depth = 0

    def getApplied(self):
        """Return boolean from the SFBool inputOutput field "applied"."""
        pass

    def setApplied(self, value):
        """Assign boolean to the SFBool inputOutput field "applied"."""
        pass

    def getLinetype(self):
        """Return int from the SFInt32 inputOutput field "linetype"."""
        pass

    def setLinetype(self, value):
        """Assign int to the SFInt32 inputOutput field "linetype"."""
        pass

    def getLinewidthScaleFactor(self):
        """Return float from the SFFloat inputOutput field "linewidthScaleFactor"."""
        pass

    def setLinewidthScaleFactor(self, value):
        """Assign float to the SFFloat inputOutput field "linewidthScaleFactor"."""
        pass

    # ----- methods for fields inherited from parent interfaces -----

    def getMetadata(self):
        """Return the X3DMetadataObject from the SFNode inputOutput field "metadata"."""
        pass

    def setMetadata1(self, node):
        """Assign an X3DMetadataObject (typed node) to the "metadata" field."""
        pass

    def setMetadata2(self, protoInstance):
        """Assign an X3DMetadataObject (typed protoInstance) to the "metadata" field."""
        pass
"khsh5592@naver.com"
] | khsh5592@naver.com |
4dc0710a308eb43121ff85c314929338cc1ad68d | f98c45d0079479b10c8276693dc31c704ccc087f | /api/apps/goods/models.py | 9f7696a669fbacebb0c26a81978d4226843c2828 | [
"MIT"
] | permissive | TasHole/tokyo | b78c84d31b5c459a8a508fd671151a825db55835 | d4e0b2cce2aae53d93cb2bbbd2ca12ff0aa6a219 | refs/heads/master | 2020-12-21T13:29:31.626154 | 2019-10-12T03:03:34 | 2019-10-12T03:03:34 | 236,445,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,857 | py | from datetime import datetime
from django.db import models
class GoodsCategory(models.Model):
    """
    Product category: a three-level tree (self-referential parent FK).
    Japanese verbose_name/help_text strings are user-facing and kept as-is.
    """
    CATEGORY_TYPE = (
        (1, "一級カテゴリー"),
        (2, "二級カテゴリー"),
        (3, "三級カテゴリー")
    )
    name = models.CharField(default="", max_length=50, verbose_name="カテゴリー名", help_text="カテゴリー名")
    code = models.CharField(default="", max_length=30, verbose_name="カテゴリーコード", help_text="カテゴリーコード")
    desc = models.TextField(default="", verbose_name="カテゴリー説明", help_text="カテゴリー説明")
    category_type = models.IntegerField(choices=CATEGORY_TYPE, verbose_name="カテゴリーレベル", help_text="カテゴリーレベル")
    # Self-referential FK: the parent category; null/blank for top-level entries.
    parent_category = models.ForeignKey("self", null=True, blank=True, verbose_name="親カテゴリー", help_text="親カテゴリー",
                                        on_delete=models.CASCADE, related_name="sub_cat")
    # Whether this category appears as a navigation tab.
    is_tab = models.BooleanField(default=False, verbose_name="ナビなのか", help_text="ナビなのか")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "商品カテゴリー"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class GoodsCategoryBrand(models.Model):
    """
    Brand, optionally attached to a category.
    """
    category = models.ForeignKey(GoodsCategory, related_name="brands", null=True, blank=True,
                                 verbose_name="商品カテゴリー名", on_delete=models.CASCADE)
    name = models.CharField(default="", max_length=30, verbose_name="ブランド名", help_text="ブランド名")
    desc = models.CharField(default="", max_length=200, verbose_name="ブランド説明", help_text="ブランド説明")
    image = models.ImageField(max_length=200, upload_to="brands/")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "ブランド"
        verbose_name_plural = verbose_name
        # Explicit table name kept for backward compatibility with existing data.
        db_table = "goods_goodsbrand"
    def __str__(self):
        return self.name
class Goods(models.Model):
    """
    Product: pricing, stock, counters and display flags.
    """
    category = models.ForeignKey(GoodsCategory, null=True, blank=True,
                                 verbose_name="商品カテゴリー", on_delete=models.CASCADE)
    # SKU / article number.
    goods_sn = models.CharField(max_length=50, default="", verbose_name="商品識別番号")
    name = models.CharField(max_length=100, verbose_name="商品名")
    click_num = models.IntegerField(default=0, verbose_name="クリック数")
    sold_num = models.IntegerField(default=0, verbose_name="販売数")
    fav_num = models.IntegerField(default=0, verbose_name="お気に入り登録数")
    goods_num = models.IntegerField(default=0, verbose_name="在庫数")
    market_price = models.FloatField(default=0, verbose_name="原価")
    shop_price = models.FloatField(default=0, verbose_name="販売値段")
    goods_brief = models.TextField(max_length=500, verbose_name="商品説明")
    ship_free = models.BooleanField(default=True, verbose_name="送料負担")
    goods_front_image = models.ImageField(max_length=200, upload_to="goods/images/",
                                          null=True, blank=True, verbose_name="表紙")
    is_new = models.BooleanField(default=False, verbose_name="新品なのか")
    is_hot = models.BooleanField(default=False, verbose_name="売れているのか")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "商品"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class GoodsImage(models.Model):
    """
    Carousel (swiper) images belonging to a product detail page.
    """
    goods = models.ForeignKey(Goods, verbose_name="商品", related_name="images", on_delete=models.CASCADE)
    image = models.ImageField(upload_to="", verbose_name="画像", null=True, blank=True)
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "商品swiperImages"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.goods.name
class Banner(models.Model):
    """
    Home-page carousel banner for a product; `index` controls slide order.
    """
    goods = models.ForeignKey(Goods, verbose_name="商品", on_delete=models.CASCADE)
    image = models.ImageField(upload_to="banner", verbose_name="ホームページswiper用画像")
    index = models.IntegerField(default=0, verbose_name="swiper順番")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "swiper用の商品image"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.goods.name
class IndexAd(models.Model):
    """Home-page advert: one featured product per category."""
    category = models.ForeignKey(GoodsCategory, related_name="category",
                                 verbose_name="商品カテゴリー", on_delete=models.CASCADE)
    goods = models.ForeignKey(Goods, related_name='goods', on_delete=models.CASCADE)
    class Meta:
        verbose_name = "ホームページ商品カテゴリー広告"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.goods.name
class HotSearchWords(models.Model):
    """
    Hot search keyword shown on the search page; `index` is the sort order.
    """
    keywords = models.CharField(default="", max_length=20, verbose_name="人気キーワード")
    index = models.IntegerField(default=0, verbose_name="並び順")
    add_time = models.DateTimeField(default=datetime.now, verbose_name="挿入時間")
    class Meta:
        verbose_name = "人気キーワード"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.keywords
"txy1226052@gmail.com"
] | txy1226052@gmail.com |
518d0d93c4e09549a7524784f57e483f1929f267 | 59deb6307b1a55a043f944f00d9e929b97ca042c | /softdashdj/wsgi.py | c0672262f57c4bfc068f3db09f1f37cc97083d0a | [] | no_license | abykal/softdashdj | d0be04f4786bf1f72b8ea77e5875da9d104ab503 | e969d348461698f1e45392b3007952732361e9ce | refs/heads/main | 2023-06-17T10:15:19.536349 | 2021-07-09T09:16:55 | 2021-07-09T09:16:55 | 383,747,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for softdashdj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'softdashdj.settings')
# WSGI callable used by application servers (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"abyabrkal@gmail.com"
] | abyabrkal@gmail.com |
bc7d6aaa70db83515dcb9be6218ae064862630e5 | b563e04d91dcd5169b83fbfba840c16a882f7357 | /filechanger.py | 4384db93fcb61bc1ec51600c25b56c4ab7ff7868 | [] | no_license | tomfa/filechanger | 7765d248799815f9980a3151a63f16738ef9c389 | e9d0f4e6543557eb4bb8fd84a5d3cc74466d81f5 | refs/heads/master | 2020-07-06T02:11:21.699066 | 2016-11-24T18:58:29 | 2016-11-24T18:58:29 | 73,969,133 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,591 | py | #coding: utf-8
import os, sys
# Extensions that are never renamed (the tool's own script files).
IGNORED_FILE_ENDINGS = [".py", ".bat"]
# Actions accepted on the command line and at the interactive prompt.
VALID_ACTIONS = ["lower", "upper", "remove", "insert", "replace"]
def _get_file_ext(file):
"""
>>> _get_file_ext("C:\\Program Files\test.txt")
'.txt'
>>> _get_file_ext("/Users/tomas/.bash_rc")
''
"""
return os.path.splitext(file)[1]
def _confirm_continue(text):
    # Block until the user presses Enter (Python 2: raw_input). The typed
    # value is intentionally discarded.
    raw_input(text)
def _input(helptext, valid_input=None):
    """Prompt the user; when valid_input is given, re-ask until the answer matches."""
    if valid_input:
        prompt = helptext + "\n(" + ", ".join(valid_input) + ")" + "\n> "
    else:
        prompt = helptext + "\n> "
    answer = raw_input(prompt)
    if valid_input:
        while answer not in valid_input:
            print("Valid input is " + ", ".join(valid_input))
            answer = raw_input("> ").lower()
    return answer
def _print_usage():
print("Usage:")
print("python renamer ACTION POSITION (TEXT)")
print("- ACTION: replace, insert, remove, upper, lower")
print("- POSITION: position eller start_position:end_position")
def _get_files_to_be_converted(dir, recursive):
    """Return the files under *dir* eligible for renaming.

    Skips directories and files whose extension is in IGNORED_FILE_ENDINGS;
    in recursive mode also skips anything under a ".git" path.
    Non-recursive mode returns absolute paths; recursive mode returns paths
    joined onto *dir* (original behaviour kept for callers).
    """
    if not recursive:
        # BUGFIX: the original tested os.path.isdir(x) / _get_file_ext(x) on the
        # bare entry name, which resolves against the CWD instead of *dir* --
        # so subdirectories slipped through whenever dir != CWD.
        result = []
        for entry in os.listdir(dir):
            full = os.path.join(dir, entry)
            if os.path.isdir(full):
                continue
            if _get_file_ext(entry) in IGNORED_FILE_ENDINGS:
                continue
            result.append(os.path.abspath(full))
        return result
    convert_files = []
    for root, subFolders, files in os.walk(dir):
        if ".git" in root:
            # Never touch git metadata.
            continue
        for file in files:
            if _get_file_ext(file) not in IGNORED_FILE_ENDINGS:
                convert_files.append(os.path.join(root, file))
    return convert_files
def _to_upper_case(text, start_pos_pos, end_pos):
"""
>>> _to_upper_case("tomas", 3, 3)
'toMas'
>>> _to_upper_case("tomas", 3, 50)
'toMAS'
"""
if end_pos == None:
end_pos = start_pos_pos
first_part = text[:start_pos_pos-1]
last_part = text[end_pos:]
upper_case_text = text[start_pos_pos-1:end_pos].upper()
return first_part + upper_case_text + last_part
def _to_lower_case(text, start_pos_pos, end_pos):
"""
>>> _to_lower_case("TOMAS", 3, 4)
'TOmaS'
>>> _to_lower_case("TOMS", 3, 50)
'TOms'
>>> _to_lower_case("tomas", 3, 50)
'tomas'
"""
if end_pos == None:
end_pos = start_pos_pos
first_part = text[:start_pos_pos-1]
last_part = text[end_pos:]
lower_case_text = text[start_pos_pos-1:end_pos].lower()
return first_part + lower_case_text + last_part
def _remove_at_pos(text, start_pos_pos, end_pos):
"""
>>> _remove_at_pos("tomas", 3, 4)
'tos'
>>> _remove_at_pos("tomas", 3, 50)
'to'
"""
if end_pos == None:
return text[:start_pos_pos-1] + text[start_pos_pos:]
else:
return text[:start_pos_pos-1] + text[end_pos:]
def _handle_special_input(arg):
if arg == "help":
_print_usage()
return True
if arg == "test":
import doctest
doctest.testmod()
return True
return False
def _insert_at_pos(text, insert_text, pos):
"""
>>> _insert_at_pos("tomas", "Fi", 3)
'toFimas'
>>> _insert_at_pos("tomas", "Fi", 1)
'Fitomas'
"""
return text[:pos-1] + insert_text + text[pos-1:]
def upper(working_dir, start_pos, end_pos, recursive=False):
    """Upper-case the 1-based range [start_pos, end_pos] in every filename."""
    rename(working_dir, action='upper', start_pos=start_pos, end_pos=end_pos,
           recursive=recursive)
def lower(working_dir, start_pos, end_pos, recursive=False):
    """Lower-case the 1-based range [start_pos, end_pos] in every filename."""
    rename(working_dir, action='lower', start_pos=start_pos, end_pos=end_pos,
           recursive=recursive)
def remove(working_dir, start_pos, end_pos, recursive=False):
    """Delete the 1-based range [start_pos, end_pos] from every filename."""
    rename(working_dir, action='remove', start_pos=start_pos, end_pos=end_pos,
           recursive=recursive)
def insert(working_dir, start_pos, insert_text, recursive=False):
    """Insert *insert_text* at start_pos in every filename."""
    rename(working_dir, action='insert', start_pos=start_pos, end_pos=start_pos,
           recursive=recursive, insert_text=insert_text)
def replace(working_dir, start_pos, end_pos, insert_text, recursive=False):
    """Replace the 1-based range [start_pos, end_pos] with *insert_text*."""
    rename(working_dir, action='replace', start_pos=start_pos, end_pos=end_pos,
           recursive=recursive, insert_text=insert_text)
def rename(working_dir, action, start_pos, end_pos, recursive, insert_text=None, quiet=True):
    # Apply *action* to every eligible filename under working_dir, renaming
    # files on disk via os.rename. With quiet=False the first change asks the
    # user to confirm before anything is renamed.
    has_confirmed = False
    # NOTE(review): current_filename is never used below.
    current_filename = ""
    changed_files = 0
    files_to_be_converted = _get_files_to_be_converted(working_dir, recursive)
    for file in files_to_be_converted:
        path, filename = os.path.split(file)
        if (action.lower() == "lower"):
            new_filename = _to_lower_case(filename, start_pos, end_pos)
        elif (action.lower() == "upper"):
            new_filename = _to_upper_case(filename, start_pos, end_pos)
        elif (action.lower() == "remove"):
            new_filename = _remove_at_pos(filename, start_pos, end_pos)
        elif (action.lower() == "insert"):
            new_filename = _insert_at_pos(filename, insert_text, start_pos)
        elif (action.lower() == "replace"):
            # replace == remove the range, then insert the new text in its place
            new_filename = _remove_at_pos(filename, start_pos, end_pos)
            new_filename = _insert_at_pos(new_filename, insert_text, start_pos)
        else:
            _print_usage()
            return
        # Skip no-op renames so the confirmation prompt is only shown when
        # something would actually change.
        if new_filename == filename:
            continue
        if (not has_confirmed and not quiet):
            print("Will run (in folder " + working_dir + ") "
                  + action + " on position " + str(start_pos) + (" to " + str(end_pos) if end_pos else "") +
                  (" with the word " + insert_text if insert_text else ""))
            print("Original filename: " + filename)
            print("New filename: " + new_filename)
            _confirm_continue("Click Enter to continue...")
            has_confirmed = True
        changed_files += 1
        new_filepath = os.path.join(path, new_filename)
        print(file + " > " + new_filename)
        # Irreversible: renames the file on disk.
        os.rename(file, new_filepath)
    if changed_files:
        print("Changed " + str(changed_files) + " files")
    else:
        print("No files to be changed.")
if __name__ == '__main__':
    # Argument order: DIR ACTION POSITION [TEXT] [nonrecursive]
    # Anything missing on the command line is asked for interactively.
    if len(sys.argv) > 1:
        # "help" / "test" are handled and the program exits.
        if _handle_special_input(sys.argv[1]):
            exit()
        working_dir = sys.argv[1]
    else:
        working_dir = _input("Path to directory: ")
    while not os.path.exists(working_dir):
        print("Not a valid directory")
        working_dir = _input("Path to directory: ")
    working_dir = os.path.abspath(working_dir)
    action = None
    if len(sys.argv) > 2:
        action = sys.argv[2]
    if not action or action not in VALID_ACTIONS:
        action = _input("Choose action", VALID_ACTIONS)
    if len(sys.argv) > 3:
        # POSITION is either "N" or "START:END" (1-based, inclusive).
        position = sys.argv[3]
        if ":" in position:
            start_pos = int(position.split(":")[0])
            end_pos = int(position.split(":")[1])
        else:
            start_pos = int(position)
            end_pos = None
    else:
        start_pos = int(_input("Start position: "))
        if action != "insert":
            end_pos = int(_input("End position: "))
        else:
            # insert only needs a single position.
            end_pos = None
    insert_text = False
    recursive = True
    if action in ["insert", "replace"]:
        # These two actions take an extra TEXT argument.
        if (len(sys.argv) > 4):
            insert_text = sys.argv[4]
        else:
            insert_text = _input("Text to be inserted: ")
        if (len(sys.argv) > 5):
            recursive = sys.argv[5] != "nonrecursive"
        else:
            if (len(sys.argv) < 4):
                recursive = _input("Should subfolders be included?", ["y", "n"]) == "y"
    else:
        if (len(sys.argv) > 4):
            recursive = sys.argv[4] != "nonrecursive"
        else:
            recursive = _input("Should subfolders be included?", ["y", "n"]) == "y"
    # quiet=False: show the first change and ask for confirmation.
    rename(working_dir, action, start_pos, end_pos, recursive, insert_text, False)
    _confirm_continue("Click Enter to exit...")
| [
"tomas@webutvikling.org"
] | tomas@webutvikling.org |
7539bdceb81f567363b6d422c1297a92195ff9db | a17b81c68b9d6cba745f00aa6b1b26ca7dcd5cbc | /Final/Source Code/NBAstats/manage.py | 73181ca30d8ea012642eaf883fbd8527f97d1775 | [] | no_license | julianbcook/NBAStats | 2cd689a04c09cd66f1f6046bf087fd7721ed581e | 561c7e360fd12a2260f1b56f20bfd1d124550ff6 | refs/heads/master | 2020-04-09T06:14:34.077974 | 2019-08-09T02:36:30 | 2019-08-09T02:36:30 | 160,104,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the NBAstats project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NBAstats.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"jherrera1497@gmail.com"
] | jherrera1497@gmail.com |
d966c7041e447cabc4bf303ad9feffacf3c1e20b | 62902de8b202780ec95a63ea89667062fa8530ee | /2018_Fall/Data Structure/experiment 3/Huffmann Zipper.py | b0099316955a7a44062f8348f3716722c881a5cf | [] | no_license | ToniChopp/USTC-CS | 41af3cbc7aad57e6c1debbde840d1720e75b07c5 | 1cb8fd686e720fb7c98a95bcc4af7da5952c5e77 | refs/heads/master | 2023-08-28T03:59:15.172727 | 2021-06-08T15:40:30 | 2021-06-08T15:40:30 | 353,741,356 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 9,235 | py | import six
import tkinter as tk
import sys
window=tk.Tk()
window.title('Huffman Zipper')
window.geometry('500x400')
class HuffNode(object):
def get_wieght(self):
raise NotImplementedError(
"The Abstract Node Class doesn't define 'get_wieght'")
def isleaf(self):
raise NotImplementedError(
"The Abstract Node Class doesn't define 'isleaf'")
class LeafNode(HuffNode):
def __init__(self, value=0, freq=0, ):
super(LeafNode, self).__init__()
# 节点的值
self.value = value
self.wieght = freq
def isleaf(self):
return True
def get_wieght(self):
return self.wieght
def get_value(self):
return self.value
class IntlNode(HuffNode):
    # Internal node: weight is the sum of both children's weights.
    def __init__(self, left_child=None, right_child=None):
        super(IntlNode, self).__init__()
        # Combined weight of the two subtrees.
        self.wieght = left_child.get_wieght() + right_child.get_wieght()
        # Left and right subtrees (HuffNode instances).
        self.left_child = left_child
        self.right_child = right_child
    def isleaf(self):
        return False
    def get_wieght(self):
        return self.wieght
    def get_left(self):
        return self.left_child
    def get_right(self):
        return self.right_child
class HuffTree(object):
    # flag == 0 builds a single-leaf tree from (value, freq);
    # any other flag merges two existing trees under a new internal root.
    def __init__(self, flag, value=0, freq=0, left_tree=None, right_tree=None):
        super(HuffTree, self).__init__()
        if flag == 0:
            self.root = LeafNode(value, freq)
        else:
            self.root = IntlNode(left_tree.get_root(), right_tree.get_root())
    def get_root(self):
        return self.root
    def get_wieght(self):
        return self.root.get_wieght()
    def traverse_huffman_tree(self, root, code, char_freq):
        # Depth-first walk that writes each leaf's bit string into char_freq
        # (left edge appends '0', right edge appends '1'). NOTE: this
        # deliberately overwrites the frequency values with code strings.
        if root.isleaf():
            char_freq[root.get_value()] = code
            print(("it = %c and freq = %d code = %s") % (chr(root.get_value()), root.get_wieght(), code))
            return None
        else:
            self.traverse_huffman_tree(root.get_left(), code + '0', char_freq)
            self.traverse_huffman_tree(root.get_right(), code + '1', char_freq)
def buildHuffmanTree(list_hufftrees):
    """Merge single-node trees into one Huffman tree.

    Repeatedly combines the two lightest trees until only one remains.
    """
    trees = list_hufftrees
    while len(trees) > 1:
        # Lightest two first; Python's sort is stable, so ties keep order.
        trees.sort(key=lambda t: t.get_wieght())
        lighter, heavier = trees[0], trees[1]
        trees = trees[2:]
        trees.append(HuffTree(1, 0, 0, lighter, heavier))
    return trees[0]
def compress():
    """Huffman-compress the file named in the GUI 'input' entry into the
    file named in the 'output' entry.

    Output layout: 4-byte big-endian leaf count; per distinct byte value
    1 byte value + 4-byte big-endian frequency; then the packed bit stream,
    whose final partial byte is preceded by its bit length.

    BUGFIX: the original declared ``global output`` and rebound it to the
    open file object, clobbering the Tk StringVar so any later button press
    crashed. The file handle is now a local; the input handle is closed too.
    """
    inputfilename = input.get()
    outputfilename = output.get()
    f = open(inputfilename, 'rb')
    filedata = f.read()
    # f.tell() after read() gives the total byte count.
    filesize = f.tell()
    # Count how often each byte value occurs.
    char_freq = {}
    for x in range(filesize):
        tem = filedata[x]
        if tem in char_freq:
            char_freq[tem] = char_freq[tem] + 1
        else:
            char_freq[tem] = 1
    for tem in char_freq.keys():
        print(tem, ' : ', char_freq[tem])
    # One single-leaf Huffman tree per distinct byte value.
    list_hufftrees = []
    for x in char_freq.keys():
        tem = HuffTree(0, x, char_freq[x], None, None)
        list_hufftrees.append(tem)
    # Header: number of leaves, written as 4 big-endian bytes.
    length = len(char_freq.keys())
    out_file = open(outputfilename, 'wb')
    a4 = length & 255
    length = length >> 8
    a3 = length & 255
    length = length >> 8
    a2 = length & 255
    length = length >> 8
    a1 = length & 255
    out_file.write(six.int2byte(a1))
    out_file.write(six.int2byte(a2))
    out_file.write(six.int2byte(a3))
    out_file.write(six.int2byte(a4))
    # Per-symbol table: 1 byte value + 4-byte big-endian frequency.
    for x in char_freq.keys():
        out_file.write(six.int2byte(x))
        temp = char_freq[x]
        a4 = temp & 255
        temp = temp >> 8
        a3 = temp & 255
        temp = temp >> 8
        a2 = temp & 255
        temp = temp >> 8
        a1 = temp & 255
        out_file.write(six.int2byte(a1))
        out_file.write(six.int2byte(a2))
        out_file.write(six.int2byte(a3))
        out_file.write(six.int2byte(a4))
    # Build the tree; traverse_huffman_tree replaces the frequencies in
    # char_freq with each byte's bit-string code.
    tem = buildHuffmanTree(list_hufftrees)
    tem.traverse_huffman_tree(tem.get_root(), '', char_freq)
    # Emit the bit stream, flushing full bytes as they accumulate.
    code = ''
    for i in range(filesize):
        key = filedata[i]
        code = code + char_freq[key]
        out = 0
        while len(code) > 8:
            for x in range(8):
                out = out << 1
                if code[x] == '1':
                    out = out | 1
            code = code[8:]
            out_file.write(six.int2byte(out))
            out = 0
    # Trailer: bit length of the final (possibly partial) byte, then the
    # bits themselves left-aligned in one byte.
    out_file.write(six.int2byte(len(code)))
    out = 0
    for i in range(len(code)):
        out = out << 1
        if code[i] == '1':
            out = out | 1
    for i in range(8 - len(code)):
        out = out << 1
    out_file.write(six.int2byte(out))
    out_file.close()
    f.close()
def decompress():
    """Decompress the file named in the GUI 'input' entry into the file
    named in the 'output' entry (inverse of compress()).

    BUGFIXES vs. the original:
    - ``global output`` was rebound to the open file object, clobbering the
      Tk StringVar; the handle is now a local and both files are closed.
    - the final-byte slice ``code[-8:-8 + last_length]`` was empty when
      last_length == 8 (since -8 + 8 == 0), silently dropping the last
      byte's bits; the slice is now computed with non-negative indices.
    """
    inputfilename = input.get()
    outputfilename = output.get()
    f = open(inputfilename, 'rb')
    filedata = f.read()
    filesize = f.tell()
    # Header: leaf count as 4 big-endian bytes.
    a1 = filedata[0]
    a2 = filedata[1]
    a3 = filedata[2]
    a4 = filedata[3]
    j = 0
    j = j | a1
    j = j << 8
    j = j | a2
    j = j << 8
    j = j | a3
    j = j << 8
    j = j | a4
    leaf_node_size = j
    # Symbol table: rebuild the byte -> frequency map.
    char_freq = {}
    for i in range(leaf_node_size):
        c = filedata[4 + i * 5 + 0]
        a1 = filedata[4 + i * 5 + 1]
        a2 = filedata[4 + i * 5 + 2]
        a3 = filedata[4 + i * 5 + 3]
        a4 = filedata[4 + i * 5 + 4]
        j = 0
        j = j | a1
        j = j << 8
        j = j | a2
        j = j << 8
        j = j | a3
        j = j << 8
        j = j | a4
        print(c, j)
        char_freq[c] = j
    # Rebuild the Huffman tree exactly as compress() did.
    list_hufftrees = []
    for x in char_freq.keys():
        tem = HuffTree(0, x, char_freq[x], None, None)
        list_hufftrees.append(tem)
    tem = buildHuffmanTree(list_hufftrees)
    tem.traverse_huffman_tree(tem.get_root(), '', char_freq)
    # Walk the bit stream through the tree, emitting a byte at each leaf.
    out_file = open(outputfilename, 'wb')
    code = ''
    currnode = tem.get_root()
    for x in range(leaf_node_size * 5 + 4, filesize):
        c = filedata[x]
        for i in range(8):
            if c & 128:
                code = code + '1'
            else:
                code = code + '0'
            c = c << 1
        # Keep 24 bits in reserve: the trailer (length byte + padded byte)
        # plus the byte before it must be handled specially below.
        while len(code) > 24:
            if currnode.isleaf():
                out_file.write(six.int2byte(currnode.get_value()))
                currnode = tem.get_root()
            if code[0] == '1':
                currnode = currnode.get_right()
            else:
                currnode = currnode.get_left()
            code = code[1:]
    # Decode the trailer: bits [-16:-8] hold the bit length of the final byte.
    sub_code = code[-16:-8]
    last_length = 0
    for i in range(8):
        last_length = last_length << 1
        if sub_code[i] == '1':
            last_length = last_length | 1
    tail_start = len(code) - 8
    code = code[:-16] + code[tail_start:tail_start + last_length]
    while len(code) > 0:
        if currnode.isleaf():
            out_file.write(six.int2byte(currnode.get_value()))
            currnode = tem.get_root()
        if code[0] == '1':
            currnode = currnode.get_right()
        else:
            currnode = currnode.get_left()
        code = code[1:]
    if currnode.isleaf():
        out_file.write(six.int2byte(currnode.get_value()))
        currnode = tem.get_root()
    out_file.close()
    f.close()
# Build the two labelled entry fields and the compress/decompress buttons.
# NOTE(review): `.place()` returns None, so `entry`, `ch1`, `ch2` are all None;
# the widget references live only inside Tk.
tk.Label(window,text='input filename',font=('HGMaruGothicMPRO',16),fg='white',bg='dark blue').place(x=100,y=100)
tk.Label(window,text='output filename',font=('HGMaruGothicMPRO',16),fg='white',bg='dark blue').place(x=100,y=200)
# Module-level StringVars read by compress()/decompress() via input.get()/output.get().
input=tk.StringVar()
entry=tk.Entry(window,textvariable=input,font=('HGMaruGothicMPRO',16),width=16,show=None).place(x=100,y=130)
output=tk.StringVar()
entry=tk.Entry(window,textvariable=output,font=('HGMaruGothicMPRO',16),width=16,show=None).place(x=100,y=230)
ch1=tk.Button(window,text='compress',font=('HGMaruGothicMPRO',8),command=compress,width=10).place(x=100,y=300)
ch2=tk.Button(window,text='decompress',font=('HGMaruGothicMPRO',8),command=decompress,width=10).place(x=300,y=300)
window.mainloop()
| [
"wrs792141579@mail.ustc.edu.cn"
] | wrs792141579@mail.ustc.edu.cn |
13291215a88b57ada0920e080d9c3b7a1ef0eb47 | 64e23db686e6d1e7edd74ca5952a16408d320ca8 | /daemon/lvm.py | 2369697565859ac07dea534a71bd4f106d3f2b1d | [] | no_license | pengxiaojun/ipsan | 37514472cb1a2306684c664adb41d49a685034b5 | 11b39f55417489d6c7be481f908c01dd6b75ca63 | refs/heads/master | 2021-01-18T21:34:17.533550 | 2016-03-30T08:26:29 | 2016-03-30T08:26:29 | 39,625,764 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | # -*-coding: utf-8 -*-
import os
import sqlite3
import logging
import subprocess
from common import grgrant_prog
from common import database
def fetch_all_lvm():
    """Return all (name, path) rows from the lvms table, or None on error."""
    with sqlite3.connect(database) as conn:
        cursor = conn.cursor()
        try:
            cursor.execute('select name, path from lvms')
            return cursor.fetchall()
        except Exception as e:
            logging.exception(e)
            return None
def active_lvm(name, path):
    """Activate all LVM volume groups (vgchange -a y) via the grgrant helper.

    *name* and *path* are only used for logging; the vgchange call itself is
    global.
    """
    args = [grgrant_prog, '/sbin/vgchange', '-a', 'y']
    try:
        # Output is not needed; check_output is kept so a non-zero exit
        # raises CalledProcessError.
        subprocess.check_output(args, universal_newlines=True)
        # Lazy %-style args: the message is only formatted if the record is emitted.
        logging.info("Active lvm:%s path:%s success.", name, path)
    except subprocess.CalledProcessError as e:
        logging.info("Active lvm:%s path:%s failure:", name, path)
        logging.exception(e)
def check_lvm():
    """Re-activate LVM volumes whose device path is missing from the filesystem."""
    rows = fetch_all_lvm()
    if rows is None:
        return
    for name, path in rows:
        if not os.path.exists(path):
            active_lvm(name, path)
# if __name__ == '__main__':
# check_lvm()
| [
"pengxj@outlook.com"
] | pengxj@outlook.com |
1fc2d9f87296f7c77e0b2ba4b7fe8747035f1088 | a032cab55c78de1ca1e5b4a50a24a93b9fd67ceb | /proto/media/yuvfile.py | 572ad0000b1ce55e9b45600de65fc36af1548385 | [] | no_license | xinjuehu/ns3-study | 47a80545dedd4f1e0c663ef57d24ba67dc03574c | 994b80169058f6b14cf0d8659e7e3864274a90fd | refs/heads/master | 2021-08-29T12:27:51.373068 | 2017-12-14T00:57:15 | 2017-12-14T00:57:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,044 | py | #!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
import numpy as np
from PIL import Image
import os
from jmenc import YUVEncode
class VmafComp(object):
    # Quality comparison backed by the bundled psnr/ssim command-line tools
    # in lib/. Python 2 module.
    def __init__(self, enc):
        # enc: encoder wrapper providing wait_proc(cmd) -> iterable of output lines.
        self._encoder = enc
        self._root = os.path.split(os.path.realpath(__file__))[0] + os.sep
        self._lib = self._root + '..' + os.sep + 'lib' + os.sep
    def comp(self, w, h, source, f1):
        # Not implemented for encoded files in this backend.
        assert os.path.exists(source)
        assert os.path.exists(f1)
        pass
    def comp_yuv(self, w, h, source, f1):
        # Returns (mean PSNR, mean SSIM) of f1 against source, both raw YUV420p.
        assert os.path.exists(source)
        assert os.path.exists(f1)
        cmd = self._lib + "psnr yuv420p {2} {3} {0} {1}"
        cmd = cmd.format(w, h, source, f1)
        ret = self._encoder.wait_proc(cmd)
        avg = []
        for i in ret:
            # Last whitespace-separated token of each line is the per-frame PSNR.
            i = i.strip().split()[-1]
            avg.append(float(i))
        avg = np.mean(avg)
        cmd = self._lib + "ssim yuv420p {2} {3} {0} {1}"
        cmd = cmd.format(w, h, source, f1)
        ret = self._encoder.wait_proc(cmd)
        avg2 = []
        for i in ret:
            # Only lines starting with 'ssim:' carry a per-frame SSIM value.
            i = i.strip().split()
            if i[0] == 'ssim:':
                avg2.append(float(i[-1]))
        avg2 = np.mean(avg2)
        return avg, avg2
class FFComp(object):
    # Quality comparison backed by ffmpeg's ssim/psnr lavfi filters;
    # results are parsed from the per-frame log files ffmpeg writes.
    def __init__(self, enc):
        # enc: encoder wrapper providing wait_proc(cmd).
        self._encoder = enc
    def comp(self, w, h, source, f1):
        # Compare an encoded file f1 against the raw source.
        assert os.path.exists(source)
        assert os.path.exists(f1)
        cmd = "ffmpeg -i {2} -pix_fmt yuv420p -s {0}x{1} -i {3}" + \
              " -lavfi \"ssim='stats_file={3}_ssim.log';[0:v][1:v]psnr='stats_file={3}_psnr.log'\" -f null -"
        cmd = cmd.format(w, h, source, f1)
        self._encoder.wait_proc(cmd)
        return self.read_log(f1)
    def comp_yuv(self, w, h, source, f1):
        # Compare two raw YUV files of the same geometry.
        assert os.path.exists(source)
        assert os.path.exists(f1)
        cmd = "ffmpeg -s {0}x{1} -i {2} -s {0}x{1} -i {3}" + \
              " -lavfi \"ssim='stats_file={3}_ssim.log';[0:v][1:v]psnr='stats_file={3}_psnr.log'\" -f null -"
        cmd = cmd.format(w, h, source, f1)
        self._encoder.wait_proc(cmd)
        return self.read_log(f1)
    @staticmethod
    def read_log(f1):
        # Average the per-frame values ffmpeg logged; returns (psnr, ssim).
        with open(f1 + "_ssim.log") as f:
            c1 = f.readlines()
        with open(f1 + "_psnr.log") as f:
            c2 = f.readlines()
        # 'All:' carries the frame SSIM; 'psnr_y:' the luma PSNR per frame.
        ssim, psnr = [float(i.strip().split('All:')[1].split()[0]) for i in c1], \
                     [float(i.strip().split('psnr_y:')[1].split()[0]) for i in c2]
        psnr = np.mean(psnr)
        ssim = np.mean(ssim)
        return psnr, ssim
class YUVUtil(object):
    # Utility for reading, splitting, tiling and quality-checking raw
    # YUV420p video. Python 2 only (tuple parameters, integer division).
    def __init__(self, name, width=352, height=288, comp=FFComp):
        self._w = width
        self._h = height
        self._size = (self._w, self._h)
        # Project root: one directory above this module.
        self._root = os.path.split(os.path.realpath(__file__))[0] + os.sep + '..' + os.sep
        if os.path.exists(name):
            # name is a concrete path.
            self._source = name
            self._output = self._root + 'output' + os.sep + 'default_'
        else:
            # name is a bare clip name under input/.
            self._source = self._root + 'input' + os.sep + name + '.yuv'
            self._output = self._root + 'output' + os.sep + name + '_'
        self._encoder = YUVEncode(self._output)
        self._comp = comp(self._encoder)
    def get_output(self):
        # Output directory (prefix with the trailing clip name stripped).
        return os.sep.join(self._output.split(os.sep)[:-1]) + os.sep
    def get_source(self):
        return self._source
    def read420(self):
        # Generator yielding one (y, u, v) frame at a time as uint8 arrays;
        # u/v are quarter-size (4:2:0 subsampling).
        with open(self._source, 'rb') as f:
            while f.tell() < os.fstat(f.fileno()).st_size:
                y = f.read(self._w * self._h)
                u = f.read(self._w * self._h / 4)
                v = f.read(self._w * self._h / 4)
                y = np.array(bytearray(y), dtype=np.uint8)
                u = np.array(bytearray(u), dtype=np.uint8)
                v = np.array(bytearray(v), dtype=np.uint8)
                y = y.reshape((self._h, self._w))
                u = u.reshape((self._h / 2, self._w / 2))
                v = v.reshape((self._h / 2, self._w / 2))
                assert len(y) == self._h
                assert len(y[0]) == self._w
                yield y, u, v
    def yuv_ffmpeg_h264(self, output='sp.264'):
        # Encode the source clip to H.264 via the encoder wrapper.
        return self._encoder.ffmpeg_h264(self._source, self._size, output)
    def comp(self, f1='sp.mp4', in_file=None):
        # Compare encoded f1 against in_file (defaults to the source clip).
        if in_file is None:
            in_file = self._source
        if not os.path.exists(f1):
            f1 = self._output + f1
        return self._comp.comp(self._w, self._h, in_file, f1)
    def comp_yuv(self, f1=None, in_file=None):
        # Compare two raw YUV files; both default to the source clip.
        if in_file is None:
            in_file = self._source
        if f1 is None:
            f1 = self._source
        if not os.path.exists(f1):
            f1 = self._output + f1
        return self._comp.comp_yuv(self._w, self._h, in_file, f1)
    def rgb2img(self, (r, g, b)):
        # Merge three full-size planes into an RGB PIL image.
        im_r = Image.frombytes('L', self._size, r.tostring())
        im_g = Image.frombytes('L', self._size, g.tostring())
        im_b = Image.frombytes('L', self._size, b.tostring())
        return Image.merge('RGB', (im_r, im_g, im_b))
    def yuv2img(self, (y, u, v)):
        # Upsample u/v back to full size and merge into a YCbCr PIL image.
        u = np.repeat(u, 2, 0)
        u = np.repeat(u, 2, 1)
        v = np.repeat(v, 2, 0)
        v = np.repeat(v, 2, 1)
        im_r = Image.frombytes('L', self._size, y.tostring())
        im_g = Image.frombytes('L', self._size, u.tostring())
        im_b = Image.frombytes('L', self._size, v.tostring())
        return Image.merge('YCbCr', (im_r, im_g, im_b))
    def yuv_split(self, (y, u, v), (w, h), (off_w, off_h)):
        # Crop a (w, h) window at (off_w, off_h) from one frame; chroma is
        # upsampled, cropped, then re-subsampled so the crop stays 4:2:0.
        assert w + off_w <= self._w
        assert h + off_h <= self._h
        u = np.repeat(u, 2, 0)
        u = np.repeat(u, 2, 1)
        v = np.repeat(v, 2, 0)
        v = np.repeat(v, 2, 1)
        y = y[off_h:off_h + h, off_w:off_w + w]
        u = u[off_h:off_h + h, off_w:off_w + w]
        v = v[off_h:off_h + h, off_w:off_w + w]
        u = u[::2, ::2]
        v = v[::2, ::2]
        return y, u, v
    def img2yuv(self):
        # Not implemented.
        pass
    def yuv_merge(self):
        # Not implemented.
        pass
    def show_img(self):
        # Dump every frame as a JPEG next to the output prefix; returns frame count.
        index = 0
        for frm in self.read420():
            co = self.yuv2img(frm)
            co.save(self._output + str(index) + '.jpg')
            index += 1
        return index
    def split_run(self, tmp_size, tmp_off, output='sp.yuv'):
        # Write the cropped sub-clip to *output*; skips work if it already exists.
        output = self._output + output
        if os.path.exists(output):
            return output
        with open(output, 'wb') as f:
            for frm in self.read420():
                data = self.yuv_split(frm, tmp_size, tmp_off)
                f.write(''.join([i.tostring() for i in data]))
        return output
    def make_tile(self, tile):
        # Split the clip into an (x, y) grid of tiles; returns the list of
        # tile file paths plus each tile's width and height.
        x, y = tile
        w = self._w / x
        h = self._h / y
        x = 0
        y = 0
        data = []
        for j in range(0, self._h, h):
            for i in range(0, self._w, w):
                ret = self.split_run((w, h), (i, j), "sp_{0}_{1}_{2}_{3}.yuv".format(x, y, w, h))
                print 'MAKE', ret
                data.append(ret)
                y += 1
            y = 0
            x += 1
        return data, w, h
| [
"zxyqwe_2004@126.com"
] | zxyqwe_2004@126.com |
d0c3ca61479d518272c9503a1c470f4db684357a | b1b45393aefc27f85d3fd3e454929b035c390be6 | /tests/func/test_bucket_it.py | b4351630ed79a6ae9ff4201d4ba9288d5a507d50 | [
"Apache-2.0"
] | permissive | Jiaming1999/baas-sdk-python | c5f579f2ae0ff591ab49af8c167845515f2c0019 | 83b0916af0e4f3167a232ac7eb06b82331adc172 | refs/heads/master | 2022-01-07T19:12:31.184660 | 2019-01-22T09:49:17 | 2019-01-22T09:49:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | # -*- coding: utf-8 -*-
import pytest
from requests import HTTPError
import necbaas as baas
from . import util
class TestBucket(object):
    # Integration tests for baas.Buckets against a live BaaS service
    # (master-key credentials created by util.create_service).
    service = None
    # type: baas.Service
    def setup(self):
        self.masterService = util.create_service(master=True)
    def teardown(self):
        # Remove every bucket the test created.
        # NOTE(review): assumes self.bucket was assigned by the test body;
        # a failure before that assignment would make teardown raise.
        for b in self.bucket.query():
            self.bucket.remove(b["name"])
    def test_upsert(self):
        """Bucket can be created; re-creating the same bucket fails with 400."""
        self.bucket = baas.Buckets(self.masterService, "object")
        # create
        create_res = self.bucket.upsert("bucket1")
        assert create_res["name"] == "bucket1"
        assert create_res["description"] == ""
        assert "ACL" in create_res
        assert "contentACL" in create_res
        # update
        with pytest.raises(HTTPError) as ei:
            self.bucket.upsert("bucket1")
        status_code = ei.value.response.status_code
        assert status_code == 400
    def test_upsert_with_options(self):
        """Bucket can be created and then updated with desc/ACL options."""
        self.bucket = baas.Buckets(self.masterService, "file")
        # create
        desc = "test bucket description"
        acl = {"u": ["g:authenticated"]}
        content_acl = {"d": ["g:anonymous"]}
        res = self.bucket.upsert("bucket1", desc=desc, acl=acl, content_acl=content_acl)
        assert res["name"] == "bucket1"
        assert res["description"] == desc
        assert res["ACL"]["u"] == ["g:authenticated"]
        assert res["contentACL"]["d"] == ["g:anonymous"]
        # update
        desc = "upsert description"
        acl = {"u": ["g:anonymous"]}
        content_acl = {"d": ["g:authenticated"]}
        res = self.bucket.upsert("bucket1", desc=desc, acl=acl, content_acl=content_acl)
        assert res["name"] == "bucket1"
        assert res["description"] == desc
        assert res["ACL"]["u"] == ["g:anonymous"]
        assert res["contentACL"]["d"] == ["g:authenticated"]
    def test_query(self):
        """Querying returns all created buckets."""
        self.bucket = baas.Buckets(self.masterService, "object")
        num = 10
        for i in range(num):
            self.bucket.upsert("bucket" + str(i))
        # query
        results = self.bucket.query()
        assert len(results) == num
        for i in range(num):
            assert results[i]["name"] == "bucket" + str(i)
    def test_get(self):
        """get() returns the same representation upsert() produced."""
        self.bucket = baas.Buckets(self.masterService, "object")
        create_res = self.bucket.upsert("bucket1")
        # get
        get_res = self.bucket.get("bucket1")
        assert create_res == get_res
    def test_remove(self):
        """A created bucket can be removed."""
        self.bucket = baas.Buckets(self.masterService, "object")
        self.bucket.upsert("bucket1")
        # remove
        self.bucket.remove("bucket1")
| [
"tamura.jn@ncos.nec.co.jp"
] | tamura.jn@ncos.nec.co.jp |
88fd6306ddf23894d2552a4e2bc87e2b89a734df | e489172f6e49e1239db56c047a78a29a6ffc0b36 | /via_code_decode/code_category.py | ab1f0d754b5245b8d99d0e949b26421de5effc09 | [] | no_license | eksotama/prln-via-custom-addons | f05d0059353ae1de89ccc8d1625a896c0215cfc7 | f2b44a8af0e7bee87d52d258fca012bf44ca876f | refs/heads/master | 2020-03-25T19:49:08.117628 | 2015-12-01T07:29:43 | 2015-12-01T07:29:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2013 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
class code_category(osv.osv):
    """Master-data model for code categories (OpenERP osv model).

    Other modules may mark a category as 'pinned' to guarantee its
    existence; pinned records cannot be deleted through unlink().
    """
    _name = 'code.category'
    _description = 'Code Category'

    _columns = {
        'name': fields.char('Code Category', size=64, readonly=False, required=True, translate=True, select=True, help="Register Code Category"),
        'pinned': fields.boolean('Pinned', readonly=True, help="This is to mark whether the code category is 'pinned', i.e. cannot be deleted. Can be used by modules to force existence of the code category."),
    }

    _defaults = {
        'pinned' : False,
    }

    ## unlink
    #
    # unlink intercepts the main unlink function to prevent deletion of pinned record.
    #
    def unlink(self, cr, uid, ids, context=None):
        # Refuse the whole deletion if any record in *ids* is pinned.
        for _obj in self.pool.get('code.category').browse(cr, uid, ids, context=context):
            if _obj.pinned:
                raise osv.except_osv(_('Error !'), _('Pinned Code Category cannot be deleted.'))
        return super(code_category, self).unlink(cr, uid, ids, context=context)

code_category()
| [
"aero@aero.(none)"
] | aero@aero.(none) |
386d526236ceef1e4accd80ace256f69374c7b69 | 266f073facf1754763af372f3b4433337161f91a | /memegen/domain/template.py | 4c61cdbd88b6c6134c5c7f14b2935ed1e4fbc5d5 | [
"MIT"
] | permissive | jkloo/memegen | 7717104eedc0db1cad15673b426f1ebdb5119445 | 9360486066b52ede528f0c45671f81ebb168e3b3 | refs/heads/master | 2020-04-05T18:55:54.182899 | 2015-06-19T13:47:24 | 2015-06-19T13:47:24 | 37,665,345 | 0 | 0 | null | 2015-06-18T14:46:22 | 2015-06-18T14:46:22 | null | UTF-8 | Python | false | false | 950 | py | import os
from .text import Text
class Template:
"""Blank image to generate a meme."""
DEFAULTS = ("default.png", "default.jpg")
def __init__(self, key,
name=None, lines=None, aliases=None, link=None, root=None):
self.key = key
self.name = name or ""
self.lines = lines or []
self.aliases = aliases or []
self.link = link or ""
self.root = root
def __eq__(self, other):
return self.key == other.key
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.name < other.name
@property
def path(self):
for default in self.DEFAULTS:
path = os.path.join(self.root, self.key, default)
if os.path.isfile(path):
return path
return None
@property
def default(self):
text = Text('/'.join(self.lines))
return text.path
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
b92f07f377e480c10c7ff21ab2ee63a1f3876c6c | 045b48b2e1a75bd45c87b5c352c3b6fc340b8902 | /Chapter06/bookmarks/images/urls.py | ca9fb60bc859d3e04c9aaa7944715a8c384e05c3 | [
"MIT"
] | permissive | fifo2019/Django-2-by-Example | c56bbad3a1c18242e832c452703619ec5e50caf4 | 1f8be1f5717e5c83feac8ded5d8c78a3b64ce864 | refs/heads/master | 2020-07-27T15:10:09.347076 | 2019-09-19T19:10:58 | 2019-09-19T19:10:58 | 209,136,653 | 1 | 0 | MIT | 2019-09-17T19:11:26 | 2019-09-17T19:11:26 | null | UTF-8 | Python | false | false | 375 | py | from django.urls import path
from . import views
app_name = 'images'
urlpatterns = [
path('create/', views.image_create, name='create'),
path('detail/<int:id>/<slug:slug>/', views.image_detail, name='detail'),
path('like/', views.image_like, name='like'),
path('', views.image_list, name='list'),
path('ranking/', views.image_ranking, name='create'),
]
| [
"prajaktam@packtpub.com"
] | prajaktam@packtpub.com |
0d2b5b6c9b8399006a452a9d38b656e956d77c3a | 9e86aa077e7d4f10e20d7bc7de1f53c5ad51716d | /t01/t01_05_e0925.py | 7cf9e40fd1f8b8795fc38c5a295fd99a94b24ae4 | [] | no_license | Klevtsovskyi/PythonAud1 | 16b77e7aaf6b7ce951cca56b8f1be386b9bb4864 | 2f6fb9dbd2960543f5a12ccca4a5bf99db21c835 | refs/heads/master | 2023-09-03T13:36:08.743459 | 2021-11-04T10:10:55 | 2021-11-04T10:10:55 | 296,278,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
import math

# Read the triangle's three vertices (x1 y1 x2 y2 x3 y3) from one input line.
x1, y1, x2, y2, x3, y3 = [float(d) for d in input().split()]
# Side lengths: Euclidean distance between each pair of vertices.
a = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
b = ((x3 - x2) ** 2 + (y3 - y2) ** 2) ** 0.5
c = ((x3 - x1) ** 2 + (y3 - y1) ** 2) ** 0.5
# Perimeter P, then area S via Heron's formula with semi-perimeter p.
P = a + b + c
p = P / 2
S = math.sqrt(p * (p - a) * (p - b) * (p - c))
print("%.4f %.4f" % (P, S))
| [
"avklevtsovskiy@gmail.com"
] | avklevtsovskiy@gmail.com |
b941f4fec6db3324f517391c833d36bd9deb602e | 1a114943c92a5db40034470ff31a79bcf8ddfc37 | /stdlib_exam/unicodedata-example-1.py | 8ab800f4c75d0ac65e9f6fbc5d28206808558553 | [] | no_license | renwl/mylinux | 1924918599efd6766c266231d66b2a7ed6f6cdd1 | 0602fc6d2b0d254a8503e57310f848fc3e1a73b4 | refs/heads/master | 2020-07-10T22:12:03.259349 | 2017-01-02T12:32:04 | 2017-01-02T12:32:04 | 66,467,007 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 396 | py | import unicodedata
for char in [u"A", u"-", u"1", u"\N{LATIN CAPITAL LETTER O WITH DIAERESIS}"]:
print repr(char),
print unicodedata.category(char),
print repr(unicodedata.decomposition(char)),
print unicodedata.decimal(char, None),
print unicodedata.numeric(char, None)
## u'A' Lu '' None None
## u'-' Pd '' None None
## u'1' Nd '' 1 1.0
## u'Ö' Lu '004F 0308' None None
| [
"wenliang.ren@quanray.com"
] | wenliang.ren@quanray.com |
b73000ba07270793015730c3be257dec3a98ded0 | 4bb1a23a62bf6dc83a107d4da8daefd9b383fc99 | /work/abc032_c2.py | 45cd9b0ea7fccb2e9ffe3042836077ee7d77a58a | [] | no_license | takushi-m/atcoder-work | 0aeea397c85173318497e08cb849efd459a9f6b6 | f6769f0be9c085bde88129a1e9205fb817bb556a | refs/heads/master | 2021-09-24T16:52:58.752112 | 2021-09-11T14:17:10 | 2021-09-11T14:17:10 | 144,509,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | n,k = map(int, input().split())
al = [int(input()) for _ in range(n)]
if 0 in al:
print(n)
exit()
r = 0
l = 0
m = 1
res = 0
while l<n:
while r<n and m*al[r]<=k:
m *= al[r]
r += 1
res = max(res, r-l)
if r==l:
r += 1
else:
m //= al[l]
l += 1
print(res) | [
"takushi-m@users.noreply.github.com"
] | takushi-m@users.noreply.github.com |
5f2a5eb29af62914d46eb9bdd3a8b12e5253115d | 8dd53a5d1820ae5a3efe799381a90c977afd32c4 | /contrib/devtools/copyright_header.py | 8ffcca9432a127d16002bcc5aa79aef9ddf47f4a | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | mulecore/mulecoin | 8b654817a1b78c9e98f96bfef5febaca23347f64 | e52131742938ae433463f32680837981a5cedc0f | refs/heads/master | 2023-03-28T05:37:53.552271 | 2021-03-27T03:22:13 | 2021-03-27T03:22:13 | 351,796,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,084 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################

# Files never checked/updated: vendored code, generated sources, and files
# carrying third-party copyrights.
EXCLUDE = [
    # libsecp256k1:
    'src/secp256k1/include/secp256k1.h',
    'src/secp256k1/include/secp256k1_ecdh.h',
    'src/secp256k1/include/secp256k1_recovery.h',
    'src/secp256k1/include/secp256k1_schnorr.h',
    'src/secp256k1/src/java/org_mulecoin_NativeSecp256k1.c',
    'src/secp256k1/src/java/org_mulecoin_NativeSecp256k1.h',
    'src/secp256k1/src/java/org_mulecoin_Secp256k1Context.c',
    'src/secp256k1/src/java/org_mulecoin_Secp256k1Context.h',
    # auto generated:
    'src/univalue/lib/univalue_escapes.h',
    'src/qt/mulecoinstrings.cpp',
    'src/chainparamsseeds.h',
    # other external copyrights:
    'src/tinyformat.h',
    'src/leveldb/util/env_win.cc',
    'src/crypto/ctaes/bench.c',
    'test/functional/test_framework/bignum.py',
    # python init:
    '*__init__.py',
]
# fnmatch.translate turns each glob into a regex; the '|' join matches any.
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))

# Only C/C++/Python sources are examined.
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))

def applies_to_file(filename):
    """Return True when *filename* matches INCLUDE and is not excluded."""
    return ((EXCLUDE_COMPILED.match(filename) is None) and
            (INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################

GIT_LS_CMD = 'git ls-files'

def call_git_ls():
    """Return the repository's tracked files as listed by 'git ls-files'."""
    out = subprocess.check_output(GIT_LS_CMD.split(' '))
    # Drop the trailing empty entry produced by the final newline.
    return [f for f in out.decode("utf-8").split('\n') if f != '']

def get_filenames_to_examine():
    """Tracked files that pass the INCLUDE/EXCLUDE filters, sorted."""
    filenames = call_git_ls()
    return sorted([filename for filename in filenames if
                   applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################

# Two accepted spellings of the copyright word itself.
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)

# Year notations: a single year, a 'YYYY-YYYY' range, or a comma list.
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
                                                ANY_YEAR_STYLE))

ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)

def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a full 'Copyright <years> <holder>' pattern for one holder."""
    return re.compile('%s %s %s' % (copyright_style, year_style, name))

# Holder names (regex fragments, newline-terminated) we expect to encounter.
EXPECTED_HOLDER_NAMES = [
    "Satoshi Nakamoto\n",
    "The Mulecoin Core developers\n",
    "The Mulecoin Core developers \n",
    "Mulecoin Core Developers\n",
    "the Mulecoin Core developers\n",
    "The Mulecoin developers\n",
    "The LevelDB Authors\. All rights reserved\.\n",
    "BitPay Inc\.\n",
    "BitPay, Inc\.\n",
    "University of Illinois at Urbana-Champaign\.\n",
    "MarcoFalke\n",
    "Pieter Wuille\n",
    "Pieter Wuille +\*\n",
    "Pieter Wuille, Gregory Maxwell +\*\n",
    "Pieter Wuille, Andrew Poelstra +\*\n",
    "Andrew Poelstra +\*\n",
    "Wladimir J. van der Laan\n",
    "Jeff Garzik\n",
    "Diederik Huys, Pieter Wuille +\*\n",
    "Thomas Daede, Cory Fields +\*\n",
    "Jan-Klaas Kollhof\n",
    "Sam Rushing\n",
    "ArtForz -- public domain half-a-node\n",
]

# Per-holder compiled patterns, one dict per recognised notice style.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}

for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################

def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Total number of copyright-looking notices in *contents*, any holder."""
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))

def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True for a 'Copyright (c) YYYY[-YYYY] <holder>' notice."""
    match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
    return match is not None

def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True for a 'Copyright (c) YYYY, YYYY, ... <holder>' notice."""
    match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
    return match is not None

def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True for a notice lacking the '(c)' marker."""
    match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
    return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the entire contents of *filename*, decoded as UTF-8.

    Uses a context manager so the file handle is closed deterministically;
    the original left the handle open until garbage collection.
    """
    with open(os.path.abspath(filename), 'r', encoding="utf8") as f:
        return f.read()
def gather_file_info(filename):
    """Collect copyright statistics for one file.

    Returns a dict holding the raw contents, the total notice count, and a
    per-holder boolean map for each of the three recognised notice styles.
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c

    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)

    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # Count this holder as classified when any recognised style matched;
        # print_report later flags files where classified < all_copyrights.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] = info['classified_copyrights'] + 1
    return info
################################################################################
# report execution
################################################################################
# 79 dashes — what the original "'-'.join of 80 empty strings" produced.
SEPARATOR = '-' * 79

def print_filenames(filenames, verbose):
    """Print each filename on its own tab-indented line, verbose mode only."""
    if not verbose:
        return
    for filename in filenames:
        print("\t%s" % filename)
def print_report(file_infos, verbose):
    """Print the copyright census built by gather_file_info().

    Sections: per-file notice counts, then per-holder buckets for each of
    the three recognised styles, then files whose notices could not be
    classified. *verbose* additionally lists the files in each bucket.
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)

    print(SEPARATOR)
    print('')
    # Histogram of how many notices each file carries (0, 1, 2, 3, 4+).
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    # Per-holder buckets below; a file can appear under several holders.
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    # A file has unexpected holders when it carries more notices than the
    # per-holder classification accounted for.
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Run the copyright report from inside *base_directory*.

    Changes directory so 'git ls-files' sees the target repository, then
    restores the caller's working directory.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    filenames = get_filenames_to_examine()
    file_infos = [gather_file_info(f) for f in filenames]
    print_report(file_infos, verbose)
    os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.

Usage:
    $ ./copyright_header.py report <base_directory> [verbose]

Arguments:
    <base_directory> - The base directory of a mulecoin source code repository.
    [verbose] - Includes a list of every file of each subcategory in the report.
"""

def report_cmd(argv):
    """Entry point for the 'report' subcommand.

    argv = [prog, 'report', <base_directory>, ['verbose']].
    Exits with usage/error text on bad arguments.
    """
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)

    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)

    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Bug fix: report the offending argument (argv[3]); the original
        # printed argv[2], i.e. the base directory.
        sys.exit("*** unknown argument: %s" % argv[3])

    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################

# %%ai becomes the literal '%ai' (author date) after the filename is
# substituted with the '%' operator below.
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"

def call_git_log(filename):
    """Return the author-date lines of 'git log' for *filename*."""
    out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
    return out.decode("utf-8").split('\n')

def get_git_change_years(filename):
    """Return the years (as strings) in which *filename* was changed."""
    git_log_lines = call_git_log(filename)
    if len(git_log_lines) == 0:
        # NOTE(review): split() always yields at least one element, so this
        # branch looks unreachable; it also returns an int where the normal
        # path returns strings — confirm before relying on it.
        return [datetime.date.today().year]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]

def get_most_recent_git_change_year(filename):
    """Latest year in which *filename* was changed according to git."""
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the file's contents as a list of lines (newlines kept)."""
    with open(os.path.abspath(filename), 'r', encoding="utf8") as handle:
        return handle.readlines()

def write_file_lines(filename, file_lines):
    """Write *file_lines* (already newline-terminated) back to *filename*."""
    with open(os.path.abspath(filename), 'w', encoding="utf8") as handle:
        handle.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################

COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Mulecoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))

def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable copyright line.

    Returns (None, None) when no line matches the Mulecoin holder pattern.
    """
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split 'YYYY' or 'YYYY-YYYY' into a (start, end) pair of strings."""
    pieces = year_range.split('-')
    if len(pieces) == 1:
        return pieces[0], pieces[0]
    return pieces[0], pieces[1]

def year_range_to_str(start_year, end_year):
    """Render a year pair as 'YYYY' (equal years) or 'YYYY-YYYY'."""
    if start_year == end_year:
        return start_year
    return "%s-%s" % (start_year, end_year)

def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its year range extended to *last_git_change_year*.

    Characters before the copyright notice (comment markers etc.) vary
    from file to file and are preserved verbatim. The line is returned
    unchanged when it is already up to date.
    """
    marker = 'Copyright (c) '
    pieces = line.split(marker)
    prefix = pieces[0]
    remainder = pieces[1]
    words = remainder.split(' ')
    start_year, end_year = parse_year_range(words[0])
    if end_year == last_git_change_year:
        return line
    updated_range = year_range_to_str(start_year, last_git_change_year)
    return prefix + marker + updated_range + ' ' + ' '.join(words[1:])
def update_updatable_copyright(filename):
    """Bring the Mulecoin copyright line of *filename* up to its last git
    change year, rewriting the file only when the line actually changes.

    Prints a one-line status message for every file it inspects.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if line == new_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Run the year update over every eligible file in the repository.

    Works from inside *base_directory* (required by 'git ls-files') and
    restores the caller's working directory afterwards.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    for filename in get_filenames_to_examine():
        update_updatable_copyright(filename)
    os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Mulecoin Core developers" which were
changed in a year more recent than is listed. For example:

// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers

will be updated to:

// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers

where <lastModifiedYear> is obtained from the 'git log' history.

This subcommand also handles copyright headers that have only a single year. In those cases:

// Copyright (c) <year> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers

will be updated to:

// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers

where the update is appropriate.

Usage:
    $ ./copyright_header.py update <base_directory>

Arguments:
    <base_directory> - The base directory of a mulecoin source code repository.
"""

def print_file_action_message(filename, action):
    # Two-column status line: left-justified filename, then the action taken.
    print("%-52s %s" % (filename, action))

def update_cmd(argv):
    """Entry point for the 'update' subcommand: argv = [prog, 'update', dir]."""
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)

    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad base_directory: %s" % base_directory)
    exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################

def get_header_lines(header, start_year, end_year):
    """Render a header template into newline-terminated lines.

    Drops the template's surrounding blank lines and substitutes the year
    range into the first line's '%s' placeholder.
    """
    lines = header.split('\n')[1:-1]
    lines[0] = lines[0] % year_range_to_str(start_year, end_year)
    return [line + '\n' for line in lines]

CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Copyright (c) 2017-2019 The Raven Core developers
// Copyright (c) 2020-2021 The Mulecoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''

def get_cpp_header_lines_to_insert(start_year, end_year):
    # Reversed because the caller inserts the lines one by one at index 0.
    return reversed(get_header_lines(CPP_HEADER, start_year, end_year))

PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''

def get_python_header_lines_to_insert(start_year, end_year):
    # Reversed for the same insert-at-a-fixed-index reason as the C++ header.
    return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################

def get_git_change_year_range(filename):
    """Return (earliest, latest) year in which *filename* was changed."""
    years = get_git_change_years(filename)
    return min(years), max(years)

################################################################################
# check for existing core copyright
################################################################################

def file_already_has_core_copyright(file_lines):
    """True when the file already carries a Mulecoin Core copyright line."""
    index, _ = get_updatable_copyright_line(file_lines)
    return index != None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """True when the first line is a shebang ('#!...' longer than 2 chars)."""
    if not file_lines:
        return False
    first_line = file_lines[0]
    if len(first_line) <= 2:
        return False
    return first_line[:2] == '#!'
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style header, keeping any shebang as line one."""
    if file_has_hashbang(file_lines):
        insert_idx = 1
    else:
        insert_idx = 0
    header_lines = get_python_header_lines_to_insert(start_year, end_year)
    # header_lines is reversed, so repeated inserts at a fixed index end up
    # in the correct order.
    for line in header_lines:
        file_lines.insert(insert_idx, line)
    write_file_lines(filename, file_lines)

def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style header at the very top of the file."""
    header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
    for line in header_lines:
        file_lines.insert(0, line)
    write_file_lines(filename, file_lines)

def exec_insert_header(filename, style):
    """Insert a copyright header into *filename* unless one already exists.

    *style* is 'python' for '#'-comment headers; anything else gets the
    C++ '//' header. The year range comes from the file's git history.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    if style == 'python':
        insert_python_header(filename, file_lines, start_year, end_year)
    else:
        insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Mulecoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.

The copyright dates will be set to be:

"<year_introduced>-<current_year>"

where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:

"<current_year>"

If the file already has a copyright for "The Mulecoin Core developers", the
script will exit.

Usage:
    $ ./copyright_header.py insert <file>

Arguments:
    <file> - A source file in the mulecoin repository.
"""

def insert_cmd(argv):
    """Entry point for the 'insert' subcommand: argv = [prog, 'insert', file]."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)

    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
        sys.exit("*** cannot insert for file extension %s" % extension)

    # Choose the comment style from the extension.
    if extension == '.py':
        style = 'python'
    else:
        style = 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Mulecoin
Core developers' in repository source files.

Usage:
    $ ./copyright_header <subcommand>

Subcommands:
    report
    update
    insert

To see subcommand usage, run them without arguments.
"""

SUBCOMMANDS = ['report', 'update', 'insert']

if __name__ == "__main__":
    # Dispatch to the requested subcommand; unknown/missing commands print
    # the top-level usage text and exit.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
| [
"root@DESKTOP-AOBIGEQ.localdomain"
] | root@DESKTOP-AOBIGEQ.localdomain |
1ed13503cfc75a9de9161f83af19c119e4aa1a45 | baefe2e480adee987cf8e2b6be33da89931b0694 | /02练习/练习题01.py | f6b3b8e4a4f3c515b40976960dbf5d9c822243b5 | [] | no_license | KingTom1/StudyBySelf | d92963c606b79696f0a22a3d48c2bec707f4a653 | c6d5326c5b6a7fd74b55ac255ee8bf20cebd199b | refs/heads/master | 2020-04-04T22:08:49.617029 | 2019-01-29T06:00:37 | 2019-01-29T06:00:37 | 156,311,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | ##2.#计算一个12.5m*16.7m的矩形房间的面积和周长
a=12.5
b=16.7
mj=a*b
zc=2*(a+b)
print('面积=',mj,' 周长=',zc)
##3.#怎么得到9/2的小数结果
c=float(9/2)
print(c)
##4.#python计算中7*7*7*7.可以有多少种写法
print(pow(7,4))
print(7**4)
#写程序将温度从华氏温度转换为摄氏温度。转换公式为C=5/9*(F-32)
##F=input()
#C=5/9*(int(F)-32)
#print(C)
#一家商场在降价促销。如果购买金额50-100元(包含50元和100元)之间,会给10%的折扣,
# 如果购买金额大于100元会给20%折扣。编写一程序,询问购买价格,再显示出折扣(%10或20%)和最终价格
a=123
def ttt(a):
if a>50 and a<100:
c=0.1;
if a>100:
c=0.2
return c
print(ttt(a))
#7.#判断一个数n能同时被3和5整除
a=111
if a%3==0 and a%5==0:
print(a,'可以被整除');
else:
print(a, '不可以被整除');
#求1+2+3+...+100(第三种)
#14.#3个人在餐厅吃饭,想分摊饭费。总共花费35.27美元,他们还想给15%的消费。每个人该怎么付钱
print(35.27*1.15/3)
#16、打印10到1的数字:
a=10
while a>0:
print(a)
a-=1;
# 22.#嵌套循环输出10-50中个位带有1-5的所有数字
for i in range(10,50):
if str(i)[1] in ["1","2","3","4","5"]:
print(str(i)[0])
print(i);
#23、输入1-127的ascii码并输出对应字符
for i in range(1,128):
print(chr(i)) | [
"38772091+KingTom1@users.noreply.github.com"
] | 38772091+KingTom1@users.noreply.github.com |
71619690ce1315a1467d2da14697223edb31bfb4 | 195915dab8406c2e934d0ffa8c500b1317c5e6f1 | /bestrestra/settings.py | 220f1de76ec4f3342e69561c80dc947ed02197e7 | [] | no_license | theparadoxer02/bestrestra | 28c2e46ae124a7496d889933daefe3c36dbbe9a2 | 13dccc988ee78eebc685111cb486a8c1342deb3c | refs/heads/master | 2020-12-24T19:51:07.521744 | 2017-03-26T08:09:18 | 2017-03-26T08:09:18 | 86,217,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,586 | py | """
Django settings for bestrestra project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'na-%mmzwa4(a%9erh$fsqxs_)4ur_-$sbeof6u!2%ptq)u4xn&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bestrestra.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'bestresta/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bestrestra.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'clashhacks',
'USER': 'abhi'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
import dj_database_url
DATABASES['default'] = dj_database_url.config()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['*']
DEBUG = False
try:
from .local_settings import *
except ImportError:
pass | [
"abhimanyu98986@gmail.com"
] | abhimanyu98986@gmail.com |
f2759daadeefa6b3f075de304b18660a2ca0c449 | 5b4c803f68e52849a1c1093aac503efc423ad132 | /UnPyc/tests/tests/CFG/2/pass/pass_try+finally_while_.py | 258bc6132acf68332fcb6f7045d21347c4336fcd | [] | no_license | Prashant-Jonny/UnPyc | 9ce5d63b1e0d2ec19c1faa48d932cc3f71f8599c | 4b9d4ab96dfc53a0b4e06972443e1402e9dc034f | refs/heads/master | 2021-01-17T12:03:17.314248 | 2013-02-22T07:22:35 | 2013-02-22T07:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | while 1:
try:
pass
finally:
pass
| [
"d.v.kornev@gmail.com"
] | d.v.kornev@gmail.com |
6e1a0f4eb1de8bb8c2e36e6c77fecf3cd02327e4 | 57237351cde7421ab42ca9a4acf563126e0c88b0 | /lianJiaProject/spiders/lianjia.py | e53e2a6f3d90178b7a64468eaa60eb15facec64a | [] | no_license | swarosky44/LianJiaSpider | 2a85d0dd22280223a6827169807115035fa9711e | 12855f4d0a8f7edd89f356eefe011891f2b5d29f | refs/heads/master | 2020-05-21T15:37:15.070565 | 2017-03-15T10:24:34 | 2017-03-15T10:24:34 | 84,630,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,033 | py | # -*- coding: utf-8 -*-
import scrapy
import requests
import re
import time
import pymysql
from BeautifulSoup import BeautifulSoup
from ..items import LianjiaprojectItem
class LianJiaProject(scrapy.Spider):
name = 'lianjiaspider'
start_urls = ['http://sh.lianjia.com/zufang/']
conn = pymysql.connect(
host='127.0.0.1',
unix_socket='/tmp/mysql.sock',
user='root',
passwd='lisen930120',
db='mysql',
charset='utf8'
)
cur = conn.cursor()
cur.execute('USE lianjia')
def start_request(self):
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 \ Safari/537.36 SE 2.X MetaSr 1.0'
headers = { 'User-Agent': user_agent }
yield scrapy.Request(url=self.start_urls, headers=headers, method='GET', callback=self.parse)
def parse(self, response):
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 \ Safari/537.36 SE 2.X MetaSr 1.0'
headers = { 'User-Agent': user_agent }
bs = BeautifulSoup(response.body)
area_list = bs.find('div', { 'id': 'filter-options' }).find('dl', { 'class': 'dl-lst clear' }).find('dd').find('div', { 'class': 'option-list gio_district' }).findAll('a')
for area in area_list:
try:
area_han = area.string
area_pin = area['href'].split('/')[2]
if area_pin:
area_url = 'http://sh.lianjia.com/zufang/{}/'.format(area_pin)
yield scrapy.Request(
url=area_url,
headers=headers,
callback=self.detail_url,
meta={'id1': area_han, 'id2': area_pin}
)
except Exception:
pass
def detail_url(self, response):
for i in range(1, 101):
url = 'http://sh.lianjia.com/zufang/{}/d{}'.format(response.meta['id2'], str(i))
time.sleep(2)
try:
contents = requests.get(url)
bs = BeautifulSoup(contents.content)
houselist = bs.find('ul', { 'id': 'house-lst' }).findAll('li')
for house in houselist:
try:
item = LianjiaprojectItem()
infoPanel = house.find('div', { 'class': 'info-panel' })
infoTitle = infoPanel.find('h2')
infoCols = infoPanel.findAll('div', { 'class': re.compile(r'^col-\d') })
item['title'] = infoTitle.find('a', { 'name': 'selectDetail' })['title']
item['community'] = infoCols[0].find('div', { 'class': 'where' }).find('a', { 'class': 'laisuzhou' }).find('span', { 'class': 'nameEllipsis' }).string
item['model'] = infoCols[0].find('div', { 'class': 'where' }).findAll('span')[0].string
item['area'] = infoCols[0].find('div', { 'class': 'where' }).findAll('span')[1].string.replace(' ', '')
item['watch_num'] = infoCols[2].find('div', { 'class': 'square' }).find('div').find('span', { 'class': 'num' }).string
item['time'] = infoCols[1].findAll('div', { 'class': 'price-pre' })[0].string[0:11].strip('\n')
item['price'] = infoCols[1].findAll('span', { 'class': 'num' })[0].string
item['link'] = infoTitle.find('a', { 'name': 'selectDetail' })['href']
item['city'] = response.meta["id1"]
url_detail = 'http://sh.lianjia.com{}'.format(item['link'])
mapDic = self.get_latitude(url_detail)
item['latitude'] = mapDic['latitude']
item['longitude'] = mapDic['longitude']
self.store_item(item)
except Exception:
pass
yield item
except Exception:
pass
def get_latitude(self, url):
mapDic = {}
content = requests.get(url)
bs = BeautifulSoup(content.content)
mapDom = bs.find('div', { 'id': 'zoneMap' })
mapDic = {
'latitude': mapDom['latitude'],
'longitude': mapDom['longitude']
}
time.sleep(3)
return mapDic
def store_item(self, item):
try:
sql = "INSERT INTO houses (title, community, model, area, watch_num, time, price, link, latitude, longitude, city) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
self.cur.execute(sql, (item['title'], item['community'], item['model'], item['area'], item['watch_num'], item['time'], item['price'], item['link'], item['latitude'], item['longitude'], item['city']))
self.conn.commit()
except Exception as e:
print(e)
cur.close()
conn.close()
| [
"swarosky44@gmail.com"
] | swarosky44@gmail.com |
65247a93f148848e738b1cc6e7f182c86c232c55 | 14bbd65228c3130676857a5023e9c1a1dd6485c9 | /env/bin/chardetect | c0db3b0948fc7827e9abcc6006729938092970b4 | [] | no_license | VadimShurhal/start_UI_automation | 087325833c53841cdddc5b47245401bd9e7271b2 | 7abd4cf3607d3a8193401b101823e54d54a333ef | refs/heads/master | 2020-03-07T01:21:14.660460 | 2018-03-28T18:41:05 | 2018-03-28T18:41:05 | 127,181,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/mastaforka/Desktop/city/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"vadimshurhal@gmail.com"
] | vadimshurhal@gmail.com | |
53e45f6370195df23f3ec7adf095359fb79e466e | 478c4a01990f514813d4dd05faac39d18f0cdc9f | /clang/utils/creduce_crash_testcase.py | 7affc59f42ac64f487de68eb42f717b932ee9a5c | [
"Apache-2.0",
"LLVM-exception",
"NCSA"
] | permissive | capt-hb/llvm-project | 6632477ecc28c07244dfe961dd7b25143f84b51f | 3214ab1279d10920828877865b3286266600666d | refs/heads/master | 2022-10-09T04:24:03.973787 | 2020-06-08T14:12:29 | 2020-06-08T14:12:29 | 212,033,396 | 0 | 2 | null | 2019-10-01T07:08:10 | 2019-10-01T07:08:09 | null | UTF-8 | Python | false | false | 68,098 | py | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
import argparse
import re
import os
import tempfile
import shutil
import shlex
import subprocess
import sys
import resource
import typing
from abc import ABCMeta, abstractmethod
from enum import Enum
from pathlib import Path
# Locate the in-tree lit checkout relative to this script so we can reuse
# lit's substitution machinery (LLVMConfig) below.
lit_path = Path(__file__).parent.parent.parent / "llvm/utils/lit"
if not lit_path.exists():
    sys.exit("Cannot find lit in expected path " + str(lit_path))
sys.path.insert(1, str(lit_path))
from lit.llvm.config import LLVMConfig, FindTool, ToolSubst
import lit.TestRunner
# Optional colourised output: fall back to identity functions when the
# third-party "ansicolors" package is not installed.
try:
    from colors import blue, red, green, bold
except ImportError:
    print("Install the ansicolors package for coloured output.")
    # noinspection PyUnusedLocal
    def blue(s, bg=None, style=None):
        return s
    bold = blue
    red = blue
    green = blue
# Global Options instance; populated by Reducer.__init__ after argument parsing.
options = None  # type: Options
def verbose_print(*args, **kwargs):
    """Forward to print() only when --verbose output is enabled."""
    if options.verbose:
        print(*args, **kwargs)
def extremely_verbose_print(*args, **kwargs):
    """Forward to print() only when --extremely-verbose output is enabled."""
    if options.extremely_verbose:
        print(*args, **kwargs)
def quote_cmd(cmd: typing.List[str]):
    """Return *cmd* joined into one shell-safe command string."""
    quoted_parts = [shlex.quote(part) for part in cmd]
    return " ".join(quoted_parts)
def die(*args):
    """Abort the program, printing all arguments as one bold red message."""
    message = " ".join(str(arg) for arg in args)
    sys.exit(red(message, style="bold"))
def run(cmd: list, **kwargs):
    """Echo *cmd* and execute it, raising CalledProcessError on failure."""
    print(cmd, kwargs)
    stringified = [str(part) for part in cmd]
    subprocess.check_call(stringified, **kwargs)
class ErrorKind(Enum):
    """Kind of compiler failure being reduced.

    Each member's value is a tuple of byte patterns; presumably these are
    matched against tool output to classify the failure (usage is outside
    this block).
    """
    CRASH = ()
    INFINITE_LOOP = (b"INFINITE LOOP:",)
    FATAL_ERROR = (b"fatal error:", b"LLVM ERROR:", b"*** Bad machine code:")
    AddressSanitizer_ERROR = (b"ERROR: AddressSanitizer:",)
class LitSubstitutionHandler(object):
    """Builds the list of lit substitutions (%clang, %cheri_cc1, llc, ...).

    Works by instantiating lit's LLVMConfig with two minimal fake objects
    standing in for the lit config and test-suite parameters.
    """
    class _FakeLitConfig(object):
        """Minimal stand-in for lit's LitConfig object."""
        def __init__(self, args: "Options"):
            self.params = dict(CHERI_CAP_SIZE="16")
            self.quiet = True

        def note(self, msg):
            print(blue(msg))

        def fatal(self, msg):
            sys.exit(msg)

    class _FakeLitParams(object):
        """Minimal stand-in for lit's per-suite config object."""
        def __init__(self, args: "Options"):
            self.available_features = set()
            self.substitutions = []
            self.llvm_tools_dir = str(args.bindir)
            self.environment = os.environ.copy()
            self.name = "reduce-crash"
            # Don't matter but are needed for clang substitutions
            self.target_triple = "x86_64-unknown-linux-gnu"
            self.host_triple = "x86_64-unknown-linux-gnu"

    def __init__(self, args: "Options"):
        # Populate self.substitutions with the same %-substitutions that
        # the real lit test suite would use.
        llvm_config = LLVMConfig(LitSubstitutionHandler._FakeLitConfig(args), LitSubstitutionHandler._FakeLitParams(args))
        llvm_config.use_default_substitutions()
        # Not really required but makes debugging tests easier
        llvm_config.use_clang()
        llvm_config.add_cheri_tool_substitutions(["llc", "opt", "llvm-mc"])
        llvm_tools = [
            'dsymutil', 'lli', 'lli-child-target', 'llvm-ar', 'llvm-as',
            'llvm-bcanalyzer', 'llvm-config', 'llvm-cov', 'llvm-cxxdump', 'llvm-cvtres',
            'llvm-diff', 'llvm-dis', 'llvm-dwarfdump', 'llvm-exegesis', 'llvm-extract',
            'llvm-isel-fuzzer', 'llvm-ifs', 'llvm-install-name-tool',
            'llvm-jitlink', 'llvm-opt-fuzzer', 'llvm-lib',
            'llvm-link', 'llvm-lto', 'llvm-lto2', 'llvm-mc', 'llvm-mca',
            'llvm-modextract', 'llvm-nm', 'llvm-objcopy', 'llvm-objdump',
            'llvm-pdbutil', 'llvm-profdata', 'llvm-ranlib', 'llvm-rc', 'llvm-readelf',
            'llvm-readobj', 'llvm-rtdyld', 'llvm-size', 'llvm-split', 'llvm-strings',
            'llvm-strip', 'llvm-tblgen', 'llvm-undname', 'llvm-c-test', 'llvm-cxxfilt',
            'llvm-xray', 'yaml2obj', 'obj2yaml', 'yaml-bench', 'verify-uselistorder',
            'bugpoint', 'llc', 'llvm-symbolizer', 'opt', 'sancov', 'sanstats'
        ]
        llvm_config.add_tool_substitutions(llvm_tools)
        self.substitutions = llvm_config.config.substitutions
        # NOTE(review): dumps the whole substitution table on startup — looks
        # like a leftover debugging aid; consider guarding with --verbose.
        import pprint
        pprint.pprint(self.substitutions)

    def expand_lit_subtitutions(self, cmd: str) -> str:
        # (sic: method name typo "subtitutions" kept — callers may rely on it)
        """Expand all lit %-substitutions in *cmd* and return the result."""
        result = lit.TestRunner.applySubstitutions([cmd], self.substitutions)
        assert len(result) == 1
        print(blue(cmd), "->", red(result))
        return result[0]

    # TODO: reverse apply:
# TODO: reverse apply:
def add_lit_substitutions(args: "Options", run_line: str) -> str:
    """Convert an absolute-path command line into a lit RUN: line.

    Replaces the absolute clang/opt/llc paths with the corresponding lit
    substitutions and folds explicit CHERI triple/cpu/abi flags into the
    %cheri_cc1 / %cheri_purecap_cc1 / %cheri_llc / %cheri_purecap_llc /
    %cheri_opt shorthands.

    :param args: parsed Options (used for the clang/opt/llc binary paths)
    :param run_line: the raw command line to rewrite
    :return: the rewritten RUN: line
    """
    for path, replacement in ((args.clang_cmd, "%clang"), (args.opt_cmd, "opt"), (args.llc_cmd, "llc")):
        if str(path) in run_line:
            run_line = run_line.replace(str(path), replacement)
            break
    run_line = re.sub(r"%clang\s+-cc1", "%clang_cc1", run_line)
    # convert %clang_cc1 -target-cpu cheri to %cheri_cc1 / %cheri_purecap_cc1
    run_line = run_line.replace("-Werror=implicit-int", "")  # important for creduce but not for the test
    if "%clang_cc1" in run_line:
        target_cpu_re = r"-target-cpu\s+cheri[^\s]*\s*"
        triple_cheri_freebsd_re = re.compile(r"-triple\s+((?:cheri|mips64c128|mips64c256)-unknown-freebsd\d*(-purecap)?)*\s+")
        found_cheri_triple = None
        triple_match = re.search(triple_cheri_freebsd_re, run_line)
        if triple_match:
            found_cheri_triple = triple_match.group(1)
        if re.search(target_cpu_re, run_line) or found_cheri_triple:
            run_line = re.sub(target_cpu_re, "", run_line)  # remove
            run_line = re.sub(triple_cheri_freebsd_re, "", run_line)  # remove
            run_line = run_line.replace("%clang_cc1", "%cheri_cc1")
            run_line = run_line.replace("-mllvm -cheri128", "")
            run_line = re.sub(r"-cheri-size \d+ ", "", run_line)  # remove
            run_line = re.sub(r"-target-cpu mips4 ", "", run_line)  # remove
            target_abi_re = re.compile(r"-target-abi\s+purecap\s*")
            # BUG FIX: found_cheri_triple is None when only -target-cpu matched;
            # guard the substring check to avoid a TypeError ("in None").
            if re.search(target_abi_re, run_line) is not None or (
                    found_cheri_triple and "-purecap" in found_cheri_triple):
                run_line = re.sub(target_abi_re, "", run_line)  # remove
                assert "%cheri_cc1" in run_line
                run_line = run_line.replace("%cheri_cc1", "%cheri_purecap_cc1")
    if "llc " in run_line:
        # TODO: convert the 128/256 variants?
        triple_cheri_freebsd_re = re.compile(r"-mtriple=+((?:cheri|mips64c128|mips64c256)-unknown-freebsd\d*(-purecap)?)*\s+")
        found_cheri_triple = None
        triple_match = re.search(triple_cheri_freebsd_re, run_line)
        if triple_match:
            found_cheri_triple = triple_match.group(1)
        run_line = re.sub(triple_cheri_freebsd_re, "", run_line)  # remove triple
        target_abi_re = re.compile(r"-target-abi\s+purecap\s*")
        # BUG FIX: same None guard as above — the llc line may have no -mtriple at all.
        if re.search(target_abi_re, run_line) is not None or (
                found_cheri_triple and "-purecap" in found_cheri_triple):
            # purecap
            run_line = re.sub(target_abi_re, "", run_line)  # remove
            run_line = re.sub(r"\s-relocation-model=pic", "", run_line)  # remove
            run_line = re.sub("llc\s+", "%cheri_purecap_llc ", run_line)  # remove triple
        else:
            # hybrid
            run_line = re.sub("llc\s+", "%cheri_llc ", run_line)  # remove triple
    # remove 128 vs 256:
    run_line = re.sub(r" -cheri-size \d+", "", run_line)  # remove
    run_line = re.sub(r" -mattr=\+cheri\d+", "", run_line)  # remove
    run_line = re.sub(r" -mcpu=\+cheri\d+", "", run_line)  # remove
    run_line = re.sub(r" -mattr=\+chericap", "", run_line)  # remove (implied by %cheri)
    if "opt " in run_line:
        run_line = re.sub(r"opt\s+-mtriple=cheri-unknown-freebsd", "%cheri_opt", run_line)
    return run_line
# to test the lit substitutions
# class fake_args:
# clang_cmd = "/path/to/clang"
# llc_cmd = "/path/to/llc"
# opt_cmd = "/path/to/opt"
#
# print(add_lit_substitutions(fake_args(), "llc -o /dev/null -mtriple=cheri-unknown-freebsd-purecap -relocation-model=pic -thread-model=posix -mattr=-noabicalls -mattr=+soft-float -mattr=+chericap -mattr=+cheri128 -target-abi purecap -float-abi=soft -vectorize-loops -vectorize-slp -mcpu=mips4 -O2 -mxcaptable=false -mips-ssection-threshold=0 -cheri-cap-table-abi=pcrel -verify-machineinstrs %s"))
# print(add_lit_substitutions(fake_args(), "%clang_cc1 -triple mips64c128-unknown-freebsd13-purecap -munwind-tables -fuse-init-array -target-cpu mips4 -target-abi purecap -cheri-size 128 -mllvm -cheri-cap-table-abi=pcrel -target-linker-version 450.3 -std=c++11 -fno-builtin -faddrsig -o - -emit-llvm -O0 -Wimplicit-int -Wfatal-errors %s"))
#
# sys.exit()
class ReduceTool(metaclass=ABCMeta):
    """Abstract driver for an external test-case reduction tool.

    Subclasses (creduce/bugpoint/llvm-reduce wrappers) set the exit-code
    convention for "interesting" (still failing) inputs and implement
    reduce() and input_file_arg().
    """
    def __init__(self, args: "Options", name: str, tool: Path) -> None:
        self.tool = tool
        self.name = name
        self.exit_statement = ""
        self.args = args
        self.infile_name = None
        # Exit codes used by the generated interestingness script; must be
        # set by the subclass to match the reduce tool's convention.
        self.not_interesting_exit_code = None  # type: int
        self.interesting_exit_code = None  # type: int
        print("Reducing test case using", name)

    def _reduce_script_text(self, input_file: Path, run_cmds: typing.List[typing.List[str]]):
        """Generate the Python source of the interestingness-test script."""
        verbose_print("Generating reduce script for the following commands:", run_cmds)
        # Handling timeouts in a shell script is awful -> just generate a python script instead
        result = """#!/usr/bin/env python3
import subprocess
import os
import signal
import sys
# https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true/4791612#4791612
def run_cmd(cmd, timeout):
    with subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid) as process:
        try:
            stdout, stderr = process.communicate(timeout=timeout)
            retcode = process.poll()
            return subprocess.CompletedProcess(process.args, retcode, stdout, stderr)
        except subprocess.TimeoutExpired:
            os.killpg(os.getpgid(process.pid), signal.SIGKILL)
            process.kill()
            raise subprocess.TimeoutExpired(process.args, timeout)
        except:
            os.killpg(os.getpgid(process.pid), signal.SIGKILL)
            process.kill()
            process.wait()
            raise
"""
        timeout_arg = self.args.timeout if self.args.timeout else "None"
        for cmd in run_cmds:
            # check for %s should have happened earlier
            # (note: this tests for a literal "%s" list element, not a substring)
            assert "%s" in cmd, cmd
            compiler_cmd = quote_cmd(cmd).replace("%s", self.input_file_arg(input_file))
            assert compiler_cmd.startswith("/"), "Command must use absolute path: " + compiler_cmd
            grep_msg = ""
            crash_flag = "--crash" if self.args.expected_error_kind in (None, ErrorKind.CRASH) else ""
            if self.args.crash_message:
                grep_msg += "2>&1 | grep -F " + shlex.quote(self.args.crash_message)
            # exit once the first command crashes
            # A timeout is only "interesting" when we are reducing an infinite loop.
            timeout_exitcode = self.not_interesting_exit_code
            if self.args.expected_error_kind == ErrorKind.INFINITE_LOOP:
                timeout_exitcode = self.interesting_exit_code
            result += """
try:
    command = r'''{not_cmd} {crash_flag} {command} {grep_msg} '''
    result = run_cmd(command, timeout={timeout_arg})
    if result.returncode != 0:
        sys.exit({not_interesting})
except subprocess.TimeoutExpired:
    print("TIMED OUT", file=sys.stderr)
    sys.exit({timeout_exitcode})
except Exception as e:
    print("SOME OTHER ERROR:", e)
    sys.exit({not_interesting})
""".format(timeout_arg=timeout_arg, not_interesting=self.not_interesting_exit_code, timeout_exitcode=timeout_exitcode,
           not_cmd=self.args.not_cmd, crash_flag=crash_flag, command=compiler_cmd, grep_msg=grep_msg)
        return result + "sys.exit(" + str(self.interesting_exit_code) + ")"

    def _create_reduce_script(self, tmpdir: Path, input_file: Path, run_cmds):
        """Write the interestingness script to *tmpdir* and sanity-check it."""
        reduce_script = Path(tmpdir, "reduce_script.sh").absolute()
        reduce_script_text = self._reduce_script_text(input_file, run_cmds)
        reduce_script.write_text(reduce_script_text)
        print("Reduce script:\n", bold(reduce_script_text), sep="")
        reduce_script.chmod(0o755)
        if not self.is_reduce_script_interesting(reduce_script, input_file):
            die("Reduce script is not interesting!")
        return reduce_script

    def create_test_case(self, input_text: str, test_case: Path,
                         run_lines: typing.List[str]):
        """Write *input_text* preceded by the (substituted) RUN: lines to *test_case*."""
        processed_run_lines = []
        # TODO: try to remove more flags from the RUN: line!
        for run_line in run_lines:
            verbose_print("Adding run line: ", run_line)
            with_lit_subs = add_lit_substitutions(self.args, run_line)
            verbose_print("Substituted line: ", with_lit_subs)
            processed_run_lines.append(with_lit_subs)
        result = "\n".join(processed_run_lines) + "\n" + input_text
        with test_case.open("w", encoding="utf-8") as f:
            f.write(result)
            f.flush()
        print("\nResulting test case ", test_case, sep="")
        verbose_print(result)

    def is_reduce_script_interesting(self, reduce_script: Path, input_file: Path) -> bool:
        """Check that the generated script classifies *input_file* as interesting."""
        # BUG FIX: was `raise NotImplemented()`, which raises TypeError since
        # NotImplemented is not callable; NotImplementedError is the exception.
        raise NotImplementedError()

    @abstractmethod
    def reduce(self, input_file: Path, extra_args: list, tempdir: Path,
               run_cmds: typing.List[typing.List[str]],
               run_lines: typing.List[str]):
        """Run the reduction tool on *input_file* and emit the reduced test case."""
        raise NotImplementedError()

    @abstractmethod
    def input_file_arg(self, input_file: Path) -> str:
        """Return how the interestingness script should reference the input file."""
        raise NotImplementedError()
class RunBugpoint(ReduceTool):
    """Reduce LLVM IR using bugpoint.

    Bugpoint's custom compile-command convention: the script exits non-zero
    for interesting (still failing) inputs.
    """
    def __init__(self, args: "Options") -> None:
        super().__init__(args, "bugpoint", tool=args.bugpoint_cmd)
        # bugpoint wants a non-zero exit code on interesting exit code
        self.interesting_exit_code = 1  # type: int
        self.not_interesting_exit_code = 0  # type: int

    def reduce(self, input_file, extra_args, tempdir,
               run_cmds: typing.List[typing.List[str]],
               run_lines: typing.List[str]):
        """Run bugpoint and convert its output bitcode into a lit test case."""
        bugpoint = [self.tool, "-opt-command=" + str(self.args.opt_cmd), "-output-prefix=" + input_file.name]
        if self.args.verbose:
            bugpoint.append("-verbose-errors")
        # bugpoint writes <prefix>-reduced-simplified.bc into the CWD on success.
        expected_output_file = Path.cwd() / (input_file.name + "-reduced-simplified.bc")
        if expected_output_file.exists():
            print("bugpoint output file already exists: ", bold(expected_output_file))
            if input("Delete it and continue? [Y/n]").lower().startswith("n"):
                die("Can't continue")
            else:
                expected_output_file.unlink()
        # use a custom script to check for matching crash message:
        # This is also needed when reducing infinite loops since otherwise bugpoint will just freeze
        if self.args.crash_message or self.args.expected_error_kind == ErrorKind.INFINITE_LOOP:
            # check that the reduce script is interesting:
            # http://blog.llvm.org/2015/11/reduce-your-testcases-with-bugpoint-and.html
            # ./bin/bugpoint -compile-custom -compile-command=./check.sh -opt-command=./bin/opt my_test_case.ll
            reduce_script = self._create_reduce_script(tempdir, input_file.absolute(), run_cmds)
            print("Checking whether reduce script works")
            test_result = subprocess.run([str(reduce_script.absolute()), str(input_file)])
            if test_result.returncode == 0:
                die("Interestingness test failed for bugpoint. Does the command really crash? Script was",
                    reduce_script.read_text())
            bugpoint += ["-compile-custom", "-compile-command=" + str(reduce_script.absolute()), input_file]
        else:
            # No custom script needed: let bugpoint run llc itself.
            bugpoint += ["-run-llc-ia", input_file]
            tool_args = run_cmds[0][1:]
            # filter the tool args: drop the input placeholder and any -o <file> pair
            bugpoint += ["--tool-args", "--"]
            skip_next = False
            for arg in tool_args:
                if skip_next:
                    skip_next = False
                    continue
                elif "%s" in arg:
                    continue
                elif arg.strip() == "-o":
                    skip_next = True
                    continue
                else:
                    bugpoint.append(arg)
        bugpoint += extra_args
        print("About to run", bugpoint)
        print("Working directory:", os.getcwd())
        try:
            env = os.environ.copy()
            # Ensure bugpoint finds the just-built LLVM tools first.
            env["PATH"] = str(self.args.bindir) + ":" + env["PATH"]
            try:
                run(bugpoint, env=env)
            except KeyboardInterrupt:
                print(red("\nCTRL+C detected, stopping bugpoint.", style="bold"))
        finally:
            print("Output files are in:", os.getcwd())
            # TODO: generate a test case from the output files?
            if expected_output_file.exists():
                print("Attempting to convert generated bitcode file to a test case...")
                # NOTE(review): the `dis` result is unused — the renamed output below
                # is what ends up in the test case.
                dis = subprocess.run([str(self.args.llvm_dis_cmd), "-o", "-", str(expected_output_file)], stdout=subprocess.PIPE)
                # Rename instructions to avoid stupidly long names generated by bugpoint:
                renamed = subprocess.run([str(self.args.opt_cmd), "-S", "-o", "-", "--instnamer", "--metarenamer",
                                          "--name-anon-globals", str(expected_output_file)], stdout=subprocess.PIPE)
                self.create_test_case(renamed.stdout.decode("utf-8"), input_file.with_suffix(".test" + input_file.suffix), run_lines)

    def input_file_arg(self, input_file: Path):
        # bugpoint expects a script that takes the input files as arguments:
        return "''' + ' '.join(sys.argv[1:]) + '''"

    def is_reduce_script_interesting(self, reduce_script: Path, input_file: Path) -> bool:
        """Return True if the script's exit code marks *input_file* as interesting."""
        proc = subprocess.run([str(reduce_script), str(input_file)])
        return proc.returncode == self.interesting_exit_code
class RunLLVMReduce(ReduceTool):
    """Reduce LLVM IR using llvm-reduce with a generated interestingness script."""
    def __init__(self, args: "Options") -> None:
        super().__init__(args, "llvm-reduce", tool=args.llvm_reduce_cmd)
        # BUG FIX (comment): unlike bugpoint, llvm-reduce expects the test
        # script to exit *zero* for interesting inputs.
        self.interesting_exit_code = 0  # type: int
        self.not_interesting_exit_code = 1  # type: int

    def reduce(self, input_file, extra_args, tempdir, run_cmds: typing.List[typing.List[str]], run_lines: typing.List[str]):
        """Run llvm-reduce on *input_file* and emit the reduced test case."""
        expected_output_file = Path.cwd() / (input_file.name + "-reduced.ll")
        if expected_output_file.exists():
            # BUG FIX: message said "bugpoint" (copy-paste from RunBugpoint).
            print("llvm-reduce output file already exists: ", bold(expected_output_file))
            if input("Delete it and continue? [Y/n]").lower().startswith("n"):
                die("Can't continue")
            else:
                expected_output_file.unlink()
        # This is also needed when reducing infinite loops since otherwise bugpoint will just freeze
        reduce_script = self._create_reduce_script(tempdir, input_file.absolute(), run_cmds)
        llvm_reduce = [self.tool, "--test=" + str(reduce_script.absolute()),
                       "--output=" + str(expected_output_file), input_file]
        llvm_reduce += extra_args
        print("About to run", llvm_reduce)
        print("Working directory:", os.getcwd())
        try:
            env = os.environ.copy()
            # Ensure llvm-reduce finds the just-built LLVM tools first.
            env["PATH"] = str(self.args.bindir) + ":" + env["PATH"]
            try:
                run(llvm_reduce, env=env)
            except KeyboardInterrupt:
                print(red("\nCTRL+C detected, stopping llvm-reduce.", style="bold"))
        finally:
            print("Output files are in:", os.getcwd())
            # TODO: generate a test case from the output files?
            if expected_output_file.exists():
                # print("Renaming functions in test...")
                # renamed = subprocess.run([str(self.args.opt_cmd), "-S", "-o", "-", "--instnamer", "--metarenamer",
                #                           "--name-anon-globals", str(expected_output_file)], stdout=subprocess.PIPE)
                # self.create_test_case(renamed.stdout.decode("utf-8"), input_file.with_suffix(".test" + input_file.suffix), run_lines)
                self.create_test_case(expected_output_file.read_text("utf-8"), input_file.with_suffix(".test" + input_file.suffix), run_lines)

    def input_file_arg(self, input_file: Path):
        # llvm-reduce expects a script that takes the input files as arguments:
        return "''' + ' '.join(sys.argv[1:]) + '''"

    def is_reduce_script_interesting(self, reduce_script: Path, input_file: Path) -> bool:
        """Return True if the script's exit code marks *input_file* as interesting."""
        proc = subprocess.run([str(reduce_script), str(input_file)])
        return proc.returncode == self.interesting_exit_code
class RunCreduce(ReduceTool):
    """Reduce a C/C++ source file using creduce.

    Creduce's convention: the interestingness script exits zero for
    interesting (still failing) inputs.
    """
    def __init__(self, args: "Options") -> None:
        super().__init__(args, "creduce", tool=args.creduce_cmd)
        self.exit_statement = "&& exit 0"
        # creduce wants a zero exit code on interesting test cases
        self.interesting_exit_code = 0
        self.not_interesting_exit_code = 1

    def reduce(self, input_file: Path, extra_args, tempdir,
               run_cmds: typing.List[typing.List[str]],
               run_lines: typing.List[str]):
        """Run creduce (which rewrites *input_file* in place) and emit the test case."""
        reduce_script = self._create_reduce_script(tempdir, input_file.absolute(), run_cmds)
        creduce = ["time", str(self.tool), str(reduce_script), str(input_file), "--timing"] + extra_args
        # This is way too verbose
        if self.args.extremely_verbose:
            creduce.append("--print-diff")
        print("About to run", creduce)
        try:
            # work around https://github.com/csmith-project/creduce/issues/195 for released versions of creduce
            shutil.copy(str(input_file), str(Path(tempdir, input_file.name)))
            run(creduce, cwd=tempdir)
        except KeyboardInterrupt:
            print(red("\nCTRL+C detected, stopping creduce.", style="bold"))
        # write the output test file:
        print("\nDONE!")
        self.create_test_case(input_file.read_text(encoding="utf-8"),
                              input_file.with_suffix(".test" + input_file.suffix),
                              run_lines)

    def input_file_arg(self, input_file: Path):
        # creduce creates an input file in the test directory with the same name as the original input
        return input_file.name

    def is_reduce_script_interesting(self, reduce_script: Path, input_file: Path) -> bool:
        """Optionally verify the script; creduce re-checks it itself anyway."""
        if self.args.verbose:
            return self.__is_reduce_script_interesting(reduce_script, input_file)
        else:
            return True  # creduce checks anyway, this just wastes time

    @staticmethod
    def __is_reduce_script_interesting(reduce_script: Path, input_file: Path) -> bool:
        # Run in a scratch directory containing a copy of the input, mirroring
        # how creduce invokes the script.
        with tempfile.TemporaryDirectory() as tmpdir:
            shutil.copy(str(input_file), str(Path(tmpdir, input_file.name)))
            proc = subprocess.run([str(reduce_script), str(input_file)], cwd=tmpdir)
            return proc.returncode == 0
class SkipReducing(ReduceTool):
    """No-op reduce tool: skips reduction and just writes the test-case stub."""
    def __init__(self, args: "Options") -> None:
        super().__init__(args, "noop", tool=Path("/dev/null"))

    def reduce(self, input_file, extra_args, tempdir,
               run_cmds: typing.List[typing.List[str]],
               run_lines: typing.List[str]):
        """Emit a placeholder test case without running any reduction."""
        self.create_test_case("Some strange reduced test case\n",
                              input_file.with_suffix(".test" + input_file.suffix), run_lines)

    def input_file_arg(self, input_file: Path) -> str:
        # Never called for the no-op tool (no interestingness script is generated).
        # BUG FIX: was `raise NotImplemented()`, which raises TypeError since
        # NotImplemented is not callable.
        raise NotImplementedError()
class Options(object):
    """Typed view over the parsed argparse namespace plus derived tool paths."""

    # noinspection PyUnresolvedReferences
    def __init__(self, args: argparse.Namespace) -> None:
        self.args = args
        self.verbose = args.verbose  # type: bool
        self.extremely_verbose = args.extremely_verbose  # type: bool
        self.timeout = args.timeout  # type: int
        self.bindir = Path(args.bindir)
        self.no_initial_reduce = args.no_initial_reduce  # type: bool
        self.crash_message = args.crash_message  # type: str
        self.llvm_error = args.llvm_error  # type: bool
        # The failure could also be an LLVM error or Address Sanitizer error
        # that returns a non-crash exit code.
        self.expected_error_kind = None  # type: ErrorKind
        if self.llvm_error:
            self.expected_error_kind = ErrorKind.FATAL_ERROR
        if args.infinite_loop:
            self.expected_error_kind = ErrorKind.INFINITE_LOOP
            if not self.timeout:
                self.timeout = 30

    @property
    def clang_cmd(self):
        return self._get_command("clang")

    @property
    def opt_cmd(self):
        return self._get_command("opt")

    @property
    def not_cmd(self):
        return self._get_command("not")

    @property
    def llc_cmd(self):
        return self._get_command("llc")

    @property
    def llvm_dis_cmd(self):
        return self._get_command("llvm-dis")

    @property
    def bugpoint_cmd(self):
        return self._get_command("bugpoint")

    @property
    def llvm_reduce_cmd(self):
        return self._get_command("llvm-reduce")

    @property
    def creduce_cmd(self):
        # creduce is external, so fall back to searching $PATH.
        # noinspection PyUnresolvedReferences
        creduce_path = self.args.creduce_cmd or shutil.which("creduce")
        if not creduce_path:
            die("Could not find `creduce` in $PATH. Add it to $PATH or pass --creduce-cmd")
        return Path(creduce_path)

    def _get_command(self, name):
        """Resolve tool *name* from an explicit --<name>-cmd flag or from bindir."""
        override = getattr(self.args, name + "_cmd", None)
        result = Path(override) if override else Path(self.bindir, name)
        if not result.exists():
            die("Invalid `" + name + "` binary`", result)
        return result
class Reducer(object):
    def __init__(self, parser: argparse.ArgumentParser) -> None:
        """Parse the command line and initialise the global reducer state.

        Also publishes the module-level `options` global used by the
        verbose_print()/die() helpers.
        """
        self.args, self.reduce_args = parser.parse_known_args()
        if self.args.extremely_verbose:
            self.args.verbose = True
        global options
        options = Options(self.args)
        self.options = options
        self.subst_handler = LitSubstitutionHandler(options)
        self.testcase = Path(self.args.testcase)
        # RUN: lines to add to the test case
        self.run_lines = []  # type: typing.List[str]
        # the lines without RUN: suitably quoted for passing to a shell
        self.run_cmds = []  # type: typing.List[typing.List[str]]
        # Chosen reduce tool (set later, outside this method)
        self.reduce_tool = None  # type: ReduceTool
# returns the real input file
def parse_RUN_lines(self, infile: Path) -> Path:
is_crash_reproducer = infile.suffix == ".sh"
if is_crash_reproducer:
verbose_print("Input file is a crash reproducer script")
verbose_print("Finding test command(s) in", infile)
with infile.open("r", errors="replace", encoding="utf-8") as f:
if is_crash_reproducer:
real_infile = self._parse_crash_reproducer(infile, f)
else:
real_infile = infile
self._parse_test_case(f, infile)
if len(self.run_cmds) < 1:
die("Could not find any RUN: lines in", infile)
return real_infile
def _parse_crash_reproducer(self, infile, f) -> Path:
    """Parse a clang-generated crash reproducer shell script.

    Finds the clang invocation, locates the real source file it compiles,
    simplifies the command via simplify_crash_command(), and registers the
    resulting RUN: line. Returns the real input source file.
    """
    real_in_file = None
    for line in f.readlines():
        if line.strip().startswith("#"):
            continue  # skip shell comments
        command = shlex.split(line)
        if "clang" not in command[0]:
            die("Executed program should contain 'clang', but was", command[0])
        # The input source file is normally the last argument:
        source_file_index = -1
        source_file_name = command[source_file_index]
        source_file = infile.with_name(source_file_name)
        # Walk backwards past trailing options until something that does not
        # look like a flag is found:
        while source_file_name.startswith("-"):
            print("WARNING: crash reproducer command line probably does not end with the input file",
                  "name: got", blue(source_file_name), "which is probably not a file!")
            source_file_index = source_file_index - 1
            source_file_name = command[source_file_index]
            source_file = infile.with_name(source_file_name)
            # NOTE(review): this `continue` is a no-op (it only re-tests the
            # loop condition, which runs anyway) — possibly a leftover from an
            # earlier loop structure; confirm the intended control flow.
            if not source_file.exists():
                continue
        if not source_file.exists():
            die("Reproducer input file", source_file, "does not exist!")
        real_in_file = source_file
        verbose_print("Real input file is", real_in_file)
        command[source_file_index] = "%s"  # lit-style placeholder, substituted later
        # output to stdout
        if "-o" not in command:
            print("Adding '-o -' to the compiler invocation")
            command += ["-o", "-"]
        # try to remove all unnecessary command line arguments
        command[0] = str(self.options.clang_cmd)  # replace command with the clang binary
        command, real_in_file = self.simplify_crash_command(command, real_in_file.absolute())
        assert Path(command[0]).is_absolute(), "Command must be absolute: " + command[0]
        quoted_cmd = quote_cmd(command)
        verbose_print("Test command is", bold(quoted_cmd))
        self.run_cmds.append(command)
        # Choose the comment marker matching the input language for the RUN: line
        if real_in_file.suffix == ".ll":
            comment_start = ";"
        elif real_in_file.suffix in (".S", ".s"):
            comment_start = "#"
        else:
            comment_start = "//"
        self.run_lines.append(comment_start + " RUN: " + quoted_cmd)
    if not real_in_file:
        die("Could not compute input file for crash reproducer")
    return real_in_file
def _check_crash(self, command, infile, proc_info: subprocess.CompletedProcess=None, force_print_cmd=False) -> typing.Optional[ErrorKind]:
    """Run ``command infile`` and classify the failure.

    Returns the matching ErrorKind when the process fails in the expected way
    (also matching self.options.crash_message when that is set), otherwise
    None. When *proc_info* is given, the completed-process details are copied
    into it so the caller can inspect stdout/stderr of this run.
    """
    full_cmd = command + [str(infile)]
    assert "%s" not in full_cmd, full_cmd
    if force_print_cmd:
        print("\nRunning", blue(quote_cmd(full_cmd)))
    else:
        verbose_print("\nRunning", blue(quote_cmd(full_cmd)))
    # The "noop" reduce tool pretends every input crashes (debugging aid):
    if self.args.reduce_tool == "noop":
        if proc_info is not None:
            proc_info.stderr = b"Assertion `noop' failed."
        print(green(" yes"))
        return ErrorKind.CRASH
    # Use the user-provided timeout, else 30s, so infinite loops terminate:
    infinite_loop_timeout = self.options.timeout if self.options.timeout else 30
    try:
        proc = subprocess.run(full_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
                              timeout=infinite_loop_timeout)
        error_kind = None
        # Negative return code = killed by a signal; 254 presumably an
        # assertion/`not --crash` exit code — TODO confirm.
        if proc.returncode < 0 or proc.returncode == 254:
            error_kind = ErrorKind.CRASH
        else:
            verbose_print("Exit code", proc.returncode, "was not a crash, checking stderr for known error.")
            verbose_print("stderr:", proc.stderr)
            # treat fatal llvm errors (cannot select, etc)as crashes too
            # Also ASAN errors just return 1 instead of a negative exit code so we have to grep the message
            for kind in ErrorKind:
                if kind.value:
                    verbose_print("Checking for", kind.value)
                    if any(msg in proc.stderr for msg in kind.value):
                        verbose_print("Crash was", kind)
                        error_kind = kind
                        break
    except subprocess.TimeoutExpired as e:
        # Timed-out runs are modeled as a synthetic CompletedProcess:
        proc = subprocess.CompletedProcess(e.args, -1, e.stdout, e.stderr)
        error_kind = ErrorKind.INFINITE_LOOP
        verbose_print("Running took longer than", infinite_loop_timeout, "seconds -> assuming infinite loop")
    if proc_info is not None:  # To get the initial error message:
        proc_info.stdout = proc.stdout
        proc_info.stderr = proc.stderr
        proc_info.returncode = proc.returncode
        proc_info.args = proc.args
    if error_kind:
        # Reject failures of a different kind than the one being reduced:
        if self.options.expected_error_kind and self.options.expected_error_kind != error_kind:
            print(red(" yes, but got " + error_kind.name + " instead of " + self.options.expected_error_kind.name))
            return None
        crash_message_found = not self.options.crash_message or (self.options.crash_message in proc.stderr.decode("utf-8"))
        # Infinite loops have no stderr message to match:
        if error_kind == ErrorKind.INFINITE_LOOP or crash_message_found:
            print(green(" yes"))
            return error_kind
        else:
            print(red(" yes, but with a different crash message!"))
            print("Note: Expected crash message '", bold(self.options.crash_message), "' not found in:\n",
                  proc.stderr.decode("utf-8"), sep="")
            return None
    print(red(" no"))
    return None
@staticmethod
def _filter_args(args, *, noargs_opts_to_remove=(), noargs_opts_to_remove_startswith=(),
                 one_arg_opts_to_remove=(), one_arg_opts_to_remove_if=None):
    """Return a copy of *args* with the requested options filtered out.

    :param noargs_opts_to_remove: value-less flags removed on exact match
    :param noargs_opts_to_remove_startswith: value-less flags removed on prefix match
    :param one_arg_opts_to_remove: flags removed together with their value
    :param one_arg_opts_to_remove_if: dict mapping a flag to a predicate on
        its value; flag+value are removed only when the predicate is true
    """
    # Fix: the defaults used to be mutable `list()` instances created once at
    # definition time (classic mutable-default pitfall); tuples are safe.
    if one_arg_opts_to_remove_if is None:
        one_arg_opts_to_remove_if = dict()
    result = []
    skip_next = False

    def should_remove_arg(option, value):
        # True when a registered predicate matches this option's value.
        for a, predicate in one_arg_opts_to_remove_if.items():
            if option == a:
                verbose_print("Testing predicate", predicate, "for arg", option, "on", value)
                if predicate(value):
                    return True
        return False

    for i, arg in enumerate(args):
        if skip_next:
            skip_next = False
            continue
        if any(arg == a for a in noargs_opts_to_remove) or any(arg.startswith(a) for a in noargs_opts_to_remove_startswith):
            continue
        if any(arg == a for a in one_arg_opts_to_remove):
            skip_next = True  # also drop the option's value
            continue
        if (i + 1) < len(args) and should_remove_arg(arg, args[i + 1]):
            skip_next = True  # also drop the option's value
            continue
        # none of the filters matched -> keep the argument
        result.append(arg)
    return result
def _try_remove_args(self, command: list, infile: Path, message: str, *, extra_args: list=None, **kwargs):
    """Drop flags (via _filter_args **kwargs), optionally append *extra_args*,
    and keep the stripped command only if it still crashes on *infile*."""
    candidate = self._filter_args(command, **kwargs)
    print(message, end="", flush=True)
    if extra_args:
        candidate += extra_args
    if candidate == command:
        print(green(" none of those flags are in the command line"))
        return command
    return candidate if self._check_crash(candidate, infile) else command
@staticmethod
def _infer_crash_message(stderr: bytes):
    """Heuristically extract a stable crash-message substring from *stderr*.

    Tries a priority-ordered list of regexes (sanitizer reports, backend
    errors, assertions, unreachables, generic fatal errors) against each
    line and returns the first capture, or None when nothing matches.
    """
    print("Inferring crash message from", stderr)
    if not stderr:
        return None
    simple_regexes = [re.compile(s) for s in (
        r"Assertion `(.+)' failed.",  # Linux assert()
        r"Assertion failed: \(.+\),",  # FreeBSD/Mac assert()
        r"UNREACHABLE executed( at .+)?!",  # llvm_unreachable()
        # generic code gen crashes (at least creduce will keep the function name):
        r"LLVM IR generation of declaration '(.+)'",
        r"Generating code for declaration '(.+)'",
        r"LLVM ERROR: Cannot select: (.+)",
        r"LLVM ERROR: Cannot select:",
        r"LLVM ERROR: (.+)",
        r"\*\*\* Bad machine code: (.+) \*\*\*",
    )]
    # Pair each regex with the group index to extract (0 = whole match):
    regexes = [(r, 0) for r in simple_regexes]
    # For this crash message we only want group 1
    # TODO: add another grep for the program counter
    regexes.insert(0, (re.compile(r"ERROR: (AddressSanitizer: .+ on address) 0x[0-9a-fA-F]+ (at pc 0x[0-9a-fA-F]+)"), 1))
    # only get the kind of the cannot select from the message (without the number for tNN)
    regexes.insert(0, (re.compile(r"LLVM ERROR: Cannot select: (t\w+): (.+)", ), 2))
    # This message is different when invoked as llc: it prints LLVM ERROR instead
    # so we only capture the actual message
    regexes.insert(0, (re.compile(r"fatal error: error in backend:(.+)"), 1))
    # same without colour diagnostics:
    regexes.insert(0, (re.compile("\x1b\\[0m\x1b\\[0;1;31mfatal error: \x1b\\[0merror in backend:(.+)"), 1))
    # Any other error in backend
    regexes.append((re.compile(r"error in backend:(.+)"), 1))
    # final fallback message: generic fatal error:
    regexes.append((re.compile(r"fatal error:(.+)"), 0))
    for line in stderr.decode("utf-8").splitlines():
        # Check for failed assertions:
        for r, index in regexes:
            match = r.search(line)
            if match:
                message = match.group(index)
                if "\x1b" in message:
                    # strip a trailing ANSI colour-reset escape sequence
                    message = message[:message.rfind("\x1b")]
                print("Inferred crash message bytes: ", message.encode("utf-8"))
                return message
    return None
def simplify_crash_command(self, command: list, infile: Path) -> tuple:
    """Normalize and minimize a crashing command line.

    Verifies that the command (with the %s placeholder removed) still
    crashes, records the expected error kind and crash message (inferring or
    asking interactively when needed), then dispatches to tool-specific
    simplification. Returns ``(command_ending_in_%s, input_file)``.
    """
    new_command = command.copy()
    assert new_command.count("%s") == 1, new_command
    new_command.remove("%s")

    # Remove -mllvm options that no longer exist
    def is_old_llvm_option(opt: str):
        # -cheri-cap-table=true/false was replace with -mllvm -cheri-cap-table-abi=
        if opt == "-cheri-cap-table" or opt.startswith("-cheri-cap-table="):
            return True
        return False
    new_command = self._filter_args(new_command, one_arg_opts_to_remove_if={"-mllvm": is_old_llvm_option})
    print("Checking whether reproducer crashes with ", new_command[0], ":", sep="", end="", flush=True)
    crash_info = subprocess.CompletedProcess(None, None)
    self.options.expected_error_kind = self._check_crash(new_command, infile, crash_info, force_print_cmd=True)
    if not self.options.expected_error_kind:
        die("Crash reproducer no longer crashes?")
    verbose_print("Reducing a", self.options.expected_error_kind, "with message =", self.options.crash_message)
    verbose_print("Stderr was", crash_info.stderr)
    if not self.options.crash_message:
        if self.options.expected_error_kind == ErrorKind.INFINITE_LOOP:
            # Timeouts have no message; use a sentinel that never matches stderr.
            self.options.crash_message = "INFINITE LOOP WHILE RUNNING, THIS GREP SHOULD NEVER MATCH!"
        else:
            print("Attempting to infer crash message from process output")
            inferred_msg = self._infer_crash_message(crash_info.stderr)
            if inferred_msg:
                print("Inferred crash message as '" + green(inferred_msg) + "'")
                if not input("Use this message? [Y/n]").lower().startswith("n"):
                    self.options.crash_message = inferred_msg
            else:
                print("Could not infer crash message, stderr was:\n\n")
                print(crash_info.stderr.decode("utf-8"))
                print("\n\n")
        # Still nothing -> warn and ask whether to continue anyway:
        if not self.options.crash_message:
            print("Could not infer crash message from crash reproducer.")
            print(red("WARNING: Reducing without specifying the crash message will probably result"
                      " in the wrong test case being generated."))
            if not input("Are you sure you want to continue? [y/N]").lower().startswith("y"):
                sys.exit()
    if new_command[0] == str(self.options.clang_cmd):
        new_command, infile = self._simplify_clang_crash_command(new_command, infile)
    elif new_command[0] == str(self.options.llc_cmd):
        # TODO: should be able to simplify llc crashes (e.g. by adding -O0, -verify-machineinstrs, etc)
        pass
    new_command.append("%s")  # ensure that the command contains %s at the end
    return new_command, infile
@staticmethod
def list_with_flag_at_end(orig: list, flag: str) -> list:
    """Return a copy of *orig* with every occurrence of *flag* removed and a
    single instance of it appended at the end."""
    result = [item for item in orig if item != flag]
    result.append(flag)
    return result
def _simplify_clang_crash_command(self, new_command: list, infile: Path) -> tuple:
    """Minimize a crashing clang invocation.

    Drops flags that usually do not matter, then decides whether this is a
    frontend crash (still crashes with -emit-llvm) or a backend crash and
    delegates to the matching simplification path.
    Returns ``(command, input_file)``.
    """
    assert new_command[0] == str(self.options.clang_cmd)
    assert "-o" in new_command
    assert "%s" not in new_command
    # Keep the unmodified invocation around for the backend-crash path:
    full_cmd = new_command.copy()
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether replacing optimization level with -O0 crashes:",
        noargs_opts_to_remove_startswith=["-O"],
        extra_args=["-O0"]
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without -coverage-notes-file crashes:",
        one_arg_opts_to_remove=["-coverage-notes-file"]
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without debug info crashes:",
        noargs_opts_to_remove=["-dwarf-column-info", "-munwind-tables", "-ggnu-pubnames"],
        one_arg_opts_to_remove=["-split-dwarf-file", "-split-dwarf-output"],
        noargs_opts_to_remove_startswith=["-debug-info-kind=", "-dwarf-version=", "-debugger-tuning=",
                                          "-fdebug-prefix-map="],
    )
    # try emitting llvm-ir (i.e. frontend bug):
    print("Checking whether -emit-llvm crashes:", end="", flush=True)
    generate_ir_cmd = self.list_with_flag_at_end(new_command, "-emit-llvm")
    if "-cc1" in generate_ir_cmd:
        # Don't add the optnone attribute to the generated IR function
        generate_ir_cmd = self.list_with_flag_at_end(generate_ir_cmd, "-disable-O0-optnone")
    while "-emit-obj" in generate_ir_cmd:
        generate_ir_cmd.remove("-emit-obj")
    if self._check_crash(generate_ir_cmd, infile):
        print("Crashed while generating IR -> must be a", blue("frontend crash.", style="bold"),
              "Will need to use creduce for test case reduction")
        # Try to remove the flags that were added:
        new_command = generate_ir_cmd
        new_command = self._try_remove_args(
            new_command, infile, "Checking if it also crashes at -O0:",
            noargs_opts_to_remove=["-disable-O0-optnone"],
            noargs_opts_to_remove_startswith=["-O"],
            extra_args=["-O0"]
        )
        return self._simplify_frontend_crash_cmd(new_command, infile)
    else:
        print("Must be a ", blue("backend crash", style="bold"), ", ", end="", sep="")
        if self.args.reduce_tool == "creduce":
            print("but reducing with creduce requested. Will not try to convert to a bugpoint test case")
            return self._simplify_frontend_crash_cmd(new_command, infile)
        else:
            print("will try to use bugpoint/llvm-reduce.")
            return self._simplify_backend_crash_cmd(new_command, infile, full_cmd)
def _shrink_preprocessed_source(self, input_path, out_file):
    """Copy *input_path* to *out_file* while dropping noise that slows creduce.

    Removes the ``#if 0 /* expanded by -frewrite-includes */`` blocks (the
    original, unexpanded #include lines), ``//`` line comments and
    ``# <num> "<file>"`` line markers. Dies if a rewrite-includes block is
    suspiciously long (indicates a parsing problem).
    """
    # The initial remove #includes pass takes a long time -> remove all the includes that are inside a #if 0
    # This is especially true for C++ because there are so many #included files in preprocessed input
    with input_path.open("r", errors="replace", encoding="utf-8") as input_file:
        line_regex = re.compile(r'^#\s+\d+\s+".*".*')
        start_rewrite_includes = re.compile(r"^\s*#if\s+0\s+/\* expanded by -frewrite-includes \*/\s*")
        end_rewrite_includes = re.compile(r"^\s*#endif\s+/\* expanded by -frewrite-includes \*/\s*")
        in_rewrite_includes = False
        max_rewrite_includes_lines = 10
        skipped_rewrite_includes = 0
        for line in input_file:  # iterate lazily instead of readlines()
            # use the compiled patterns' own match() instead of re.match(pattern, ...)
            if start_rewrite_includes.match(line):
                extremely_verbose_print("Starting -frewrite-includes-block:", line.rstrip())
                assert not in_rewrite_includes
                assert skipped_rewrite_includes == 0
                in_rewrite_includes = True
            elif end_rewrite_includes.match(line):
                extremely_verbose_print("Ending -frewrite-includes-block, skipped", skipped_rewrite_includes, "lines")
                assert in_rewrite_includes
                in_rewrite_includes = False
                skipped_rewrite_includes = 0
            elif in_rewrite_includes:
                if skipped_rewrite_includes > max_rewrite_includes_lines:
                    die("Error in initial reduction, rerun with --no-initial-reduce")
                extremely_verbose_print("Skipping line inside -frewrite-includes:", line.rstrip())
                skipped_rewrite_includes += 1
            elif line.lstrip().startswith("//"):
                pass  # skip line comments
            elif line_regex.match(line):
                # These # line directives appear to break creduce sometimes:
                extremely_verbose_print("Removing # line directive:", line.rstrip())
            else:
                out_file.write(line)
    out_file.flush()
    if self.args.extremely_verbose:
        verbose_print("Initial reduction:")
        subprocess.call(["diff", "-u", str(input_path), out_file.name])
    # (removed a dead trailing `pass` statement)
def _simplify_frontend_crash_cmd(self, new_command: list, infile: Path):
    """Minimize a frontend (creduce-handled) crash command.

    Pre-shrinks the input by preprocessing it (with and without line
    markers) and stripping rewrite-includes blocks, then iteratively removes
    groups of flags that still reproduce the crash.
    Returns ``(command, input_file)``.
    """
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without warnings crashes:",
        noargs_opts_to_remove=["-w"], noargs_opts_to_remove_startswith=["-W"], extra_args=["-w"])
    # Try to make implicit int an error to generate more sensible test output
    # If we don't add this we get really obscure code that doesn't look like it should compile
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling with -Werror=implicit-int crashes:",
        noargs_opts_to_remove=["-w"], extra_args=["-Wimplicit-int", "-Werror=implicit-int"])
    # speed up test case reduction by aborting the compilation on the first error
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling with -Wfatal-errors crashes:",
        noargs_opts_to_remove=["-w"], extra_args=["-Wfatal-errors"]
    )
    # Removing all the #ifdefs and #defines that get added by the #included headers can speed up reduction a lot
    print("Generating preprocessed source")
    try:
        preprocessed = infile.with_name(infile.stem + "-pp" + infile.suffix)
        base_pp_command = self._filter_args(new_command, one_arg_opts_to_remove=["-o"],
                                            noargs_opts_to_remove=["-S", "-emit-llvm"])
        base_pp_command += ["-E", "-o", str(preprocessed), str(infile)]
        # -P drops the # line markers entirely (preferred when it still crashes):
        no_line_pp_command = base_pp_command + ["-P"]
        verbose_print(no_line_pp_command)
        subprocess.check_call(no_line_pp_command)
        assert preprocessed.exists()
        print("Checking if preprocessed source (without line numbers)", preprocessed, "crashes: ", end="", flush=True)
        if self._check_crash(new_command, preprocessed):
            infile = preprocessed
        else:
            print(red("Compiling preprocessed source (without line numbers)", preprocessed,
                      "no longer crashes, not using it. Will try with line numbers"))
            verbose_print(base_pp_command)
            subprocess.check_call(base_pp_command)
            assert preprocessed.exists()
            print("Checking if preprocessed source", preprocessed, "crashes: ", end="", flush=True)
            if self._check_crash(new_command, preprocessed):
                infile = preprocessed
            else:
                print(red("Compiling preprocessed source (with line numbers)", preprocessed,
                          "no longer crashes, not using it."))
    except Exception as e:
        print("Failed to preprocess", infile, "-> will use the unprocessed source ", e)
    # creduce wastes a lot of time trying to remove #includes and dead cases generated
    # by -frewrite-includes (if the preprocessed source no longer crashes)
    # We also try to remove the #line directives
    try:
        smaller = infile.with_name(infile.stem + "-smaller" + infile.suffix)
        with smaller.open("w", encoding="utf-8") as reduced_file:
            original_size = infile.stat().st_size
            self._shrink_preprocessed_source(infile, reduced_file)
            reduced_file.flush()
        new_size = smaller.stat().st_size
        percent_reduction = 100 - 100.0 * (new_size / original_size)
        print("Initial preprocessing: {} bytes -> {} bytes ({}% reduction)".format(
            original_size, new_size, percent_reduction))
        if self._check_crash(new_command, smaller):
            infile = smaller
        else:
            print(red("Compiling processed preprocessed source", smaller,
                      "no longer crashes, not using it."))
    except Exception as e:
        print("Failed to shrink", infile, "-> will use the unprocessed source", e)
    # Now iteratively strip groups of flags, keeping each change only if the
    # crash still reproduces:
    if "-emit-obj" in new_command:
        new_command = self._try_remove_args(
            new_command, infile, "Checking whether emitting ASM instead of object crashes:",
            noargs_opts_to_remove=["-emit-obj"], extra_args=["-S"])
    # check if floating point args are relevant
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without floating point arguments crashes:",
        noargs_opts_to_remove=["-msoft-float"],
        one_arg_opts_to_remove=["-mfloat-abi"],
        one_arg_opts_to_remove_if={"-target-feature": lambda a: a == "+soft-float"}
    )
    # check if math args are relevant
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without math arguments crashes:",
        noargs_opts_to_remove=["-fno-rounding-math", "-fwrapv"])
    # check if frame pointer args are relevant
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without frame pointer argument crashes:",
        noargs_opts_to_remove=["-mdisable-fp-elim"],
        noargs_opts_to_remove_startswith=["-mframe-pointer="]
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without PIC flags crashes:",
        one_arg_opts_to_remove=["-mrelocation-model", "-pic-level"],
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without thread model flags crashes:",
        one_arg_opts_to_remove=["-mthread-model"],
        noargs_opts_to_remove_startswith=["-ftls-model=initial-exec"],
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without -target-feature flags crashes:",
        one_arg_opts_to_remove=["-target-feature"],
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without various MIPS flags crashes:",
        one_arg_opts_to_remove_if={"-mllvm": lambda a: a.startswith("-mips-ssection-threshold=") or a == "-mxgot" or a == "-mgpopt"}
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without various CHERI flags crashes:",
        noargs_opts_to_remove=["-cheri-linker"],
        one_arg_opts_to_remove_if={"-mllvm": lambda a: a.startswith("-cheri-cap-table-abi=") or a.startswith("-mxcaptable")}
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without -mrelax-all crashes:",
        noargs_opts_to_remove=["-mrelax-all"],
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without -D flags crashes:",
        noargs_opts_to_remove=["-sys-header-deps"],
        one_arg_opts_to_remove=["-D"]
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without include flags crashes:",
        noargs_opts_to_remove=["-nostdsysteminc", "-nobuiltininc"],
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without function/data sections crashes:",
        noargs_opts_to_remove=["-ffunction-sections", "-fdata-sections"],
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without -x flag crashes:",
        one_arg_opts_to_remove=["-x"]
    )
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether compiling without -std= flag crashes:",
        noargs_opts_to_remove_startswith=["-std="]
    )
    if "-disable-llvm-verifier" in new_command:
        new_command = self._try_remove_args(
            new_command, infile, "Checking whether compiling without -disable-llvm-verifier crashes:",
            noargs_opts_to_remove=["-disable-llvm-verifier"])
    if "-fcxx-exceptions" in new_command or "-fexceptions" in new_command:
        new_command = self._try_remove_args(
            new_command, infile, "Checking whether compiling without exceptions crashes:",
            noargs_opts_to_remove=["-fexceptions", "-fcxx-exceptions"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether misc C++ options can be removed:",
        noargs_opts_to_remove=["-fno-rtti", "-mconstructor-aliases", "-nostdinc++"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether misc optimization options can be removed:",
        noargs_opts_to_remove=["-vectorize-loops", "-vectorize-slp"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether addrsig/init-array options can be removed:",
        noargs_opts_to_remove=["-fuse-init-array", "-faddrsig"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether -ffreestanding can be removed:",
        noargs_opts_to_remove=["-ffreestanding"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether TLS/relocation model options can be removed:",
        noargs_opts_to_remove_startswith=["-ftls-model="],
        one_arg_opts_to_remove=["-mrelocation-model"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether -fgnuc-version= can be removed:",
        noargs_opts_to_remove_startswith=["-fgnuc-version="])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether -target-cpu option can be removed:",
        one_arg_opts_to_remove=["-target-cpu"])
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether -target-abi option can be removed:",
        one_arg_opts_to_remove=["-target-abi"])
    # try to remove some arguments that should not be needed
    new_command = self._try_remove_args(
        new_command, infile, "Checking whether misc diagnostic options can be removed:",
        noargs_opts_to_remove=["-disable-free", "-discard-value-names", "-masm-verbose",
                               "-fdeprecated-macro", "-fcolor-diagnostics"],
        noargs_opts_to_remove_startswith=["-fdiagnostics-", "-fobjc-runtime="],
        one_arg_opts_to_remove=["-main-file-name", "-ferror-limit", "-fmessage-length", "-fvisibility", "-target-linker-version"]
    )
    return new_command, infile
def _simplify_backend_crash_cmd(self, new_command: list, infile: Path, full_cmd: list):
    """Try to convert a backend clang crash into an llc/opt crash on IR.

    Generates a ``.ll`` file from the original invocation, then tries to
    reproduce the crash with llc, llc -filetype=obj, opt, and finally
    ``clang -x ir``. On success an IR reduction tool is selected; otherwise
    falls back to creduce on the original frontend command.
    Returns ``(command, input_file)``.
    """
    # TODO: convert it to a llc commandline and use bugpoint
    assert "-emit-llvm" not in full_cmd
    assert "-o" in full_cmd
    command = full_cmd.copy()
    irfile = infile.with_name(infile.name.partition(".")[0] + "-bugpoint.ll")
    command[command.index("-o") + 1] = str(irfile.absolute())
    # Keep value names to make the generated IR easier to read:
    if "-discard-value-names" in command:
        command.remove("-discard-value-names")
    command = self.list_with_flag_at_end(command, "-emit-llvm")
    command = self.list_with_flag_at_end(command, "-disable-O0-optnone")
    command = self.list_with_flag_at_end(command, "-O0")
    print("Generating IR file", irfile)
    try:
        verbose_print(command + [str(infile)])
        subprocess.check_call(command + [str(infile)])
    except subprocess.CalledProcessError:
        print("Failed to generate IR from", infile, "will have to reduce using creduce")
        return self._simplify_frontend_crash_cmd(new_command, infile)
    if not irfile.exists():
        die("IR file was not generated?")
    # Translate the relevant -cc1 flags into their llc equivalents:
    llc_args = [str(self.options.llc_cmd), "-o", "/dev/null"]  # TODO: -o -?
    cpu_flag = None  # -mcpu= only allowed once!
    pass_once_flags = set()
    # Some crash messages only happen with verify-machineinstrs:
    pass_once_flags.add("-verify-machineinstrs")
    skip_next = False
    optimization_flag = "-O2"
    for i, arg in enumerate(command):
        if skip_next:
            skip_next = False
            continue
        if arg == "-triple" or arg == "-target":
            # assume well formed command line
            llc_args.append("-mtriple=" + command[i + 1])
        # forward all the llvm args
        elif arg == "-mllvm":
            llvm_flag = command[i + 1]
            if llvm_flag == "-cheri128":
                cpu_flag = "-mcpu=cheri128"
                llc_args.append("-mattr=+cheri128")
            else:
                pass_once_flags.add(llvm_flag)
            skip_next = True
        elif arg == "-target-abi":
            llc_args.append("-target-abi")
            llc_args.append(command[i + 1])
            skip_next = True
        elif arg == "-target-cpu":
            cpu_flag = "-mcpu=" + command[i + 1]
            skip_next = True
        elif arg == "-target-feature":
            llc_args.append("-mattr=" + command[i + 1])
            skip_next = True
        elif arg == "-mrelocation-model":
            llc_args.append("-relocation-model=" + command[i + 1])
            skip_next = True
        elif arg == "-mthread-model":
            llc_args.append("-thread-model=" + command[i + 1])
            skip_next = True
        elif arg == "-msoft-float":
            llc_args.append("-float-abi=soft")
        elif arg.startswith("-vectorize"):
            llc_args.append(arg)
        elif arg.startswith("-O"):
            if arg == "-Os":
                arg = "-O2"  # llc doesn't understand -Os
            optimization_flag = arg
    if cpu_flag:
        llc_args.append(cpu_flag)
    llc_args.append(optimization_flag)
    llc_args.extend(pass_once_flags)
    print("Checking whether compiling IR file with llc crashes:", end="", flush=True)
    llc_info = subprocess.CompletedProcess(None, None)
    if self._check_crash(llc_args, irfile, llc_info):
        print("Crash found with llc -> using llvm-reduce followed by bugpoint which is faster than creduce.")
        self.reduce_tool = self.get_llvm_ir_reduce_tool()
        return llc_args, irfile
    if self._check_crash(llc_args + ["-filetype=obj"], irfile, llc_info):
        print("Crash found with llc -filetype=obj -> using llvm-reduce followed by bugpoint which is faster than creduce.")
        self.reduce_tool = self.get_llvm_ir_reduce_tool()
        return llc_args + ["-filetype=obj"], irfile
    print("Compiling IR file with llc did not reproduce crash. Stderr was:", llc_info.stderr.decode("utf-8"))
    print("Checking whether compiling IR file with opt crashes:", end="", flush=True)
    opt_args = llc_args.copy()
    opt_args[0] = str(self.options.opt_cmd)
    opt_args.append("-S")
    opt_info = subprocess.CompletedProcess(None, None)
    if self._check_crash(opt_args, irfile, opt_info):
        print("Crash found with opt -> using llvm-reduce followed by bugpoint which is faster than creduce.")
        self.reduce_tool = self.get_llvm_ir_reduce_tool()
        return opt_args, irfile
    print("Compiling IR file with opt did not reproduce crash. Stderr was:", opt_info.stderr.decode("utf-8"))
    print("Checking whether compiling IR file with clang crashes:", end="", flush=True)
    clang_info = subprocess.CompletedProcess(None, None)
    # Strip language/macro flags and feed the IR straight back into clang:
    bugpoint_clang_cmd = self._filter_args(full_cmd, noargs_opts_to_remove_startswith=["-xc", "-W", "-std="],
                                           one_arg_opts_to_remove=["-D", "-x", "-main-file-name"])
    bugpoint_clang_cmd.extend(["-x", "ir"])
    if self._check_crash(bugpoint_clang_cmd, irfile, clang_info):
        print("Crash found compiling IR with clang -> using llvm-reduce followed by bugpoint which is faster than creduce.")
        self.reduce_tool = self.get_llvm_ir_reduce_tool()
        return bugpoint_clang_cmd, irfile
    print("Compiling IR file with clang did not reproduce crash. Stderr was:", clang_info.stderr.decode("utf-8"))
    print(red("No crash found compiling the IR! Possibly crash only happens when invoking clang -> using creduce."))
    self.reduce_tool = RunCreduce(self.options)
    return self._simplify_frontend_crash_cmd(new_command, infile)
def _parse_test_case(self, f, infile: Path):
    """Collect RUN: lines from a lit test case and register their commands."""
    # test case: just search for RUN: lines
    for line in f.readlines():
        match = re.match(r".*\s+RUN: (.+)", line)
        # Fix: lines from readlines() keep their trailing newline, so the old
        # `line.endswith("\\")` check could never fire and continuation lines
        # were silently mis-parsed; strip trailing whitespace first so we die
        # with the intended diagnostic.
        if line.rstrip().endswith("\\"):
            die("RUN lines with continuations not handled yet")
        if match:
            command = match.group(1).strip()
            if "%s" not in command:
                die("RUN: line does not contain %s -> cannot create replacement invocation")
            if "2>&1" in line:
                die("Cannot handle 2>&1 in RUN lines yet")
            verbose_print("Found RUN: ", command)
            command = self.subst_handler.expand_lit_subtitutions(command)
            verbose_print("After expansion:", command)
            # We can only simplify the command line for clang right now
            command, _ = self.simplify_crash_command(shlex.split(command), infile.absolute())
            verbose_print("Final command:", command)
            self.run_cmds.append(command)
            # Keep the original prefix (comment marker + "RUN: ") and splice
            # in the simplified, shell-quoted command:
            self.run_lines.append(line[0:line.find(match.group(1))] + quote_cmd(command))
def run(self):
    """Top-level driver: parse the RUN: lines, pick a reduction tool,
    copy the input aside, and run the reduction in a temp directory."""
    infile = self.parse_RUN_lines(self.testcase)
    if self.reduce_tool is None:
        # IR inputs default to bugpoint, everything else to creduce.
        if infile.suffix in (".ll", ".bc"):
            fallback = RunBugpoint
        else:
            fallback = RunCreduce
        self.reduce_tool = self.get_llvm_ir_reduce_tool(fallback)
    out = self.args.output_file
    if out:
        reduce_input = Path(out).absolute()
    else:
        reduce_input = infile.with_name(infile.stem + "-reduce" + infile.suffix).absolute()
    shutil.copy(str(infile), str(reduce_input))
    with tempfile.TemporaryDirectory() as tmpdir:
        self.reduce_tool.reduce(input_file=reduce_input, extra_args=self.reduce_args, tempdir=tmpdir,
                                run_cmds=self.run_cmds, run_lines=self.run_lines)
def get_llvm_ir_reduce_tool(self, default_tool=RunBugpoint):
    """Instantiate the reduction tool selected via --reduce-tool,
    falling back to *default_tool* when none was requested."""
    choice = self.args.reduce_tool
    if choice is None:
        return default_tool(self.options)
    if choice == "bugpoint":
        return RunBugpoint(self.options)
    if choice == "llvm-reduce":
        return RunLLVMReduce(self.options)
    if choice == "noop":
        # "noop" skips reduction entirely; useful when debugging this script.
        return SkipReducing(self.options)
    assert choice == "creduce"
    return RunCreduce(self.options)
def main():
default_bindir = "@CMAKE_BINARY_DIR@/bin"
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--bindir", default=default_bindir,
help="Path to clang build directory. Default is " + default_bindir)
parser.add_argument("--not-cmd", help="Path to `not` tool. Default is $BINDIR/not")
parser.add_argument("--clang-cmd", help="Path to `clang` tool. Default is $BINDIR/clang")
parser.add_argument("--llc-cmd", help="Path to `llc` tool. Default is $BINDIR/llc")
parser.add_argument("--opt-cmd", help="Path to `opt` tool. Default is $BINDIR/opt")
parser.add_argument("--llvm-dis-cmd", help="Path to `llvm-dis` tool. Default is $BINDIR/llvm-dis")
parser.add_argument("--bugpoint-cmd", help="Path to `bugpoint` tool. Default is $BINDIR/bugpoint")
parser.add_argument("--llvm-reduce-cmd", help="Path to `bugpoint` tool. Default is $BINDIR/llvm-reduce")
parser.add_argument("--creduce-cmd", help="Path to `creduce` tool. Default is `creduce`")
parser.add_argument("--output-file", help="The name of the output file")
parser.add_argument("--verbose", action="store_true", help="Print more debug output")
parser.add_argument("--timeout", type=int,
help="Treat the test case as not interesting if it runs longer than n seconds")
parser.add_argument("--extremely-verbose", action="store_true", help="Print tons of debug output")
parser.add_argument("--llvm-error", action="store_true", help="Reduce a LLVM ERROR: message instead of a crash")
parser.add_argument("--infinite-loop", action="store_true", help="Try debugging an infinite loop (-> timed out testcases are interesting)."
"If timeout is not set this will set it to 30 seconds")
parser.add_argument("--crash-message", help="If set the crash must contain this message to be accepted for reduction."
" This is useful if creduce ends up generating another crash bug that is not the one being debugged.")
parser.add_argument("--reduce-tool", help="The tool to use for test case reduction. "
"Defaults to `llvm-reduce-and-bugpoint` if input file is a .ll or .bc file and `creduce` otherwise.",
choices=["llvm-reduce-and-bugpoint", "bugpoint", "creduce", "llvm-reduce", "noop"])
parser.add_argument("--no-initial-reduce", help="Pass the original input file to creduce without "
"removing #if 0 regions. Generally this will speed up but in very rare corner "
"cases it might cause the test case to no longer crash.", action="store_true")
parser.add_argument("testcase", help="The file to reduce (must be a testcase with a RUN: line that crashes "
"or a .sh file from a clang crash")
# bash completion for arguments:
try:
# noinspection PyUnresolvedReferences
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
# Disable coredumps to avoid filling up disk space:
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
Reducer(parser).run()
# Script entry point: build the argument parser and run the reducer only when
# this file is executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
| [
"Alexander.Richardson@cl.cam.ac.uk"
] | Alexander.Richardson@cl.cam.ac.uk |
b273a37fb3e2c9240e6ff15a4fa8e29276a8f2ed | ee878b70f2806253ca56fed05e4b81becf980b0e | /mondayweek3_inclass_palmer.py | 0b7eb6ddc5363e667b87d5021811760b8d275603 | [] | no_license | dpalmer4/clsm_palmer | b41aaf106dad06c34e5b0c54ff88caf7089de1ee | fcd48ec66f417b2cf5c35aec5b3ca5ae5bedc576 | refs/heads/master | 2020-12-19T06:52:34.698282 | 2020-06-01T20:13:34 | 2020-06-01T20:13:34 | 235,654,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 19:49:44 2020
@author: danpa
"""
import numpy as np
import matplotlib.pyplot as plt
x=np.random.rand(1000000)
K=1
T=1
A=1
m=1
z=1
#probability distribution of Energy
E=(-K*T*np.log(K*T*x/A))
plt.hist(E,100,density=1)
plt.xlabel("Energy")
plt.ylabel("probability")
plt.show()
#probability distribution of velocity
vp=np.sqrt(np.log(2/(E*z))*2*K*T/m)-1 #positive velocities
vn=-np.sqrt(np.log(2/(E*z))*2*K*T/m)+1 #negative velocities
v=np.concatenate((vp,vn))
plt.hist(v,100,density=1)
plt.show() | [
"60151424+dpalmer4@users.noreply.github.com"
] | 60151424+dpalmer4@users.noreply.github.com |
7220d8b3f098d3920a210f0c1d7ea9868a3937a3 | 68ae37ca44da5bcd9e31655ad87bf5aff1385dce | /l_5/learning_user/learning_user/settings.py | 116a17c73ca70ade268ba660f317837e72b94855 | [] | no_license | Ashishprashar/django-example | b5d49902799dc1949719c9e33d15473db66c8b07 | 47970373167f4d0572961ba48af67d9fe4543e60 | refs/heads/main | 2023-04-11T04:54:03.499983 | 2021-04-11T09:45:36 | 2021-04-11T09:45:36 | 356,811,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,704 | py | """
Django settings for learning_user project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os

# Project root: two levels up from this settings file.
BASE_DIR = Path(__file__).resolve().parent.parent

# Filesystem locations for templates, static assets and user-uploaded media,
# all resolved relative to the project root.
TEMPLATES_DIRS = os.path.join(BASE_DIR, 'templates')
STATIC_DIRS = os.path.join(BASE_DIR, 'static')
MEDIA_DIRS = os.path.join(BASE_DIR, 'media')


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed in source; for production it should be
# loaded from the environment (e.g. os.environ) instead.
SECRET_KEY = 'kj==c(9@(=5_kta3quxu%qh1wq4xf_k+m1n$b=*xi%1u+na&-h'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True with a public host in ALLOWED_HOSTS leaks stack
# traces to visitors — disable before deploying.
DEBUG = True

ALLOWED_HOSTS = ['ashishdjango.pythonanywhere.com']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'basic_app',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'learning_user.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIRS],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'learning_user.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

# Hashers tried in order; the first entry is used for new passwords.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        # Bug fix: the key was misspelled 'OPTONS', so Django silently ignored
        # it and MinimumLengthValidator fell back to its default min_length=8.
        'OPTIONS': {'min_length': 9},
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIRS, ]

# User-uploaded media: served from MEDIA_URL, stored under MEDIA_ROOT.
MEDIA_ROOT = MEDIA_DIRS
MEDIA_URL = '/media/'

# Where @login_required redirects unauthenticated users.
LOGIN_URL = '/basic_app/user_login'
"ak2917065@gmail.com"
] | ak2917065@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.