blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5ff2bca4503ea9f750bdf2e9302a68d044f31976 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/genConsts/ACHIEVEMENTS_ALIASES.py | a1ac9cb63c4d955caad56510b952f353f13adbe0 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 668 | py | # 2017.05.04 15:24:50 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/genConsts/ACHIEVEMENTS_ALIASES.py
class ACHIEVEMENTS_ALIASES(object):
    """
    Scaleform/Flash symbol-name constants for the achievements counter UI.

    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    """
    # Each value is the name of a UI symbol in the Flash assets
    # (presumably one per counter colour state -- confirm against the SWF).
    GREY_COUNTER = 'GreyCounter_UI'
    YELLOW_COUNTER = 'YellowCounter_UI'
    RED_COUNTER = 'RedCounter_UI'
    BEIGE_COUNTER = 'BeigeCounter_UI'
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\genConsts\ACHIEVEMENTS_ALIASES.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:24:50 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
758a84fd0672f8afe1f8b3219aa65bafbbec84ef | f0e25779a563c2d570cbc22687c614565501130a | /LeetCode/Stack/739_daily_temperatures.py | eb4e5e5065af97f3fce52ffc691277a8b67a24e6 | [] | no_license | XyK0907/for_work | 8dcae9026f6f25708c14531a83a6593c77b38296 | 85f71621c54f6b0029f3a2746f022f89dd7419d9 | refs/heads/master | 2023-04-25T04:18:44.615982 | 2021-05-15T12:10:26 | 2021-05-15T12:10:26 | 293,845,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | class Solution(object):
def dailyTemperatures(self, T):
"""
Time O(n)
Space O(W) W is the number of allowed values for T[i]
:type T: List[int]
:rtype: List[int]
"""
length = len(T)
res = [0] * length
stack = []
for i in range(length):
while stack and T[stack[-1]] < T[i]:
prev_idx = stack.pop()
res[prev_idx] = i - prev_idx
stack.append(i)
return res
def dailyTemperatures_another(self, T):
"""
:type T: List[int]
:rtype: List[int]
"""
length = len(T)
res = [0] * length
stack = []
for i in range(length - 1, -1, -1):
while stack and T[stack[-1]] <= T[i]:
stack.pop()
if stack:
res[i] = stack[-1] - i
stack.append(i)
return res
if __name__ == '__main__':
solution = Solution()
print(solution.dailyTemperatures_another(T = [73, 74, 75, 71, 69, 72, 76, 73])) | [
"cherry.kong0907@gmail.com"
] | cherry.kong0907@gmail.com |
680928b2ad532848f8dc1b18f365d874146fd9e7 | ff5d91e9eee4dd41e85d418120f11daec71cf93b | /2011/nazionali/scuola/gen/generatore.py | 99b8f8ad290fc1d89238d99730ffe88c4278e660 | [] | no_license | olimpiadi-informatica/oii | d0023c5fa00100cadc6a13b1e153fca0017177ca | ce6bc7e8b40a32c01611f4b20ee72f8a9318eafd | refs/heads/master | 2021-05-16T02:35:15.742339 | 2020-03-14T21:56:18 | 2020-03-14T21:56:18 | 28,759,072 | 31 | 6 | null | 2019-12-15T12:37:53 | 2015-01-04T00:55:52 | C++ | UTF-8 | Python | false | false | 508 | py | #!/usr/bin/env python2
from limiti import *
# Help text (Italian) printed when the generator is invoked incorrectly.
usage="""Generatore per "scuola".
Parametri:
* N (numero di eroi)
* P (tipo di prova)
Constraint:
* 1 <= N < %d
* P == 1 || P == 2
* P == 2 => N = 2^n
""" % MAXN
from sys import argv, exit, stderr
import os
from numpy.random import seed, random, randint
from random import choice, sample
def run(N, S):
print N, P
if __name__ == "__main__":
    # Expect exactly two CLI arguments: N and P (see `usage` above).
    if len(argv) != 3:
        print usage
        exit(1)
    N, P = [int(x) for x in argv[1:]]
    run(N, P)
| [
"williamdiluigi@gmail.com"
] | williamdiluigi@gmail.com |
898bfbc18f1fc8480db3c2f533cd9d6fb01c31cf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02556/s103128743.py | c6fdb69398ed5464103d08827a895a139796cf40 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import sys
input=sys.stdin.buffer.readline
#sys.setrecursionlimit(10**9)
#from functools import lru_cache
def RD(): return sys.stdin.read()
def II(): return int(input())
def MI(): return map(int,input().split())
def MF(): return map(float,input().split())
def LI(): return list(map(int,input().split()))
def LF(): return list(map(float,input().split()))
def TI(): return tuple(map(int,input().split()))
# rstrip().decode('utf-8')
def main():
    """Maximum Manhattan distance between any two of n points.

    Uses the 45-degree rotation trick: the Manhattan distance equals the
    larger spread (max - min) of the transformed coordinates x+y and x-y.
    """
    n = II()
    points = [LI() for _ in range(n)]
    rotated_sum = sorted(x + y for x, y in points)
    rotated_diff = sorted(x - y for x, y in points)
    spread_sum = rotated_sum[-1] - rotated_sum[0]
    spread_diff = rotated_diff[-1] - rotated_diff[0]
    print(max(spread_sum, spread_diff))
if __name__=="__main__":
    # Script entry point.
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
14ce239b85d1439ce6c80141ceab72ecb1669bb1 | dd44e145ac547209f5f209bc9b1f09189bb8b5c7 | /Python-Advanced-2021/05.Functions-advanced-E/06.Arguments_length.py | dda228317ababf04a6c14a2f24409130d536f52c | [] | no_license | todorovventsi/Software-Engineering | e3c1be8f0f72c85619518bb914d2a4dbaac270f8 | 64ffa6c80b190e7c6f340aaf219986f769f175ab | refs/heads/master | 2023-07-09T05:35:14.522958 | 2021-08-15T14:35:55 | 2021-08-15T14:35:55 | 336,056,643 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | def args_length(*args):
return len(args)
print(args_length(1, 32, 5)) | [
"todorov.ventsi@gmail.com"
] | todorov.ventsi@gmail.com |
7e43fc34dac85bfbb84793a1b18695c6592979f3 | 27fc04a95b0d268adef4d4497c27ea9ae295d8a4 | /ch07/clac_test.py | 46e03817b195210e4aec46805a5e743bf88dcd2b | [] | no_license | s-kyum/Python | 2b35b333557db0698a3fd305d550baaa5304f206 | e5b31036acd2bfb79f98ff02d59096a2429eb41f | refs/heads/master | 2023-07-09T18:45:26.179057 | 2021-08-23T03:07:57 | 2021-08-23T03:07:57 | 378,803,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | from myclass.calculator2 import Calculator as c
s2=c(15,5)
print(s2.add()) | [
"s-kyum@naver.com"
] | s-kyum@naver.com |
2f530cb3eda17fc6b071b7ae0c313d303a5c766e | 725ac5a0bf72829be627bf8dc82fdc51ba0f94ae | /Text_Classification/Fasttext/step1_get_data_to_examples.py | 55cc1e4866d8e585585ad74b7ba4095138ea7202 | [] | no_license | shawroad/NLP_pytorch_project | fa14b6e4a156229765e1d552901d0492d8e1def3 | 1272fed2dc8fef78a9ded0f1ae1644d613a3b57b | refs/heads/master | 2023-06-25T02:37:35.503251 | 2023-06-12T10:57:11 | 2023-06-12T10:57:11 | 229,694,655 | 530 | 104 | null | 2020-12-08T09:21:47 | 2019-12-23T06:54:29 | Python | UTF-8 | Python | false | false | 2,660 | py | # -*- encoding: utf-8 -*-
'''
@File : construct_data.py
@Time : 2020/11/04 13:49:38
@Author : xiaolu
@Contact : luxiaonlp@163.com
'''
import gzip
import pickle
import json
import jieba
from tqdm import tqdm
import random
class RankExample(object):
    """One (question, document) ranking example with a binary relevance label.

    :param doc_id: running integer id of the document.
    :param question_text: the question string.
    :param context: the document body text.
    :param answer: gold answer string (optional).
    :param label: 1 if `context` contains the answer, 0 otherwise (optional).
    :param keywords: list of keywords extracted for the document (optional).
    """

    def __init__(self,
                 doc_id,
                 question_text,
                 context,
                 answer=None,
                 label=None,
                 keywords=None
                 ):
        # keywords
        self.doc_id = doc_id
        self.question_text = question_text
        self.context = context
        self.answer = answer
        self.label = label
        self.keywords = keywords

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "doc_id: %s" % (str(self.doc_id))
        s += ", question_text: %s" % (self.question_text)
        s += ", context: %s" % (self.context)
        s += ", answer: %s" % (self.answer)
        # BUG FIX: was "label: %d", which raised TypeError whenever label
        # kept its default of None; %s renders ints identically ("0", "1")
        # and handles None gracefully.
        s += ", label: %s" % (self.label)
        s += ", keyword: {}".format(self.keywords)
        return s
def construct(data):
    """Turn raw QA records into labeled RankExample objects.

    A related document is a positive example (label 1) when its body
    contains the answer string verbatim, otherwise a negative (label 0).

    :param data: list of dicts with 'question', 'answer' and 'related_doc'.
    :return: list of RankExample instances, with running doc_ids.
    """
    doc_id = 0
    examples = []
    pos_sample = 0
    neg_sample = 0
    for item in tqdm(data):
        question = item['question']
        answer = item['answer']
        related_doc = item['related_doc']
        if not related_doc:
            # Skip questions with no retrieved documents.
            continue
        for doc in related_doc:
            doc_id += 1
            text = doc['body']
            keywords = doc['keywords']
            if answer in text:
                pos_sample += 1
                examples.append(RankExample(doc_id=doc_id, question_text=question, context=text, answer=answer, label=1, keywords=keywords))
            else:
                neg_sample += 1
                examples.append(RankExample(doc_id=doc_id, question_text=question, context=text, answer=answer, label=0, keywords=keywords))
    print('正样本个数:', pos_sample)  # positives: train 48611 / dev 12324
    print('负样本个数:', neg_sample)  # negatives: train 692525 / dev 170526
    # Observed ratios -- train pos:neg = 48611:692525; dev pos:neg = 12324:170526
    return examples
if __name__ == '__main__':
    # Load the full train/dev splits from disk.
    train_data = json.load(open('./data/train_policy.json', 'r', encoding='utf8'))
    dev_data = json.load(open('./data/dev_policy.json', 'r', encoding='utf8'))

    train_examples = construct(train_data)
    # Persist examples as gzipped pickles for the next pipeline stage.
    with gzip.open('./data/train_examples.pkl.gz', 'wb') as fout:
        pickle.dump(train_examples, fout)

    dev_examples = construct(dev_data)
    with gzip.open('./data/dev_examples.pkl.gz', 'wb') as fout:
        pickle.dump(dev_examples, fout)
| [
"luxiaonlp@163.com"
] | luxiaonlp@163.com |
3fb5e7917f3a7f42b3b3e6a4fe1551923b895957 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/302/69507/submittedfiles/testes.py | 41b941ca1b9f17291580bab0acaa2052cb564863 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | n = float(input('Digite o número de lados do polígono'))
if n<=2:
print('Isso não é um polígono')
elif n>2:
nd = ((n*(n-3))/2)
print (nd)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2c5b7a15346084a914009c961ec4e8d19650c0e9 | 062dbbed1d3c030bba07cfa8ba8c4484c985e03b | /sixtySecond.py | 1d8894237434ad60ea311236021d83cb12fa8b8a | [] | no_license | alindsharmasimply/Python_Practice_2 | 10ed81ba3ee1962d952127d1e1e74b48161d9422 | f7ae50f0b209856ba89fd3ea660297534a1b53bb | refs/heads/master | 2021-05-04T18:40:11.903533 | 2017-12-31T16:16:30 | 2017-12-31T16:16:30 | 105,950,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | from itertools import combinations
s = 'ALIND'
print list(combinations(s, 2))
| [
"alindsharmasimply@gmail.com"
] | alindsharmasimply@gmail.com |
aeea0b4cae09144645d7594a809090b801476613 | ff0cdf438fbc202fe621ed90472ce9d93acd383b | /make_pdf/make_pptx.py | 5251fe6e6c2bf2f5c3f305315d2c331c53d5a3b9 | [
"MIT",
"Apache-2.0"
] | permissive | nishio/idea-generation | e0284f34ebb163660f6b5d45963a8528f4eb3cb4 | 7d7fa08456243dc63c9c80d15244f39b73814ad9 | refs/heads/master | 2021-11-10T08:33:13.597884 | 2021-10-24T03:51:12 | 2021-10-24T03:51:12 | 12,247,640 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
テキストをpptxにする
改行位置制御や図を貼りたいなどの細かいニーズに答えるのは
ユーザの手元のPowerPointでやってもらおうという発想
"""
import argparse
from pptx import Presentation
from pptx.util import Inches, Pt, Cm
from pptx.enum.text import MSO_AUTO_SIZE, PP_ALIGN, MSO_ANCHOR
def main():
    """Collect slide texts (stdin lines, or a built-in sample with --test)
    and hand them to make_pptx."""
    parser = argparse.ArgumentParser(description='Text to PPTX.')
    parser.add_argument('--test', action='store_true')
    args = parser.parse_args()
    if args.test:
        # Sample data: slides of 1..100 repeated full-width characters.
        lines = ['あ' * count for count in range(1, 101)]
    else:
        import sys
        lines = sys.stdin.read().split('\n')
    make_pptx(lines)
def find_best_fontsize(text):
    """Pick a font size (pt) so that *text* roughly fills one slide.

    Width is estimated in full-width character units: the text is encoded
    to Shift-JIS, where ASCII is 1 byte and Japanese characters are 2
    bytes, so byte_length / 2 approximates the full-width character count.
    NOTE(review): Python 2 only -- relies on str.decode and integer `/`.
    """
    # Sizes for very short strings: 1, 2 or 3 characters.
    sizes = [415, 415, 346, 240]
    chars = len(text.decode('utf-8').encode('sjis')) / 2
    if chars < len(sizes):
        return sizes[chars]
    # means 'if chars leq 6 (2 * 3), fontsize is 200pt'
    # Each pair is (max character count as rows * cols, font size in pt).
    sizes = [
        (2 * 3, 200), (2 * 4, 167), (3 * 4, 159),
        (3 * 5, 125), (4 * 6, 110), (5 * 8, 90),
        (5 * 9, 80), (6 * 10, 70), (7 * 14, 60),
    ]
    for lim, siz in sizes:
        if chars <= lim:
            return siz
    # Anything longer gets the smallest size.
    return 50
def make_pptx(texts):
    """Write test.pptx with one centred, auto-sized text slide per entry."""
    prs = Presentation()
    for text in texts:
        blank_slide_layout = prs.slide_layouts[6]  # layout 6 = blank slide
        slide = prs.slides.add_slide(blank_slide_layout)
        # Text box covering the whole 25.4cm x 19.1cm (4:3) slide.
        txBox = slide.shapes.add_textbox(0, 0, Cm(25.4), Cm(19.1))
        tf = txBox.textframe
        tf.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE
        tf.word_wrap = True
        tf.vertical_anchor = MSO_ANCHOR.MIDDLE
        p = tf.paragraphs[0]
        p.text = text
        # Font size is pre-computed so the text fills the slide; fine
        # tuning is left to the user in PowerPoint (see module docstring).
        p.font.size = Pt(find_best_fontsize(text))
        p.alignment = PP_ALIGN.CENTER
    prs.save('test.pptx')
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"nishio.hirokazu@gmail.com"
] | nishio.hirokazu@gmail.com |
6d57d337bb381af4b0baadbe7ffb9536a14c12ee | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/89f5b02f9e63478881ea0f0106bf295d.py | aefb9d4b1561027d8513e1650a48c28554dd61ce | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 457 | py | import string
def hey(input_text):
if is_all_caps(input_text):
return "Woah, chill out!"
if is_question(input_text):
return "Sure."
if is_empty(input_text):
return "Fine. Be that way!"
return "Whatever."
def is_question(input_text):
return input_text.endswith("?")
def is_empty(input_text):
return string.strip(input_text) == ''
def is_all_caps(input_text):
return input_text.isupper()
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1c5b53877ed351ea56b9e794d857dd6c10d9c66c | 76dcba11031090ac69dee281ef18ae98cdd43ff9 | /IssueTrackerProduct/I18N.py | ba3cc3b59a2ebd0d8f45b768dd3c33b82cdab7ce | [] | no_license | sureshvv/IssueTrackerProduct | 137b34a3123ea8823af18aa9c0161dad840b93da | 817820377288330f9e318428cd743659476e625d | refs/heads/master | 2021-01-18T11:22:46.844607 | 2014-04-21T17:15:58 | 2014-04-21T17:15:58 | 1,546,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | #try:
# from Products.PlacelessTranslationService.MessageID import MessageIDFactory
# _ = MessageIDFactory('itp')
#except ImportError:
# def _(s):
# return s
def _(s, *a, **k):
return s | [
"mail@peterbe.com"
] | mail@peterbe.com |
59682c24e6d34e0adc8df231f18c615f80f35d74 | 6d1016e97e02343b8d85ddbd5f5d1406261eabfd | /test/view_helpers/Test_DataTable_Js_Views.py | a4659e5a5f0be0edb2d6aa97169f679f20c61858 | [
"Apache-2.0"
] | permissive | MaeveScarryPBX/serverless-render | 1c2c2dbf228e5fb69d67d7acd89bf5a49fc69087 | 44365e7e0ab6e04fb304a7091ceeab41f67d8d88 | refs/heads/master | 2020-04-29T05:05:47.078806 | 2019-03-14T21:29:12 | 2019-03-14T21:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | from unittest import TestCase
from browser.Browser_Lamdba_Helper import Browser_Lamdba_Helper
from view_helpers.DataTable_Js_Views import DataTable_Js_Views
class Test_DataTable_Js_Views(TestCase):
    """Renders DataTable JS views to PNG via the browser Lambda helper.

    Each test stores the rendered bytes in self.png_data; tearDown persists
    them so the output can be inspected manually after the run.
    """

    def setUp(self):
        self.graph_name = 'graph_XKW'
        self.png_data = None

    def tearDown(self):
        # Save whatever the test produced (png_data may still be None --
        # presumably save_png_data tolerates that; confirm in the helper).
        Browser_Lamdba_Helper().save_png_data(self.png_data)

    def test_graph(self):
        # The reassignments below are a manual toggle between sample graphs
        # of increasing size; only the last (uncommented) value takes effect.
        graph_name = 'graph_XKW'        # (7 nodes)
        graph_name = 'graph_MKF'        # ( 20 nodes, 27 edges)
        #graph_name = 'graph_YT4'      # (199 nodes, 236 edges)
        #graph_name = 'graph_VZ5'      # (367 nodes, 653 edges)
        graph_name = 'graph_W4T'        # R1 Labels (from search results)
        graph_name = 'graph_9CP'
        self.png_data = DataTable_Js_Views.graph(params=[graph_name])

    def test_graph_all_fields(self):
        graph_name = 'graph_XKW'        # (7 nodes)
        #graph_name = 'graph_MKF'      # ( 20 nodes, 27 edges)
        #graph_name = 'graph_YT4'      # (199 nodes, 236 edges)
        #graph_name = 'graph_VZ5'      # (367 nodes, 653 edges)
        self.png_data = DataTable_Js_Views.graph_all_fields(params=[graph_name])

    def graph_all_fields__issue_id(self):
        # No test_ prefix, so unittest does not collect this -- presumably
        # disabled deliberately; rename to test_... to re-enable.
        graph_name = 'GSSP-111'
        self.png_data = DataTable_Js_Views.graph_all_fields(params=[graph_name])

    def test_issue(self):
        issue_id = 'GSSP-111'
        self.png_data = DataTable_Js_Views.issue(params=[issue_id])

    def test_test_data(self):
        self.png_data = DataTable_Js_Views.test_data()
"dinis.cruz@owasp.org"
] | dinis.cruz@owasp.org |
eb46a6adff419c50985746380b87bd5fc7a15f99 | ea35facf6d823e93706b5f551408250b1e089be9 | /共通問題/list_1.py | 5274fcd3cbbe64855729d3ea8ab46df9f7dc9356 | [] | no_license | YukiNGSM/PythonStudy | 7a2d24f4762e384531eadd691858296b00b6a6b3 | 26310d0e007745ff4920ccd0fc3e51771cb2d5f1 | refs/heads/master | 2023-07-19T00:06:29.061255 | 2021-09-22T01:29:49 | 2021-09-22T01:29:49 | 409,025,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | x=[3,15,123]
print(x[1]) | [
"ykh2135239@o-hara.ac.jp"
] | ykh2135239@o-hara.ac.jp |
e3b5cc549923afa53dac368cc06752b312063f1d | 8e189cd5a2cb08dc7e50f4c30941450f4be8b590 | /manage.py | 35c5ad802e792300f7dcec80015f714e7e4c015f | [] | no_license | puxiaoshuai/AI_blog | 5b089e15bfb6b6ca43aed2dd3255f97d49c6fa49 | 3448277c3217fe41fddbc759274b3f8b8b2236b4 | refs/heads/master | 2020-04-14T20:04:02.293078 | 2019-01-04T08:42:12 | 2019-01-04T08:42:12 | 164,081,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings module before any framework
    # code is imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AI_Blog.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"1372553910@qq.com"
] | 1372553910@qq.com |
6a9b86aef458d8da67395ff229fdfb3f7fb9988e | c7e753eff114ea692057c406ff9ce2fe8a7c1adb | /tests/test_cmd.py | 931cbd54d7684c775e23d66ca5048858c5825b63 | [] | permissive | LSSTDESC/ceci | 67fa0df56dde8c61947e5d8c2a20796beaeeb422 | 5c683f8fd6a8bdd2b36c1f6f03f4d24599307a54 | refs/heads/master | 2023-09-01T08:05:04.830316 | 2023-08-02T12:47:49 | 2023-08-02T12:47:49 | 120,935,349 | 9 | 10 | BSD-3-Clause | 2023-08-23T12:46:07 | 2018-02-09T17:29:45 | Python | UTF-8 | Python | false | false | 3,158 | py | from ceci.pipeline import StageExecutionConfig
from ceci.sites.local import LocalSite
from ceci.sites.nersc import NerscBatchSite
import os
import pytest
class MockSite:
    """Minimal stand-in for a ceci site object: exposes only .config
    with the image/volume keys StageExecutionConfig reads."""

    def __init__(self):
        self.config = {
            "image": "abc",
            "volume": "def",
        }
def test_defaults():
    """With no overrides, a StageExecutionConfig picks up the library
    defaults plus the image/volume inherited from the site config."""
    sec = StageExecutionConfig({"name": "a", "site": MockSite()})
    # Scheduling defaults.
    assert sec.nodes == 1
    assert sec.nprocess == 1
    assert sec.threads_per_process == 1
    assert sec.mem_per_process == 2
    # Container settings come from MockSite.config.
    assert sec.image == "abc"
    assert sec.volume == "def"
def test_local():
    """Without an image configured, LocalSite must not wrap the command
    in a container runtime."""
    site = LocalSite({})
    sec = StageExecutionConfig({"name": "a", "site": site})
    base_cmd = "echo 1"
    wrapped = site.command(base_cmd, sec)
    # Assertions are deliberately loose: the exact wrapper text may change.
    assert "docker" not in wrapped
    assert "shifter" not in wrapped
    assert "OMP_NUM_THREADS=1" in wrapped
    assert base_cmd in wrapped
def test_docker():
    """With an image configured, LocalSite wraps the command in docker run
    with the requested volume, MPI ranks and thread count."""
    site = LocalSite({})
    sec = StageExecutionConfig(
        {
            "name": "a",
            "site": site,
            "image": "username/potato",
            "volume": "a:b",
            "threads_per_process": 4,
            "nprocess": 2,
        }
    )
    cmd1 = "echo 1"
    # An image is specified, so the command should be docker-wrapped.
    cmd = site.command(cmd1, sec)
    # don't want to test too specifically here, since it may change
    assert "docker run" in cmd
    assert "username/potato" in cmd
    assert "-v a:b" in cmd
    assert "mpirun -n 2" in cmd
    assert "shifter" not in cmd
    assert "OMP_NUM_THREADS=4" in cmd
    assert cmd1 in cmd
def _test_nersc(job_id):
    """Exercise NerscBatchSite.command, optionally pretending to run
    inside a Slurm allocation (job_id=True fakes SLURM_JOB_ID).

    The environment variable is saved up front and restored in `finally`
    so the test never leaks state, whichever branch raised.
    """
    site = NerscBatchSite({})
    # fake that we're running a job to avoid complaints
    initial = os.environ.get("SLURM_JOB_ID")
    if job_id:
        os.environ["SLURM_JOB_ID"] = "fake_job_id"
    elif initial is not None:
        del os.environ["SLURM_JOB_ID"]
    try:
        sec = StageExecutionConfig(
            {
                "name": "a",
                "site": site,
                "image": "username/potato",
                "volume": "a:b",
                "threads_per_process": 4,
                "nprocess": 2,
                "nodes": 3,
            }
        )
        cmd1 = "echo 1"
        # An image is specified, so the command should be shifter-wrapped.
        cmd = site.command(cmd1, sec)
        # don't want to test too specifically here, since it may change
        assert "shifter" in cmd
        assert "--image username/potato" in cmd
        assert "-V a:b" in cmd
        assert "srun -u -n 2" in cmd
        assert "--env OMP_NUM_THREADS=4" in cmd
        assert "--nodes 3" in cmd
        assert "--mpi" in cmd
        assert cmd1 in cmd
    finally:
        # Restore SLURM_JOB_ID to exactly its pre-test state.
        if job_id:
            if initial is None:
                del os.environ["SLURM_JOB_ID"]
            else:
                os.environ["SLURM_JOB_ID"] = initial
        elif initial is not None:
            os.environ["SLURM_JOB_ID"] = initial
def test_works():
    # Inside a (faked) Slurm allocation, command building succeeds.
    _test_nersc(True)
def test_warning():
    # Outside an allocation the NERSC batch site must raise ValueError.
    with pytest.raises(ValueError):
        _test_nersc(False)
| [
"joezuntz@googlemail.com"
] | joezuntz@googlemail.com |
02aaf635c1204d0454362589945ad6f6a3f2289f | 2fb0af0a30e3133ef4c5e649acd3f9911430062c | /src/toontown/safezone/SafeZoneLoader.py | 1f63df6f7233de27cd5547986da059f8c751f6fb | [] | no_license | Teku16/Toontown-Crystal-Master | 4c01c0515f34a0e133441d2d1e9f9156ac267696 | 77a9345d52caa350ee0b1c7ad2b7461a3d6ed830 | refs/heads/master | 2020-05-20T06:02:58.106504 | 2015-07-25T07:23:59 | 2015-07-25T07:23:59 | 41,053,558 | 0 | 1 | null | 2015-08-19T18:51:11 | 2015-08-19T18:51:11 | null | UTF-8 | Python | false | false | 10,248 | py | from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from toontown.distributed.ToontownMsgTypes import *
from toontown.hood import ZoneUtil
from direct.directnotify import DirectNotifyGlobal
from toontown.hood import Place
from direct.showbase import DirectObject
from direct.fsm import StateData
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from toontown.toon import HealthForceAcknowledge
from toontown.toon.Toon import teleportDebug
from toontown.toonbase.ToontownGlobals import *
from toontown.building import ToonInterior
from toontown.hood import QuietZoneState
from toontown.dna.DNAParser import *
from direct.stdpy.file import *
class SafeZoneLoader(StateData.StateData):
    """Loads and manages one hood's safe zone: its DNA geometry, music,
    animated props, and the FSM that moves the player between the
    playground, toon interiors, the golf course, and quiet-zone
    transitions.

    NOTE(review): musicFile, activityMusicFile, dnaFile,
    safeZoneStorageDNAFile and playgroundClass are referenced here but
    never assigned -- presumably supplied by concrete hood subclasses
    before load() is called; confirm.
    (Python 2 code: uses xrange and backtick-repr.)
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('SafeZoneLoader')

    def __init__(self, hood, parentFSMState, doneEvent):
        StateData.StateData.__init__(self, doneEvent)
        self.hood = hood
        self.parentFSMState = parentFSMState
        # FSM wiring: 'start' fans out to the places; 'quietZone' bridges
        # every transition between them; 'final' is the teardown state.
        self.fsm = ClassicFSM.ClassicFSM('SafeZoneLoader', [State.State('start', self.enterStart, self.exitStart, ['quietZone', 'playground', 'toonInterior']),
         State.State('playground', self.enterPlayground, self.exitPlayground, ['quietZone']),
         State.State('toonInterior', self.enterToonInterior, self.exitToonInterior, ['quietZone']),
         State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['playground', 'toonInterior']),
         State.State('golfcourse', self.enterGolfcourse, self.exitGolfcourse, ['quietZone', 'playground']),
         State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
        self.placeDoneEvent = 'placeDone'
        self.place = None
        self.playgroundClass = None
        return

    def load(self):
        """Load music and DNA geometry; attach our FSM under the parent state."""
        self.music = base.loadMusic(self.musicFile)
        self.activityMusic = base.loadMusic(self.activityMusicFile)
        self.createSafeZone(self.dnaFile)
        self.parentFSMState.addChild(self.fsm)

    def unload(self):
        """Tear down everything load()/createSafeZone() built and flush
        the model/texture pools."""
        self.parentFSMState.removeChild(self.fsm)
        del self.parentFSMState
        self.geom.removeNode()
        del self.geom
        del self.fsm
        del self.hood
        del self.nodeList
        del self.playgroundClass
        del self.music
        del self.activityMusic
        del self.holidayPropTransforms
        self.deleteAnimatedProps()
        self.ignoreAll()
        ModelPool.garbageCollect()
        TexturePool.garbageCollect()

    def enter(self, requestStatus):
        """Enter the safe zone and jump straight to the requested place."""
        self.fsm.enterInitialState()
        messenger.send('enterSafeZone')
        self.setState(requestStatus['where'], requestStatus)
        # Remove the party gate prop when parties are disabled in config.
        if not base.config.GetBool('want-parties', True):
            partyGate = self.geom.find('**/prop_party_gate_DNARoot')
            if not partyGate.isEmpty():
                partyGate.removeNode()

    def exit(self):
        messenger.send('exitSafeZone')

    def setState(self, stateName, requestStatus):
        self.fsm.request(stateName, [requestStatus])

    def createSafeZone(self, dnaFile):
        """Build the zone geometry from DNA and index it for later use."""
        # Optional per-zone storage DNA (models/textures) loaded first.
        if self.safeZoneStorageDNAFile:
            dnaBulk = DNABulkLoader(self.hood.dnaStore, (self.safeZoneStorageDNAFile,))
            dnaBulk.loadDNAFiles()
        node = loadDNAFile(self.hood.dnaStore, dnaFile)
        if node.getNumParents() == 1:
            self.geom = NodePath(node.getParent(0))
            self.geom.reparentTo(hidden)
        else:
            self.geom = hidden.attachNewNode(node)
        self.makeDictionaries(self.hood.dnaStore)
        self.createAnimatedProps(self.nodeList)
        # Remember the original transform of every holiday prop so holiday
        # managers can restore them later.
        self.holidayPropTransforms = {}
        npl = self.geom.findAllMatches('**/=DNARoot=holiday_prop')
        for i in xrange(npl.getNumPaths()):
            np = npl.getPath(i)
            np.setTag('transformIndex', `i`)
            self.holidayPropTransforms[i] = np.getNetTransform()
        gsg = base.win.getGsg()
        if gsg:
            self.geom.prepareScene(gsg)
        self.geom.flattenMedium()

    def makeDictionaries(self, dnaStore):
        """Collect one flattened NodePath per DNA visibility group, then
        reset the DNA store's bookkeeping."""
        self.nodeList = []
        for i in xrange(dnaStore.getNumDNAVisGroups()):
            groupFullName = dnaStore.getDNAVisGroupName(i)
            groupName = base.cr.hoodMgr.extractGroupName(groupFullName)
            groupNode = self.geom.find('**/' + groupFullName)
            if groupNode.isEmpty():
                self.notify.error('Could not find visgroup')
            groupNode.flattenMedium()
            self.nodeList.append(groupNode)
        self.removeLandmarkBlockNodes()
        self.hood.dnaStore.resetPlaceNodes()
        self.hood.dnaStore.resetDNAGroups()
        self.hood.dnaStore.resetDNAVisGroups()
        self.hood.dnaStore.resetDNAVisGroupsAI()

    def removeLandmarkBlockNodes(self):
        # Suit-building origin markers are server-side only; drop them.
        npc = self.geom.findAllMatches('**/suit_building_origin')
        for i in xrange(npc.getNumPaths()):
            npc.getPath(i).removeNode()

    def enterStart(self):
        pass

    def exitStart(self):
        pass

    def enterPlayground(self, requestStatus):
        """Instantiate the hood-specific playground Place and enter it."""
        self.acceptOnce(self.placeDoneEvent, self.handlePlaygroundDone)
        self.place = self.playgroundClass(self, self.fsm, self.placeDoneEvent)
        self.place.load()
        self.place.enter(requestStatus)
        base.cr.playGame.setPlace(self.place)

    def exitPlayground(self):
        self.ignore(self.placeDoneEvent)
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return

    def handlePlaygroundDone(self):
        """Route the next destination: quiet zone if we stay in this
        branch/shard, otherwise bubble up to the hood loader."""
        status = self.place.doneStatus
        teleportDebug(status, 'handlePlaygroundDone, doneStatus=%s' % (status,))
        if ZoneUtil.getBranchZone(status['zoneId']) == self.hood.hoodId and status['shardId'] == None:
            teleportDebug(status, 'same branch')
            self.fsm.request('quietZone', [status])
        else:
            self.doneStatus = status
            teleportDebug(status, 'different hood')
            messenger.send(self.doneEvent)
        return

    def enterToonInterior(self, requestStatus):
        self.acceptOnce(self.placeDoneEvent, self.handleToonInteriorDone)
        self.place = ToonInterior.ToonInterior(self, self.fsm.getStateNamed('toonInterior'), self.placeDoneEvent)
        base.cr.playGame.setPlace(self.place)
        self.place.load()
        self.place.enter(requestStatus)

    def exitToonInterior(self):
        self.ignore(self.placeDoneEvent)
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return

    def handleToonInteriorDone(self):
        # Same routing rule as handlePlaygroundDone, without debug output.
        status = self.place.doneStatus
        if ZoneUtil.getBranchZone(status['zoneId']) == self.hood.hoodId and status['shardId'] == None:
            self.fsm.request('quietZone', [status])
        else:
            self.doneStatus = status
            messenger.send(self.doneEvent)
        return

    def enterQuietZone(self, requestStatus):
        """Show the transition (quiet zone) screen while the next zone loads."""
        self.quietZoneDoneEvent = uniqueName('quietZoneDone')
        self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
        self.quietZoneStateData = QuietZoneState.QuietZoneState(self.quietZoneDoneEvent)
        self.quietZoneStateData.load()
        self.quietZoneStateData.enter(requestStatus)

    def exitQuietZone(self):
        self.ignore(self.quietZoneDoneEvent)
        del self.quietZoneDoneEvent
        self.quietZoneStateData.exit()
        self.quietZoneStateData.unload()
        self.quietZoneStateData = None
        return

    def handleQuietZoneDone(self):
        status = self.quietZoneStateData.getRequestStatus()
        if status['where'] == 'estate':
            # Estates are handled by a different loader; hand control back.
            self.doneStatus = status
            messenger.send(self.doneEvent)
        else:
            self.fsm.request(status['where'], [status])

    def enterFinal(self):
        pass

    def exitFinal(self):
        pass

    def createAnimatedProps(self, nodeList):
        """Find every animated_prop_*/interactive_prop_* node and wrap it
        in its prop class, imported dynamically from toontown.hood."""
        self.animPropDict = {}
        for i in nodeList:
            animPropNodes = i.findAllMatches('**/animated_prop_*')
            numAnimPropNodes = animPropNodes.getNumPaths()
            for j in xrange(numAnimPropNodes):
                animPropNode = animPropNodes.getPath(j)
                if animPropNode.getName().startswith('animated_prop_generic'):
                    className = 'GenericAnimatedProp'
                else:
                    # Node names embed the class: animated_prop_<Class>_DNARoot.
                    className = animPropNode.getName()[14:-8]
                symbols = {}
                base.cr.importModule(symbols, 'toontown.hood', [className])
                classObj = getattr(symbols[className], className)
                animPropObj = classObj(animPropNode)
                animPropList = self.animPropDict.setdefault(i, [])
                animPropList.append(animPropObj)
            interactivePropNodes = i.findAllMatches('**/interactive_prop_*')
            numInteractivePropNodes = interactivePropNodes.getNumPaths()
            for j in xrange(numInteractivePropNodes):
                interactivePropNode = interactivePropNodes.getPath(j)
                className = 'GenericAnimatedProp'
                symbols = {}
                base.cr.importModule(symbols, 'toontown.hood', [className])
                classObj = getattr(symbols[className], className)
                interactivePropObj = classObj(interactivePropNode)
                animPropList = self.animPropDict.get(i)
                if animPropList is None:
                    animPropList = self.animPropDict.setdefault(i, [])
                animPropList.append(interactivePropObj)

        return

    def deleteAnimatedProps(self):
        for zoneNode, animPropList in self.animPropDict.items():
            for animProp in animPropList:
                animProp.delete()

        del self.animPropDict

    def enterAnimatedProps(self, zoneNode):
        # Start props only for the vis group the avatar can currently see.
        for animProp in self.animPropDict.get(zoneNode, ()):
            animProp.enter()

    def exitAnimatedProps(self, zoneNode):
        for animProp in self.animPropDict.get(zoneNode, ()):
            animProp.exit()

    def enterGolfcourse(self, requestStatus):
        # The golf course handles its own scene; just fade out immediately.
        base.transitions.fadeOut(t=0)

    def exitGolfcourse(self):
        pass
| [
"vincentandrea15k@gmail.com"
] | vincentandrea15k@gmail.com |
e545de69bc868a2bb6cdfebd2ab53a87e2dbcdc5 | 8d35b8aa63f3cae4e885e3c081f41235d2a8f61f | /discord/ext/dl/extractor/performgroup.py | 553b2b6e861bedeaababf7888ba0a15b779f4d71 | [
"MIT"
] | permissive | alexyy802/Texus | 1255f4e54c8d3cc067f0d30daff1cf24932ea0c9 | c282a836f43dfd588d89d5c13f432896aebb540f | refs/heads/master | 2023-09-05T06:14:36.217601 | 2021-11-21T03:39:55 | 2021-11-21T03:39:55 | 429,390,575 | 0 | 0 | MIT | 2021-11-19T09:22:22 | 2021-11-18T10:43:11 | Python | UTF-8 | Python | false | false | 3,674 | py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class PerformGroupIE(InfoExtractor):
    """Extractor for player.performgroup.com embedded players.

    The URL fragment carries a 26-hex player id and a 26-char auth token;
    these are exchanged against the performfeeds API (bootstrap, then vod)
    for the video metadata and stream formats.
    """

    _VALID_URL = r"https?://player\.performgroup\.com/eplayer(?:/eplayer\.html|\.js)#/?(?P<id>[0-9a-f]{26})\.(?P<auth_token>[0-9a-z]{26})"
    _TESTS = [
        {
            # http://www.faz.net/aktuell/sport/fussball/wm-2018-playoffs-schweiz-besiegt-nordirland-1-0-15286104.html
            "url": "http://player.performgroup.com/eplayer/eplayer.html#d478c41c5d192f56b9aa859de8.1w4crrej5w14e1ed4s1ce4ykab",
            "md5": "259cb03d142e2e52471e8837ecacb29f",
            "info_dict": {
                "id": "xgrwobuzumes1lwjxtcdpwgxd",
                "ext": "mp4",
                "title": "Liga MX: Keine Einsicht nach Horrorfoul",
                "description": "md5:7cd3b459c82725b021e046ab10bf1c5b",
                "timestamp": 1511533477,
                "upload_date": "20171124",
            },
        }
    ]

    def _call_api(self, service, auth_token, content_id, referer_url):
        """Fetch JSON from the performfeeds endpoint for *service*
        ('bootstrap' or 'vod'); Referer/Origin headers are required."""
        return self._download_json(
            "http://ep3.performfeeds.com/ep%s/%s/%s/"
            % (service, auth_token, content_id),
            content_id,
            headers={
                "Referer": referer_url,
                "Origin": "http://player.performgroup.com",
            },
            query={
                "_fmt": "json",
            },
        )

    def _real_extract(self, url):
        player_id, auth_token = re.search(self._VALID_URL, url).groups()
        # bootstrap resolves the player id to the first video's uuid.
        bootstrap = self._call_api("bootstrap", auth_token, player_id, url)
        video = bootstrap["config"]["dataSource"]["sourceItems"][0]["videos"][0]
        video_id = video["uuid"]
        vod = self._call_api("vod", auth_token, video_id, url)
        media = vod["videos"]["video"][0]["media"]

        formats = []
        # Adaptive streams: HLS and HDS manifests, when present.
        hls_url = media.get("hls", {}).get("url")
        if hls_url:
            formats.extend(
                self._extract_m3u8_formats(
                    hls_url, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False
                )
            )

        hds_url = media.get("hds", {}).get("url")
        if hds_url:
            formats.extend(
                self._extract_f4m_formats(
                    hds_url + "?hdcore", video_id, f4m_id="hds", fatal=False
                )
            )

        # Progressive HTTP renditions, one per bitrate.
        for c in media.get("content", []):
            c_url = c.get("url")
            if not c_url:
                continue
            tbr = int_or_none(c.get("bitrate"), 1000)
            format_id = "http"
            if tbr:
                format_id += "-%d" % tbr
            formats.append(
                {
                    "format_id": format_id,
                    "url": c_url,
                    "tbr": tbr,
                    "width": int_or_none(c.get("width")),
                    "height": int_or_none(c.get("height")),
                    "filesize": int_or_none(c.get("fileSize")),
                    "vcodec": c.get("type"),
                    "fps": int_or_none(c.get("videoFrameRate")),
                    "vbr": int_or_none(c.get("videoRate"), 1000),
                    "abr": int_or_none(c.get("audioRate"), 1000),
                }
            )
        self._sort_formats(formats)

        return {
            "id": video_id,
            "title": video["title"],
            "description": video.get("description"),
            "thumbnail": video.get("poster"),
            "duration": int_or_none(video.get("duration")),
            # API reports milliseconds; scale to seconds.
            "timestamp": int_or_none(video.get("publishedTime"), 1000),
            "formats": formats,
        }
| [
"noreply@github.com"
] | alexyy802.noreply@github.com |
2d1fc9f47ee7353e4bd96d192e7c013fa411569f | 022a0cb0d0873da0c25da6b6aa8b258b80a4b7e0 | /1955.py | 5669c8dd6cf03ae1b9f356ba0f28ac626288e446 | [] | no_license | subayadhav/fri07061 | 31e1e89ac1be60c736450f749486651968cfeba4 | 380f5d108869ad4cde16140dc21a88f2a7972722 | refs/heads/master | 2020-06-01T06:43:17.094510 | 2019-06-07T08:47:49 | 2019-06-07T08:47:49 | 190,683,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | n1k=map(int,input().split())
if n1k !=0:
print(n1k)
else:
print(k)
| [
"noreply@github.com"
] | subayadhav.noreply@github.com |
549c3d80595c7be7ec9706b113796272180aa681 | b242c102f50ded2fee0cc6ac6aea442a1b6792cb | /strategy/65_8只基金按PE调仓.py | f5e4523c7a6542df7ee6f4aba6ead2c1ffa43860 | [] | no_license | VIRGIL-YAN/woquant | 2c0811ed743d217b2ec478988ce0808838f1177a | ac5437e0eed552aa9b3015d1ace647c9a492f97d | refs/heads/master | 2023-04-16T17:23:05.754846 | 2021-04-30T08:58:10 | 2021-04-30T08:58:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,036 | py | 该策略由聚宽用户分享,仅供学习交流使用。
原文网址:https://www.joinquant.com/post/11700
原文一般包含策略说明,如有疑问建议到原文和作者交流讨论。
原文策略源码如下:
# 标题:低回撤,PE分仓
# 作者:桑梓
#自定义数据是股指ETF占的仓位
from __future__ import division
import numpy as np
import pandas as pd
import bisect
def initialize(context):
    """JoinQuant entry point: configure schedule, benchmark, holding levels and
    the 8 independent sub-portfolios, one per tracked index/ETF pair."""
    g.flag = False
    run_monthly(monthly, 1, time='open')
    set_benchmark('000300.XSHG')
    #g.CN10y_bond=0.03
    log.set_level('order', 'error')
    # Target position levels; which one applies is decided by Relation()
    # from the index PE's position inside its historical band.
    g.HoldLevel=0
    g.LastHoldLevel=0
    g.HoldLevel1=0
    g.HoldLevel2=0.2
    g.HoldLevel3=0.4
    g.HoldLevel4=0.6
    g.HoldLevel5=0.75
    g.HoldLevel6=0.9
    # Create 8 independent sub-portfolios.
    init_cash = context.portfolio.starting_cash # fetch the initial capital
    init_cash = context.portfolio.starting_cash/8 # split the initial capital into 8 equal parts (original comment said 10, but the code divides by 8)
    set_subportfolios([SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock')])
    # Value layout per entry:
    # [index_code, etf_code, subportfolio_obj, subportfolio_index, PE_band_min, PE_band_max]
    g.stocks = {
        'hs300':['000300.XSHG','510300.XSHG',context.subportfolios[0],0,14.10,44.42],
        'zzhb':['000827.XSHG','512580.XSHG',context.subportfolios[1],1,26.78,72.29], # CSI environmental-protection index (presumably; verify code 000827)
        'zz500':['000905.XSHG','510510.XSHG',context.subportfolios[2],2,21.99,69.81], # CSI 500
        'hlzz':['000015.XSHG','510880.XSHG',context.subportfolios[3],3,8.40,46.23], # Dividend index
        'cyb':['399006.XSHE','159915.XSHE',context.subportfolios[4],4,27.61,121.85], # ChiNext
        'zztmt':['000998.XSHG','150203.XSHE',context.subportfolios[5],5,28.15,108.92], # TMT
        'yy':['000933.XSHG','512010.XSHG',context.subportfolios[6],6,22.22,66.82], # Pharmaceuticals
        'zz100':['000903.XSHG','150012.XSHE',context.subportfolios[7],7,9.81,36.59] # CSI 100
        }
def monthly(context):
    """Scheduled at the open of the first trading day each month: raise the
    rebalance flag that handle_data() consumes (and clears) the same day."""
    g.flag = True
def Relation(n, MaxRatio, MinRatio):
    """Map an index PE `n` onto a target holding level.

    MinRatio/MaxRatio describe the index's historical PE band (minimum and
    span): the deeper `n` sits inside that band, the lower the returned
    level.  The level constants are the `g.HoldLevel*` globals that
    initialize() sets up; below 30% of the band the strategy goes all-in (1).
    """
    if n >= MaxRatio * 0.9 + MinRatio:
        return g.HoldLevel1
    if n > MaxRatio * 0.8 + MinRatio:
        return g.HoldLevel2
    if n > MaxRatio * 0.7 + MinRatio:
        return g.HoldLevel3
    if n > MaxRatio * 0.6 + MinRatio:
        return g.HoldLevel4
    if n > MaxRatio * 0.5 + MinRatio:
        return g.HoldLevel5
    if n > MaxRatio * 0.3 + MinRatio:
        return g.HoldLevel6
    return 1
#else:
# k=(g.MinHoldLevel-g.MaxHoldLevel)/(g.MaxRatio-g.MinRatio)
# b=g.MinHoldLevel-g.MinRatio*k
# g.HoldLevel=k*n+b
#Debug:
#print 'k=(' +str(g.MaxHoldLevel)+'-'+str(g.MinHoldLevel) + ')/' +\
#'('+str(g.MaxRatio)+'-'+str(g.MinRatio)+')'+' = '+str(k)
#print 'HoldLevel=' +str(k) + '*N' + '+' +str(b)
def PeRatio(code, context):  # compute the index's current PE
    """Return the harmonic-mean PE of index `code` as of the backtest date.

    Constituents are fetched via get_index_stocks(); each stock contributes
    1/pe to the denominator, stocks with non-positive PE contribute 0.
    Returns NaN when no fundamentals are available, or when every
    constituent's PE is non-positive (the original code raised
    ZeroDivisionError in that case).
    """
    date = context.current_dt
    stocks = get_index_stocks(code, date)
    q = query(valuation).filter(valuation.code.in_(stocks))
    df = get_fundamentals(q, date)
    if len(df) > 0:
        denom = sum(1 / p if p > 0 else 0 for p in df.pe_ratio)
        if denom == 0:
            # All PEs <= 0 — harmonic mean undefined; stay consistent with
            # the empty-dataframe branch below.
            return float('NaN')
        return len(df) / denom
    else:
        return float('NaN')
#def ChangeHoldLevel(stock,NewHoldLevel,context):
# order_target_value(g.stocks[stock][1],NewHoldLevel*g.stocks[stock][2],pindex=g.stocks[stock][3])
#order_target_value(g.Test_bond,(1-NewHoldLevel)*AllMoney,None)
def handle_data(context, data):
    """Per-bar hook: on the first bar after the monthly flag is raised,
    rebalance every sub-portfolio's ETF to the level implied by its index PE."""
    #if context.current_dt.isoweekday()!=1: # only act on Mondays (disabled)
    #    return
    #N= (1/PeRatio(get_current_data()))/g.CN10y_bond
    if g.flag == True:
        for stock in g.stocks:
            # Entry layout: [index_code, etf_code, subportfolio, pindex, PE_min, PE_max]
            index_pe = PeRatio(g.stocks[stock][0],context)
            MaxRatio1 = g.stocks[stock][5]
            MinRatio = g.stocks[stock][4]
            MaxRatio = MaxRatio1-MinRatio    # span of the historical PE band
            HoldLevel = Relation(index_pe,MaxRatio,MinRatio)
            trade_code = g.stocks[stock][1]
            # Target ETF value = sub-portfolio total value * holding level.
            cash = g.stocks[stock][2].total_value * HoldLevel
            inde = g.stocks[stock][3]
            order_target_value(trade_code,cash,pindex=inde)
        g.flag = False   # consume the flag so we rebalance once per month
'''
N = PeRatio(code,context)
HoldLevel = Relation(N)
ChangeHoldLevel(HoldLevel,context.portfolio.total_value)
print 'PE:%.2f'%N
print "Holdlevel is %.2f" % HoldLevel
record(name=g.HoldLevel)
''' | [
"28278672@qq.com"
] | 28278672@qq.com |
5a178504701103cd6061e28c6f5cff59ceab7594 | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon6378.py | 8ce2953ef95739ba3df687df8eebc76b9b14c075 | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | while True:
a=input()
if a=="0":break
while True:
result=sum(int(i) for i in a)
a=str(result)
if int(a)<10:break
print(result) | [
"be_ok91@naver.com"
] | be_ok91@naver.com |
f2d1cdcbd86af861339970878e3c4bcecd5bf9df | c3132612a7ac311e501e432e1a4c7592bbd7a713 | /day09/code/07_迭代器&迭代器对象.py | 0f2533805b097e5b79005e0871676ac0381c71a2 | [] | no_license | everqiujuan/python | 7b8e169107012c3d7829d4ebd9860482fc0d8fec | b0a98de943217e24da60f79dec4fe8ebf4f1c713 | refs/heads/master | 2020-06-21T16:57:22.260311 | 2019-07-18T05:58:44 | 2019-07-18T05:58:44 | 184,990,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py |
# Demo: iterables vs. iterators.
# The Iterator/Iterable ABCs live in collections.abc; importing them straight
# from `collections` was deprecated and removed in Python 3.10, so import from
# collections.abc instead.
from collections.abc import Iterator  # iterator
from collections.abc import Iterable  # iterable object

# Iterable: anything a for-in loop can traverse.
# list, tuple, dict, set, str and generator objects are all iterable.
print(isinstance([1, 2], Iterable))  # True
print(isinstance((1, 2), Iterable))  # True
print(isinstance({}, Iterable))  # True
print(isinstance({1, 2}, Iterable))  # True
print(isinstance("hello", Iterable))  # True
print(isinstance((i for i in range(1, 3)), Iterable))  # True

# Iterator: traversable with for-in AND supports next().
print(isinstance([1, 2], Iterator))  # False
print(isinstance((1, 2), Iterator))  # False
print(isinstance({}, Iterator))  # False
print(isinstance({1, 2}, Iterator))  # False
print(isinstance("hello", Iterator))  # False
print(isinstance((i for i in range(1, 3)), Iterator))  # True

# iter(): turn an iterable object into an iterator.
list1 = [11, 22, 33]
res = iter(list1)
# print(res)        # <list_iterator object at 0x00000000027B7208>
# print(next(res))  # 11
# print(next(res))  # 22
# print(next(res))  # 33
# print(next(res))  # raises StopIteration
# list(): materialise the (remaining) iterator back into a list.
list2 = list(res)
print(list2)  # [11, 22, 33]
| [
"1748636236@qq.com"
] | 1748636236@qq.com |
472b14b7674ba6998f913e051207c965ee2f4138 | 599bca7f41694112b1367854a81e0bd9162a6f7a | /2020SpringClass/学习笔记/201702064-zhousijia/6zhousijia201702064/code/MiniFramework/DataReader_2_0.py | 70736905f84cb84aac6cdcd6e66768d5c17f3b53 | [
"MIT"
] | permissive | XIAxuanzheFrancis/AIML | f625013a5010799681601cf25b7c4b103226dcc4 | 7e333fd65378c2cbaeedbeaa3560f30e8a341857 | refs/heads/master | 2023-01-19T17:29:45.156651 | 2020-11-29T15:35:52 | 2020-11-29T15:35:52 | 299,838,961 | 2 | 0 | MIT | 2020-09-30T07:16:30 | 2020-09-30T07:16:29 | null | UTF-8 | Python | false | false | 7,754 | py | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
"""
Version 2.0
"""
import numpy as np
from pathlib import Path
from MiniFramework.EnumDef_3_0 import *
"""
X:
x1: feature1 feature2 feature3...
x2: feature1 feature2 feature3...
x3: feature1 feature2 feature3...
......
Y: [if regression, value]
[if binary classification, 0/1]
[if multiple classification, e.g. 4 category, one-hot]
"""
class DataReader_2_0(object):
    """Load train/test sets from .npz files and provide normalization,
    label encoding, validation splitting, batching and shuffling helpers.

    Data layout (see module header): X rows are samples, columns features;
    Y is one column (regression value / 0-1 label / class id).

    Bug fixed versus the previous revision: __ToZeroOne was missing ``self``
    and iterated ``range()`` with no argument (a TypeError), so binary-label
    conversion could never run.
    """

    def __init__(self, train_file, test_file):
        self.train_file_name = train_file
        self.test_file_name = test_file
        self.num_train = 0       # num of training examples
        self.num_test = 0        # num of test examples
        self.num_validation = 0  # num of validation examples
        self.num_feature = 0     # num of features
        self.num_category = 0    # num of categories
        self.XTrain = None       # training feature set
        self.YTrain = None       # training label set
        self.XTest = None        # test feature set
        self.YTest = None        # test label set
        self.XTrainRaw = None    # training feature set before normalization
        self.YTrainRaw = None    # training label set before normalization
        self.XTestRaw = None     # test feature set before normalization
        self.YTestRaw = None     # test label set before normalization
        self.XDev = None         # validation feature set
        self.YDev = None         # validation label set

    def ReadData(self):
        """Load both .npz files; raise if either is missing.

        Until a Normalize* method is called, XTrain/YTrain/XTest/YTest alias
        the raw arrays, and the validation set aliases the test set.
        """
        train_file = Path(self.train_file_name)
        if train_file.exists():
            data = np.load(self.train_file_name)
            self.XTrainRaw = data["data"]
            self.YTrainRaw = data["label"]
            assert (self.XTrainRaw.shape[0] == self.YTrainRaw.shape[0])
            self.num_train = self.XTrainRaw.shape[0]
            self.num_feature = self.XTrainRaw.shape[1]
            self.num_category = len(np.unique(self.YTrainRaw))
            # this is for if no normalize requirement
            self.XTrain = self.XTrainRaw
            self.YTrain = self.YTrainRaw
        else:
            raise Exception("Cannot find train file!!!")
        # end if
        test_file = Path(self.test_file_name)
        if test_file.exists():
            data = np.load(self.test_file_name)
            self.XTestRaw = data["data"]
            self.YTestRaw = data["label"]
            assert (self.XTestRaw.shape[0] == self.YTestRaw.shape[0])
            self.num_test = self.XTestRaw.shape[0]
            # this is for if no normalize requirement
            self.XTest = self.XTestRaw
            self.YTest = self.YTestRaw
            # in case there has no validation set created
            self.XDev = self.XTest
            self.YDev = self.YTest
        else:
            raise Exception("Cannot find test file!!!")
        # end if

    def NormalizeX(self):
        """Min-max normalize X: merge train+test, normalize, then split again
        so both sets share the same per-feature min/range."""
        x_merge = np.vstack((self.XTrainRaw, self.XTestRaw))
        x_merge_norm = self.__NormalizeX(x_merge)
        train_count = self.XTrainRaw.shape[0]
        self.XTrain = x_merge_norm[0:train_count, :]
        self.XTest = x_merge_norm[train_count:, :]

    def __NormalizeX(self, raw_data):
        """Column-wise min-max scaling to [0,1]; records min (row 0) and range
        (row 1) per feature in self.X_norm for later reuse.
        NOTE(review): a constant feature (range 0) would divide by zero."""
        temp_X = np.zeros_like(raw_data)
        self.X_norm = np.zeros((2, self.num_feature))
        # normalize each feature over all samples
        for i in range(self.num_feature):
            # get one feature from all examples
            x = raw_data[:, i]
            max_value = np.max(x)
            min_value = np.min(x)
            # min value
            self.X_norm[0, i] = min_value
            # range value
            self.X_norm[1, i] = max_value - min_value
            x_new = (x - self.X_norm[0, i]) / self.X_norm[1, i]
            temp_X[:, i] = x_new
        # end for
        return temp_X

    def NormalizeY(self, nettype, base=0):
        """Encode Y according to the network type: min-max scale for fitting,
        0/1 relabel for binary classification, one-hot for multi-class.
        ``base`` is the smallest raw label value (offset for one-hot).
        NOTE(review): for the binary branch ``base`` lands on the
        positive_label parameter of __ToZeroOne — confirm that is intended."""
        if nettype == NetType.Fitting:
            y_merge = np.vstack((self.YTrainRaw, self.YTestRaw))
            y_merge_norm = self.__NormalizeY(y_merge)
            train_count = self.YTrainRaw.shape[0]
            self.YTrain = y_merge_norm[0:train_count, :]
            self.YTest = y_merge_norm[train_count:, :]
        elif nettype == NetType.BinaryClassifier:
            self.YTrain = self.__ToZeroOne(self.YTrainRaw, base)
            self.YTest = self.__ToZeroOne(self.YTestRaw, base)
        elif nettype == NetType.MultipleClassifier:
            self.YTrain = self.__ToOneHot(self.YTrainRaw, base)
            self.YTest = self.__ToOneHot(self.YTestRaw, base)

    def __NormalizeY(self, raw_data):
        """Min-max scale a single-column Y to [0,1]; stores min/range in
        self.Y_norm so DeNormalizeY can invert the mapping."""
        assert (raw_data.shape[1] == 1)
        self.Y_norm = np.zeros((2, 1))
        max_value = np.max(raw_data)
        min_value = np.min(raw_data)
        # min value
        self.Y_norm[0, 0] = min_value
        # range value
        self.Y_norm[1, 0] = max_value - min_value
        y_new = (raw_data - min_value) / self.Y_norm[1, 0]
        return y_new

    def DeNormalizeY(self, predict_data):
        """Invert __NormalizeY: map predictions back to the original Y scale."""
        real_value = predict_data * self.Y_norm[1, 0] + self.Y_norm[0, 0]
        return real_value

    def __ToOneHot(self, Y, base=0):
        """One-hot encode integer labels; ``base`` is the smallest label so
        label ``base`` maps to column 0."""
        count = Y.shape[0]
        temp_Y = np.zeros((count, self.num_category))
        for i in range(count):
            n = (int)(Y[i, 0])
            temp_Y[i, n - base] = 1
        return temp_Y

    # for binary classifier
    # if use tanh function, need to set negative_value = -1
    def __ToZeroOne(self, Y, positive_label=1, negative_label=0, positiva_value=1, negative_value=0):
        """Relabel a binary Y column: negative_label -> negative_value,
        positive_label -> positive_value; anything else stays 0.

        Fixed: the previous revision omitted ``self`` and called ``range()``
        with no argument, which raised TypeError on every call.
        """
        temp_Y = np.zeros_like(Y)
        for i in range(Y.shape[0]):
            if Y[i, 0] == negative_label:    # negative class label -> 0 (or -1 for tanh)
                temp_Y[i, 0] = negative_value
            elif Y[i, 0] == positive_label:  # positive class label -> 1
                temp_Y[i, 0] = positiva_value
            # end if
        # end for
        return temp_Y

    def NormalizePredicateData(self, X_predicate):
        """Normalize prediction-time data with the stored per-feature min/range.
        NOTE(review): here features run along axis 0 (one row per feature),
        i.e. the transpose of the training layout — confirm with callers."""
        X_new = np.zeros(X_predicate.shape)
        n_feature = X_predicate.shape[0]
        for i in range(n_feature):
            x = X_predicate[i, :]
            X_new[i, :] = (x - self.X_norm[0, i]) / self.X_norm[1, i]
        return X_new

    def GenerateValidationSet(self, k=10):
        """Carve 1/k of the (current) training set off the front as the
        validation set; must be called explicitly."""
        self.num_validation = (int)(self.num_train / k)
        self.num_train = self.num_train - self.num_validation
        # validation set
        self.XDev = self.XTrain[0:self.num_validation]
        self.YDev = self.YTrain[0:self.num_validation]
        # train set
        self.XTrain = self.XTrain[self.num_validation:]
        self.YTrain = self.YTrain[self.num_validation:]

    def GetValidationSet(self):
        """Return (XDev, YDev)."""
        return self.XDev, self.YDev

    def GetTestSet(self):
        """Return (XTest, YTest)."""
        return self.XTest, self.YTest

    def GetBatchTrainSamples(self, batch_size, iteration):
        """Return mini-batch number ``iteration`` (0-based) of size batch_size."""
        start = iteration * batch_size
        end = start + batch_size
        batch_X = self.XTrain[start:end, :]
        batch_Y = self.YTrain[start:end, :]
        return batch_X, batch_Y

    def Shuffle(self):
        """Shuffle XTrain/YTrain in unison by reseeding the RNG identically
        before each permutation (np.random.permutation shuffles along axis 0)."""
        seed = np.random.randint(0, 100)
        np.random.seed(seed)
        XP = np.random.permutation(self.XTrain)
        np.random.seed(seed)
        YP = np.random.permutation(self.YTrain)
        self.XTrain = XP
        self.YTrain = YP
| [
"gjy2poincare@users.noreply.github.com"
] | gjy2poincare@users.noreply.github.com |
4d3ca7f2488eba1e42a04c2da0003f2b91877b3d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop255_pad20_jit15/Sob_k35_s001_Bce_s001/pyr_6s/L5/step12_L2345678.py | ced8db8972c484ac0855846127136af881567d2b | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99,746 | py | '''
目前只有 step12 一定需要切換資料夾到 該komg_model所在的資料夾 才能執行喔!
'''
if(__name__ == "__main__"):
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") + 1 ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step12_result_analyzer import Row_col_exps_analyzer
from step11_L2345678 import *
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)
#############################################################################################################################################################################################################
ana_dir = template_dir
#############################################################################################################################################################################################################
"""
以下留下一些example這樣子
core_amount == 7 是因為 目前 see_amount == 7 ,想 一個core 一個see
task_amount == 7 是因為 目前 see_amount == 7
single_see_multiprocess == True 代表 see內 還要 切 multiprocess,
single_see_core_amount == 2 代表切2分
所以總共會有 7*2 = 14 份 process 要同時處理,
但建議不要用,已經測過,爆記憶體了
"""
#################################################################################################################################################################################################################
#################################################################################################################################################################################################################
###################
### 1side_1
###################
####### 2side_1
##### 3side_1
### 4side_1
ana_name = "ch032_1side_1_2side_1_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_1_2side_1_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
###################
### 1side_2
###################
####### 2side_1
##### 3side_1
ana_name = "ch032_1side_2_2side_1_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_2_2side_1_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_2
##### 3side_1
ana_name = "ch032_1side_2_2side_2_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_2_2side_2_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_2
ana_name = "ch032_1side_2_2side_2_3side_2_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_2_2side_2_3side_2_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_2_2side_2_3side_2_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_2_2side_2_3side_2_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
###################
### 1side_3
###################
####### 2side_1
##### 3side_1
ana_name = "ch032_1side_3_2side_1_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_1_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_2
##### 3side_1
ana_name = "ch032_1side_3_2side_2_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_2_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_2
ana_name = "ch032_1side_3_2side_2_3side_2_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_2_3side_2_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_3_2side_2_3side_2_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_2_3side_2_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_3
##### 3side_1
ana_name = "ch032_1side_3_2side_3_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_3_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_2
ana_name = "ch032_1side_3_2side_3_3side_2_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_3_3side_2_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_3_2side_3_3side_2_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_3_3side_2_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_3
ana_name = "ch032_1side_3_2side_3_3side_3_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_3_3side_3_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_3_2side_3_3side_3_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_3_3side_3_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_3_2side_3_3side_3_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_3_2side_3_3side_3_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
###################
### 1side_4
###################
####### 2side_1
##### 3side_1
ana_name = "ch032_1side_4_2side_1_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_4_2side_1_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_2
##### 3side_1
ana_name = "ch032_1side_4_2side_2_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_4_2side_2_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_2
ana_name = "ch032_1side_4_2side_2_3side_2_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_4_2side_2_3side_2_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_4_2side_2_3side_2_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_4_2side_2_3side_2_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_3
##### 3side_1
ana_name = "ch032_1side_4_2side_3_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_4_2side_3_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
# The original unrolled the identical Row_col_exps_analyzer construction once
# per experiment group (66 copies), differing only in the group name and the
# matching module-level `ch032_*` results variable, with the follow-up calls
# disabled via a fragile backslash-continuation-into-comment trick.
# Deduplicated into one data-driven loop. Behavior is unchanged: the same
# analyzers are constructed in the same order, and `ana_name` / `analyzer`
# end up bound to the *last* group, exactly as the unrolled code left them.
#
# Each entry pairs the group name (used only to build the output directory)
# with the result collection of the same name. Explicit variable references
# are kept (no globals() lookup) so a typo fails loudly at definition time.
_ANA_JOBS = [
    # ### 1side_4 ####### 2side_3
    ("ch032_1side_4_2side_3_3side_2_4side_1_56side_all", ch032_1side_4_2side_3_3side_2_4side_1_56side_all),
    ("ch032_1side_4_2side_3_3side_2_4side_2_56side_all", ch032_1side_4_2side_3_3side_2_4side_2_56side_all),
    ("ch032_1side_4_2side_3_3side_3_4side_1_56side_all", ch032_1side_4_2side_3_3side_3_4side_1_56side_all),
    ("ch032_1side_4_2side_3_3side_3_4side_2_56side_all", ch032_1side_4_2side_3_3side_3_4side_2_56side_all),
    ("ch032_1side_4_2side_3_3side_3_4side_3_56side_all", ch032_1side_4_2side_3_3side_3_4side_3_56side_all),
    # ### 1side_4 ####### 2side_4
    ("ch032_1side_4_2side_4_3side_1_4side_1_56side_all", ch032_1side_4_2side_4_3side_1_4side_1_56side_all),
    ("ch032_1side_4_2side_4_3side_2_4side_1_56side_all", ch032_1side_4_2side_4_3side_2_4side_1_56side_all),
    ("ch032_1side_4_2side_4_3side_2_4side_2_56side_all", ch032_1side_4_2side_4_3side_2_4side_2_56side_all),
    ("ch032_1side_4_2side_4_3side_3_4side_1_56side_all", ch032_1side_4_2side_4_3side_3_4side_1_56side_all),
    ("ch032_1side_4_2side_4_3side_3_4side_2_56side_all", ch032_1side_4_2side_4_3side_3_4side_2_56side_all),
    ("ch032_1side_4_2side_4_3side_3_4side_3_56side_all", ch032_1side_4_2side_4_3side_3_4side_3_56side_all),
    ("ch032_1side_4_2side_4_3side_4_4side_1_56side_all", ch032_1side_4_2side_4_3side_4_4side_1_56side_all),
    ("ch032_1side_4_2side_4_3side_4_4side_2_56side_all", ch032_1side_4_2side_4_3side_4_4side_2_56side_all),
    ("ch032_1side_4_2side_4_3side_4_4side_3_56side_all", ch032_1side_4_2side_4_3side_4_4side_3_56side_all),
    ("ch032_1side_4_2side_4_3side_4_4side_4_56side_all", ch032_1side_4_2side_4_3side_4_4side_4_56side_all),
    # ### 1side_5 ####### 2side_1
    ("ch032_1side_5_2side_1_3side_1_4side_1_56side_all", ch032_1side_5_2side_1_3side_1_4side_1_56side_all),
    # ### 1side_5 ####### 2side_2
    ("ch032_1side_5_2side_2_3side_1_4side_1_56side_all", ch032_1side_5_2side_2_3side_1_4side_1_56side_all),
    ("ch032_1side_5_2side_2_3side_2_4side_1_56side_all", ch032_1side_5_2side_2_3side_2_4side_1_56side_all),
    ("ch032_1side_5_2side_2_3side_2_4side_2_56side_all", ch032_1side_5_2side_2_3side_2_4side_2_56side_all),
    # ### 1side_5 ####### 2side_3
    ("ch032_1side_5_2side_3_3side_1_4side_1_56side_all", ch032_1side_5_2side_3_3side_1_4side_1_56side_all),
    ("ch032_1side_5_2side_3_3side_2_4side_1_56side_all", ch032_1side_5_2side_3_3side_2_4side_1_56side_all),
    ("ch032_1side_5_2side_3_3side_2_4side_2_56side_all", ch032_1side_5_2side_3_3side_2_4side_2_56side_all),
    ("ch032_1side_5_2side_3_3side_3_4side_1_56side_all", ch032_1side_5_2side_3_3side_3_4side_1_56side_all),
    ("ch032_1side_5_2side_3_3side_3_4side_2_56side_all", ch032_1side_5_2side_3_3side_3_4side_2_56side_all),
    ("ch032_1side_5_2side_3_3side_3_4side_3_56side_all", ch032_1side_5_2side_3_3side_3_4side_3_56side_all),
    # ### 1side_5 ####### 2side_4
    ("ch032_1side_5_2side_4_3side_1_4side_1_56side_all", ch032_1side_5_2side_4_3side_1_4side_1_56side_all),
    ("ch032_1side_5_2side_4_3side_2_4side_1_56side_all", ch032_1side_5_2side_4_3side_2_4side_1_56side_all),
    ("ch032_1side_5_2side_4_3side_2_4side_2_56side_all", ch032_1side_5_2side_4_3side_2_4side_2_56side_all),
    ("ch032_1side_5_2side_4_3side_3_4side_1_56side_all", ch032_1side_5_2side_4_3side_3_4side_1_56side_all),
    ("ch032_1side_5_2side_4_3side_3_4side_2_56side_all", ch032_1side_5_2side_4_3side_3_4side_2_56side_all),
    ("ch032_1side_5_2side_4_3side_3_4side_3_56side_all", ch032_1side_5_2side_4_3side_3_4side_3_56side_all),
    ("ch032_1side_5_2side_4_3side_4_4side_1_56side_all", ch032_1side_5_2side_4_3side_4_4side_1_56side_all),
    ("ch032_1side_5_2side_4_3side_4_4side_2_56side_all", ch032_1side_5_2side_4_3side_4_4side_2_56side_all),
    ("ch032_1side_5_2side_4_3side_4_4side_3_56side_all", ch032_1side_5_2side_4_3side_4_4side_3_56side_all),
    ("ch032_1side_5_2side_4_3side_4_4side_4_56side_all", ch032_1side_5_2side_4_3side_4_4side_4_56side_all),
    # ### 1side_5 ####### 2side_5
    ("ch032_1side_5_2side_5_3side_1_4side_1_56side_all", ch032_1side_5_2side_5_3side_1_4side_1_56side_all),
    ("ch032_1side_5_2side_5_3side_2_4side_1_56side_all", ch032_1side_5_2side_5_3side_2_4side_1_56side_all),
    ("ch032_1side_5_2side_5_3side_2_4side_2_56side_all", ch032_1side_5_2side_5_3side_2_4side_2_56side_all),
    ("ch032_1side_5_2side_5_3side_3_4side_1_56side_all", ch032_1side_5_2side_5_3side_3_4side_1_56side_all),
    ("ch032_1side_5_2side_5_3side_3_4side_2_56side_all", ch032_1side_5_2side_5_3side_3_4side_2_56side_all),
    ("ch032_1side_5_2side_5_3side_3_4side_3_56side_all", ch032_1side_5_2side_5_3side_3_4side_3_56side_all),
    ("ch032_1side_5_2side_5_3side_4_4side_1_56side_all", ch032_1side_5_2side_5_3side_4_4side_1_56side_all),
    ("ch032_1side_5_2side_5_3side_4_4side_2_56side_all", ch032_1side_5_2side_5_3side_4_4side_2_56side_all),
    ("ch032_1side_5_2side_5_3side_4_4side_3_56side_all", ch032_1side_5_2side_5_3side_4_4side_3_56side_all),
    ("ch032_1side_5_2side_5_3side_4_4side_4_56side_all", ch032_1side_5_2side_5_3side_4_4side_4_56side_all),
    ("ch032_1side_5_2side_5_3side_5_4side_1_56side_all", ch032_1side_5_2side_5_3side_5_4side_1_56side_all),
    ("ch032_1side_5_2side_5_3side_5_4side_2_56side_all", ch032_1side_5_2side_5_3side_5_4side_2_56side_all),
    ("ch032_1side_5_2side_5_3side_5_4side_3_56side_all", ch032_1side_5_2side_5_3side_5_4side_3_56side_all),
    ("ch032_1side_5_2side_5_3side_5_4side_4_56side_all", ch032_1side_5_2side_5_3side_5_4side_4_56side_all),
    ("ch032_1side_5_2side_5_3side_5_4side_5_56side_all", ch032_1side_5_2side_5_3side_5_4side_5_56side_all),
    # ### 1side_6 ####### 2side_1
    ("ch032_1side_6_2side_1_3side_1_4side_1_56side_all", ch032_1side_6_2side_1_3side_1_4side_1_56side_all),
    # ### 1side_6 ####### 2side_2
    ("ch032_1side_6_2side_2_3side_1_4side_1_56side_all", ch032_1side_6_2side_2_3side_1_4side_1_56side_all),
    ("ch032_1side_6_2side_2_3side_2_4side_1_56side_all", ch032_1side_6_2side_2_3side_2_4side_1_56side_all),
    ("ch032_1side_6_2side_2_3side_2_4side_2_56side_all", ch032_1side_6_2side_2_3side_2_4side_2_56side_all),
    # ### 1side_6 ####### 2side_3
    ("ch032_1side_6_2side_3_3side_1_4side_1_56side_all", ch032_1side_6_2side_3_3side_1_4side_1_56side_all),
    ("ch032_1side_6_2side_3_3side_2_4side_1_56side_all", ch032_1side_6_2side_3_3side_2_4side_1_56side_all),
    ("ch032_1side_6_2side_3_3side_2_4side_2_56side_all", ch032_1side_6_2side_3_3side_2_4side_2_56side_all),
    ("ch032_1side_6_2side_3_3side_3_4side_1_56side_all", ch032_1side_6_2side_3_3side_3_4side_1_56side_all),
    ("ch032_1side_6_2side_3_3side_3_4side_2_56side_all", ch032_1side_6_2side_3_3side_3_4side_2_56side_all),
    ("ch032_1side_6_2side_3_3side_3_4side_3_56side_all", ch032_1side_6_2side_3_3side_3_4side_3_56side_all),
    # ### 1side_6 ####### 2side_4
    ("ch032_1side_6_2side_4_3side_1_4side_1_56side_all", ch032_1side_6_2side_4_3side_1_4side_1_56side_all),
    ("ch032_1side_6_2side_4_3side_2_4side_1_56side_all", ch032_1side_6_2side_4_3side_2_4side_1_56side_all),
    ("ch032_1side_6_2side_4_3side_2_4side_2_56side_all", ch032_1side_6_2side_4_3side_2_4side_2_56side_all),
    ("ch032_1side_6_2side_4_3side_3_4side_1_56side_all", ch032_1side_6_2side_4_3side_3_4side_1_56side_all),
    ("ch032_1side_6_2side_4_3side_3_4side_2_56side_all", ch032_1side_6_2side_4_3side_3_4side_2_56side_all),
    ("ch032_1side_6_2side_4_3side_3_4side_3_56side_all", ch032_1side_6_2side_4_3side_3_4side_3_56side_all),
]
for ana_name, _row_col_results in _ANA_JOBS:
    analyzer = Row_col_exps_analyzer(
        ana_describe=f"{ana_dir}/0_ana_{ana_name}",  # one output sub-dir per group
        ana_what_sees="see",
        ana_what="mask",
        row_col_results=_row_col_results,
        show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False,
        img_h=256, img_w=256, fontsize=8, title_fontsize=16, jump_to=115,
        fix_size=(256, 256))
    # Follow-up steps were disabled in the original; kept here for reference:
    # analyzer.analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)
    # analyzer.Gather_all_see_final_img()
##### 3side_4
ana_name = "ch032_1side_6_2side_4_3side_4_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_4_3side_4_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_4_3side_4_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_4_3side_4_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_4_3side_4_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_4_3side_4_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_4_3side_4_4side_4_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_4_3side_4_4side_4_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_5
##### 3side_1
ana_name = "ch032_1side_6_2side_5_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_2
ana_name = "ch032_1side_6_2side_5_3side_2_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_2_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_2_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_2_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_3
ana_name = "ch032_1side_6_2side_5_3side_3_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_3_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_3_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_3_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_3_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_3_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_4
ana_name = "ch032_1side_6_2side_5_3side_4_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_4_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_4_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_4_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_4_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_4_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_4_4side_4_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_4_4side_4_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_5
ana_name = "ch032_1side_6_2side_5_3side_5_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_5_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_5_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_5_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_5_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_5_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_5_4side_4_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_5_4side_4_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_5_3side_5_4side_5_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_5_3side_5_4side_5_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
####### 2side_6
##### 3side_1
ana_name = "ch032_1side_6_2side_6_3side_1_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_1_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_2
ana_name = "ch032_1side_6_2side_6_3side_2_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_2_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_2_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_2_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_3
ana_name = "ch032_1side_6_2side_6_3side_3_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_3_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_3_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_3_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_3_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_3_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_4
ana_name = "ch032_1side_6_2side_6_3side_4_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_4_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_4_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_4_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_4_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_4_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_4_4side_4_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_4_4side_4_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_5
ana_name = "ch032_1side_6_2side_6_3side_5_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_5_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_5_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_5_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_5_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_5_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_5_4side_4_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_5_4side_4_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_5_4side_5_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_5_4side_5_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
##### 3side_6
ana_name = "ch032_1side_6_2side_6_3side_6_4side_1_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_6_4side_1_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_6_4side_2_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_6_4side_2_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_6_4side_3_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_6_4side_3_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_6_4side_4_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_6_4side_4_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_6_4side_5_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_6_4side_5_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
ana_name = "ch032_1side_6_2side_6_3side_6_4side_6_56side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="mask",
row_col_results=ch032_1side_6_2side_6_3side_6_4side_6_56side_all, show_in_img=True, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=256, img_w=256, fontsize= 8, title_fontsize=16, jump_to=115, fix_size=(256, 256))\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
# .Gather_all_see_final_img()
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
2320023c2eb59c4ab96169508000ac9e65da1888 | fcfe929bc654e86a36ca4def29811ce09a86b0f2 | /bin/pilprint.py | de41bae68c007367dda9bc66b59cdbdac14d3d85 | [] | no_license | frclasso/trydjango19 | b3e12500acf116e2c705a3624bbcd6eaa08ca593 | e6f871121c2ec38bc3798752d96400a03287e071 | refs/heads/master | 2020-12-30T22:56:59.320974 | 2017-02-01T16:21:53 | 2017-02-01T16:21:53 | 80,637,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,660 | py | #!/Users/fabio/Estudo/Prog/Django/coding-for-entrepreneurs/trydjango19/bin/python
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
# Version banner; rendered in small type in the footer of every printed page.
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
# Image box on US Letter paper, in PostScript points (72 pt = 1 inch).
# Presumably (x0, y0, x1, y1): 1-inch left/bottom margins and a 7.5in x 10in
# printable area -- passed as the box to ps.image() below; verify against
# PSDraw.image's box convention.
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
    """Build a one-line page caption: file title plus (format, size, mode)."""
    basename = os.path.split(filepath)[1]
    title = os.path.splitext(basename)[0]
    if image.format:
        details = " (" + image.format + " %dx%d "
    else:
        # Unknown source format: omit it from the caption.
        details = " (%dx%d "
    return title + details % image.size + image.mode + ")"
if len(sys.argv) == 1:
    # Invoked with no file arguments: show usage text and exit with an error.
    usage = (
        "PIL Print 0.3/2003-05-05 -- print image files",
        "Usage: pilprint files...",
        "Options:",
        " -c colour printer (default is monochrome)",
        " -d debug (show available drivers)",
        " -p print via lpr (default is stdout)",
        " -P <printer> same as -p but use given printer",
    )
    for usage_line in usage:
        print(usage_line)
    sys.exit(1)
try:
    # Parse command-line flags; only -P takes an argument (the printer name).
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as err:
    print(err)
    sys.exit(1)
printerArgs = []  # empty list means: write PostScript to stdout
monochrome = 1  # greyscale output keeps files small (the common case)
for flag, value in opt:
    if flag == "-d":
        # Debug mode: dump the registered image plugins and stop.
        Image.init()
        print(Image.ID)
        sys.exit(1)
    if flag == "-c":
        # Colour printer: skip the greyscale conversion.
        monochrome = 0
    elif flag == "-p":
        # Send output through the default print spooler.
        printerArgs = ["lpr"]
    elif flag == "-P":
        # Same, but target the named printer.
        printerArgs = ["lpr", "-P%s" % value]
# Convert each named image to a one-page PostScript document and either pipe
# it to lpr (printerArgs set) or write it to stdout.
for filepath in argv:
    try:
        im = Image.open(filepath)
        title = description(filepath, im)
        if monochrome and im.mode not in ["1", "L"]:
            # Greyscale shrinks the PostScript output; draft() lets the
            # decoder do a cheaper approximate decode before converting.
            im.draft("L", im.size)
            im = im.convert("L")
        if printerArgs:
            p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
            fp = p.stdin
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        # Page title above the image, version banner in the bottom margin.
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3] + 24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1] - 30), VERSION)
        ps.image(letter, im)
        ps.end_document()
        if printerArgs:
            # Close lpr's stdin so the spooler job is submitted.
            fp.close()
    except Exception:
        # Fixed: this was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; now only genuine errors are reported, and the
        # loop moves on to the next file.
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
7889d9e3bb4928344f74b7a14fb81367fd1c9035 | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/gui/scaleform/daapi/view/battle/shared/damage_panel.py | b12696db36e8908d27439c10bb2416f2b89571ea | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 8,746 | py | # 2016.08.04 19:49:54 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/damage_panel.py
import math
import BattleReplay
import BigWorld
import GUI
import Math
from ReplayEvents import g_replayEvents
from debug_utils import LOG_DEBUG
from gui.Scaleform.daapi.view.meta.DamagePanelMeta import DamagePanelMeta
from gui.Scaleform.genConsts.APP_CONTAINERS_NAMES import APP_CONTAINERS_NAMES
from gui.Scaleform.locale.INGAME_GUI import INGAME_GUI
from gui.app_loader.loader import g_appLoader
from gui.battle_control import g_sessionProvider, vehicle_getter
from gui.battle_control.battle_constants import VEHICLE_GUI_ITEMS
from gui.battle_control.battle_constants import VEHICLE_VIEW_STATE
from helpers import i18n
# Dispatch table used by DamagePanel.__onVehicleStateUpdated: maps a
# VEHICLE_VIEW_STATE identifier to the name of the DamagePanel method (or
# direct Flash as_*S call) that consumes that state's payload.  Entries whose
# value starts with 'as_' go straight to the Flash component; the rest are
# Python-side helpers that pre-process the value first.
_STATE_HANDLERS = {VEHICLE_VIEW_STATE.HEALTH: '_updateHealth',
 VEHICLE_VIEW_STATE.SPEED: 'as_updateSpeedS',
 VEHICLE_VIEW_STATE.CRUISE_MODE: 'as_setCruiseModeS',
 VEHICLE_VIEW_STATE.FIRE: 'as_setFireInVehicleS',
 VEHICLE_VIEW_STATE.AUTO_ROTATION: 'as_setAutoRotationS',
 VEHICLE_VIEW_STATE.DESTROYED: '_updateDestroyed',
 VEHICLE_VIEW_STATE.CREW_DEACTIVATED: '_updateCrewDeactivated',
 VEHICLE_VIEW_STATE.PLAYER_INFO: '_updatePlayerInfo',
 VEHICLE_VIEW_STATE.DEVICES: '_updateDeviceState',
 VEHICLE_VIEW_STATE.REPAIRING: '_updateRepairingDevice',
 VEHICLE_VIEW_STATE.SWITCHING: '_switching',
 VEHICLE_VIEW_STATE.RPM: 'as_setNormalizedEngineRpmS',
 VEHICLE_VIEW_STATE.MAX_SPEED: 'as_setMaxSpeedS',
 VEHICLE_VIEW_STATE.VEHICLE_MOVEMENT_STATE: '_updateVehicleMovementState',
 VEHICLE_VIEW_STATE.VEHICLE_ENGINE_STATE: '_updateVehicleEngineState'}
def _getHealthParams(health, maxHealth):
raise maxHealth > 0 or AssertionError
healthStr = '%d/%d' % (health, maxHealth)
progress = round(100 * health / maxHealth, 0)
return (healthStr, progress)
class _TankIndicatorCtrl(object):
    """Owns the native WGTankIndicatorFlash widget (hull/turret direction
    indicator embedded in the damage panel) and feeds it matrix providers."""

    def __init__(self, damagePanel):
        app = g_appLoader.getDefBattleApp()
        if app is None:
            # No battle app available -- nothing to attach the widget to.
            return
        component = GUI.WGTankIndicatorFlash(app.movie, '_level0.root.{}.main.damagePanel.tankIndicator'.format(APP_CONTAINERS_NAMES.VIEWS))
        component.wg_inputKeyMode = 2
        app.component.addChild(component, 'tankIndicator')

    def __del__(self):
        LOG_DEBUG('_TankIndicatorCtrl deleted')

    def clear(self, ui):
        # Detach the native widget from the Flash UI.
        setattr(ui.component, 'tankIndicator', None)

    def setup(self, ui, vehicle, yawLimits):
        """Bind the indicator to a vehicle's hull/turret matrices.

        yawLimits is a Math.Vector2 of turret yaw constraints, or falsy for an
        unconstrained (360-degree) turret.
        """
        hullMat = BigWorld.player().getOwnVehicleMatrix() if vehicle.isPlayerVehicle else vehicle.matrix
        turretMat = vehicle.appearance.turretMatrix
        indicator = ui.component.tankIndicator
        # A zero vector tells the native widget "no yaw constraint".
        indicator.wg_turretYawConstraints = yawLimits if yawLimits else Math.Vector2(0.0, 0.0)
        indicator.wg_hullMatProv = hullMat
        indicator.wg_turretMatProv = turretMat
class DamagePanel(DamagePanelMeta):
def __init__(self):
super(DamagePanel, self).__init__()
self.__tankIndicator = None
self.__isShow = True
self._maxHealth = 0
return
def __del__(self):
LOG_DEBUG('DamagePanel deleted')
def showAll(self, isShow):
if self.__isShow != isShow:
self.__isShow = isShow
self.as_showS(isShow)
def getTooltipData(self, entityName, state):
if entityName in VEHICLE_GUI_ITEMS:
formatter = '#ingame_gui:damage_panel/devices/{}/{}'
else:
formatter = '#ingame_gui:damage_panel/crew/{}/{}'
return i18n.makeString(formatter.format(entityName, state))
def clickToTankmanIcon(self, entityName):
self.__changeVehicleSetting('medkit', entityName)
def clickToDeviceIcon(self, entityName):
self.__changeVehicleSetting('repairkit', entityName)
def clickToFireIcon(self):
self.__changeVehicleSetting('extinguisher', None)
return
def _populate(self):
super(DamagePanel, self)._populate()
self.__tankIndicator = _TankIndicatorCtrl(self.flashObject)
ctrl = g_sessionProvider.shared.vehicleState
if ctrl is not None:
ctrl.onVehicleControlling += self.__onVehicleControlling
ctrl.onVehicleStateUpdated += self.__onVehicleStateUpdated
vehicle = ctrl.getControllingVehicle()
if vehicle:
self._updatePlayerInfo(vehicle.id)
self.__onVehicleControlling(vehicle)
for stateID in _STATE_HANDLERS.iterkeys():
value = ctrl.getStateValue(stateID)
if value:
self.__onVehicleStateUpdated(stateID, value)
self.as_setStaticDataS(i18n.makeString(INGAME_GUI.PLAYER_MESSAGES_TANK_IN_FIRE))
g_replayEvents.onPause += self.__onReplayPaused
return
def _dispose(self):
ctrl = g_sessionProvider.shared.vehicleState
if ctrl is not None:
ctrl.onVehicleControlling -= self.__onVehicleControlling
ctrl.onVehicleStateUpdated -= self.__onVehicleStateUpdated
if self.__tankIndicator:
self.__tankIndicator = None
self.__isShow = False
g_replayEvents.onPause -= self.__onReplayPaused
super(DamagePanel, self)._dispose()
return
def _updatePlayerInfo(self, value):
result = g_sessionProvider.getCtx().getPlayerFullNameParts(vID=value, showVehShortName=False)
self.as_setPlayerInfoS(result.playerName, result.clanAbbrev, result.regionCode, result.vehicleName)
def _updateDeviceState(self, value):
self.as_updateDeviceStateS(*value[:2])
def _updateRepairingDevice(self, value):
self.as_updateRepairingDeviceS(*value)
def _updateCrewDeactivated(self, deathZoneID):
self.as_setCrewDeactivatedS()
def _updateDestroyed(self, _ = None):
self.as_setVehicleDestroyedS()
def _updateVehicleMovementState(self, runAnimation):
if runAnimation:
self.as_startVehicleStartAnimS()
else:
self.as_finishVehicleStartAnimS()
    def _updateVehicleEngineState(self, runAnimation):
        """Play the engine-start animation, or finish it when the state clears.

        Note: the stop branch calls the same as_finishVehicleStartAnimS used by
        the movement handler above.
        """
        if runAnimation:
            self.as_playEngineStartAnimS()
        else:
            self.as_finishVehicleStartAnimS()
    def _updateHealth(self, health):
        # Convert raw health into the (string, progress) pair the Flash UI expects.
        self.as_updateHealthS(*_getHealthParams(health, self._maxHealth))
    def _switching(self, _):
        # Reset the panel when control switches to another vehicle.
        self.as_resetS()
    @staticmethod
    def __changeVehicleSetting(tag, entityName):
        """Ask the equipment controller to use the consumable identified by *tag*
        (medkit / repairkit / extinguisher) on *entityName*; a failed attempt is
        routed to the in-battle messages controller."""
        ctrl = g_sessionProvider.shared.equipments
        if ctrl is None:
            return
        else:
            result, error = ctrl.changeSettingByTag(tag, entityName=entityName, avatar=BigWorld.player())
            if not result and error:
                # Re-use the local name for the messages controller to report the error.
                ctrl = g_sessionProvider.shared.messages
                if ctrl is not None:
                    ctrl.onShowVehicleErrorByKey(error.key, error.ctx)
            return
    def __onVehicleControlling(self, vehicle):
        """Reconfigure the panel for the vehicle the player starts controlling."""
        vTypeDesc = vehicle.typeDescriptor
        vType = vTypeDesc.type
        yawLimits = vehicle_getter.getYawLimits(vTypeDesc)
        if yawLimits:
            # Convert yaw limits from radians to degrees; the lower bound is
            # negated for the Flash indicator's coordinate convention.
            inDegrees = (math.degrees(-yawLimits[0]), math.degrees(yawLimits[1]))
        else:
            inDegrees = None
        if vehicle.isPlayerVehicle:
            isAutoRotationOn = vehicle_getter.isAutoRotationOn(vTypeDesc)
        else:
            # Auto-rotation is only queried for the player's own vehicle.
            isAutoRotationOn = None
        self._maxHealth = vTypeDesc.maxHealth
        helthStr, helthProgress = _getHealthParams(vehicle.health, self._maxHealth)
        self.as_setupS(helthStr, helthProgress, vehicle_getter.getVehicleIndicatorType(vTypeDesc), vehicle_getter.getCrewMainRolesWithIndexes(vType.crewRoles), inDegrees, vehicle_getter.hasTurretRotator(vTypeDesc), isAutoRotationOn)
        if self.__tankIndicator:
            app = g_appLoader.getDefBattleApp()
            self.__tankIndicator.setup(app, vehicle, yawLimits)
        return
    def __onVehicleStateUpdated(self, state, value):
        """Dispatch a vehicle-state change to the matching _update* handler.

        _STATE_HANDLERS maps state ids to handler method names; states without
        a mapping are ignored. Handlers taking no value are called bare.
        """
        if state not in _STATE_HANDLERS:
            return
        else:
            handler = getattr(self, _STATE_HANDLERS[state], None)
            if handler and callable(handler):
                if value is not None:
                    handler(value)
                else:
                    handler()
            return
    def __onReplayPaused(self, _):
        # Keep the Flash view in sync with the replay's playback speed.
        self.as_setPlaybackSpeedS(BattleReplay.g_replayCtrl.playbackSpeed)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\battle\shared\damage_panel.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:49:54 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
4a14534207f27c084ecc4c14200ac529bc1d2ea5 | df1254b56f35b24644e00493c50d4b6eb3c15b7b | /colour/characterisation/datasets/cameras/dslr/__init__.py | d89d5c22d6e01606bc0258e0b8f6be74b28a1359 | [
"BSD-3-Clause"
] | permissive | colour-science/colour | 908400b227cf81668675e41099256ce50b23ae4b | 1fdf3b3042922e8d4f86b989b00a06e7e5d81102 | refs/heads/develop | 2023-09-01T23:17:07.186869 | 2023-08-26T09:40:45 | 2023-08-26T09:40:45 | 17,114,363 | 1,756 | 301 | BSD-3-Clause | 2023-09-14T10:24:37 | 2014-02-23T18:55:40 | Python | UTF-8 | Python | false | false | 111 | py | from .sensitivities import MSDS_CAMERA_SENSITIVITIES_DSLR
__all__ = [
"MSDS_CAMERA_SENSITIVITIES_DSLR",
]
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
b98b5fab53f246bfb7259d79aeeba263873e17d1 | 18d223e5ea590e60bc791987034276eed2651721 | /sk1-tt/lesson2-data-processing/c5-put-it-all-together/c50_pipeline_sample.py | 539f7210c85f09ecf85bf1e3d45298cb7f22b35d | [] | no_license | sonicfigo/tt-sklearn | 83b419b4f8984fc63ef41bf2af5b682477350992 | 8e473e958b0afc6154ba3c4dee818fd4da8f504b | refs/heads/master | 2020-03-26T16:07:59.758723 | 2018-09-25T06:28:47 | 2018-09-25T06:28:47 | 145,084,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | # coding=utf-8
"""
非官网,网络帖子里的书写例子
除了最后一个学习器之外,前面的所有学习器必须提供transform方法,
该方法用于数据转化,如:
- 归一化
- 正则化
- 特征提取
若没有,就异常
"""
from sklearn.datasets import load_digits
from sklearn import cross_validation
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
# Shared logistic-regression estimator used as the final step of both pipelines.
m2 = LogisticRegression(C=1)
def test_Pipeline_ex(data):
    """Demonstrate an *invalid* pipeline.

    Every step except the last must provide transform(); LinearSVC does not,
    so constructing/fitting this pipeline is expected to raise (the caller
    catches and prints the exception).
    """
    m1 = LinearSVC(C=1, penalty='l1', dual=False)
    pipeline = Pipeline(steps=[('Linear_SVM', m1),
                               ('LogisticRegression', m2)])
    x_train, x_test, y_train, y_test = data
    pipeline.fit(x_train, y_train)
    print('name steps:', pipeline.named_steps)
    print('Pipeline Score:', pipeline.score(x_test, y_test))
"""
工作流程:先进行pca降为,然后使用Logistic回归,来分类
"""
def test_Pipeline_ok(data):
pipeline = Pipeline(steps=[('PCA', PCA()),
('LogisticRegression', m2)])
x_train, x_test, y_train, y_test = data
pipeline.fit(x_train, y_train)
print('name steps:', pipeline.named_steps)
print('Pipeline Score:', pipeline.score(x_test, y_test))
if __name__ == '__main__':
    data = load_digits()
    X = data.data
    y = data.target
    try:
        # Expected to fail: LinearSVC (a non-final step) has no transform().
        test_Pipeline_ex(train_test_split(X, y, test_size=0.25,
                                          random_state=0, stratify=y))
    except BaseException as ex:
        print('\n===================error:')
        print(ex)
    print('\n===================ok:')
    test_Pipeline_ok(train_test_split(X, y, test_size=0.25,
                                      random_state=0, stratify=y))
| [
"sonic821113@gmail.com"
] | sonic821113@gmail.com |
d8286771a33339832c9f7ff9aed4b14328bef07b | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_26651.py | e952632bd98e2215ebc2547d55394bc34232afd9 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | # best-fit wireframe in python
griddata()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
c616d088f24594e7ea52799570d97027ccf70e4c | f4d4111c7e51bb2c66ea73198b3f99458ba5822f | /Aula 7/ex012.py | 940e268c61dac38b6eff058984d2498b18da2f33 | [] | no_license | LukeBreezy/Python3_Curso_Em_Video | 871b98b4e56db356fc24f2e953ad33e8928ba118 | 34ad1b5037f916964bde99b500a28aed86f18e39 | refs/heads/main | 2023-03-03T03:33:44.805522 | 2021-02-11T19:52:13 | 2021-02-11T19:52:13 | 338,129,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | prod = float(input('Qual o valor do produto? R$ '))
promo = prod * 0.95
print('O produto com o valor de R$ {:.2f}, na promoção com 5% de desconto fica R$ {:.2f}.'.format(prod, promo).replace('.', ',', 1)) | [
"lkslukas23@gmail.com"
] | lkslukas23@gmail.com |
1570ad62bbaf6ae4817b1160b2254c7b7ca68faa | 43c863fbab46daa09acc4bb178292145a6776929 | /pathfinder/terminal/phybeast/utils/prepare_metadata/commands.py | fe7033acb2c3b3cac2743fc5e694f34661d93649 | [
"MIT"
] | permissive | pf-core/pf-core | be034a420e084e416791c98b659b757c3d7e88c3 | 0caf8abde968b959be2284518f7dc951ba680202 | refs/heads/master | 2020-07-29T04:53:25.306031 | 2019-09-24T05:44:54 | 2019-09-24T05:44:54 | 209,677,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import click
from pathlib import Path
from pathfinder.utils import phybeast_prepare_metadata_file
@click.command()
@click.option(
    "--meta_data", "-m", default="input_alignment.fasta", type=Path,
    # NOTE(review): the default looks like an alignment file, not a
    # tab-delimited metadata table — confirm against callers.
    help="Input meta data file, tab-delimited, includes: name, date columns.",
)
@click.option(
    "--prep", "-p", default="lsd2", help="Prepare metadata file for one of: lsd2, treetime.", type=str,
)
@click.option(
    "--output", "-o", default="lsd2.meta", help="Output path of prepared meta data file.", type=Path,
)
def prepare_metadata(meta_data, prep, output):
    """ Prepare a metadata file (columns: name, date) for lsd2 or treetime """
    phybeast_prepare_metadata_file(meta_file=meta_data, prep=prep, output_file=output)
| [
"eikejoachim.steinig@my.jcu.edu.au"
] | eikejoachim.steinig@my.jcu.edu.au |
0a0d1ba92277fb656ae7de2143df0380583d70dc | ca0c3c1cdfdd714c7780c27fcecd4a2ae39d1474 | /src/fmf/apps/news/migrations/0010_auto__add_field_news_category.py | daa06d37ddb7c686a069b09ab5a6ea086d5e4122 | [] | no_license | vasyabigi/fmf | fce88a45fb47f3f7652995af40b567ffdf27a4a0 | 988ba668f3ce6da2670b987a1eeae3c87761eac5 | refs/heads/master | 2021-01-23T07:29:52.185306 | 2012-08-27T13:11:51 | 2012-08-27T13:11:51 | 2,803,493 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,032 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable `category` varchar(255) column
        to the `news_news` table."""
        # Adding field 'News.category'
        db.add_column('news_news', 'category',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the `category` column from `news_news`."""
        # Deleting field 'News.category'
        db.delete_column('news_news', 'category')
models = {
'news.event': {
'Meta': {'ordering': "('-date_to',)", 'object_name': 'Event'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_from': ('django.db.models.fields.DateField', [], {}),
'date_to': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'short_description': ('django.db.models.fields.TextField', [], {}),
'short_description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'news.news': {
'Meta': {'ordering': "('position',)", 'object_name': 'News'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_description': ('django.db.models.fields.TextField', [], {}),
'short_description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'news.newsimage': {
'Meta': {'ordering': "('position',)", 'object_name': 'NewsImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'news': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['news.News']"}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['news'] | [
"vasyl.stanislavchuk@djangostars.com"
] | vasyl.stanislavchuk@djangostars.com |
fff6dfaf784645d56848142b12192e76bdd9d750 | 2696bd485fd09f8b0199f98972163e1140793fd1 | /ems/errorplugin.py | 91c8226da8423505c58c9beebd227ba9a114261d | [
"MIT"
] | permissive | mtils/ems | 24b192faf1d03f78cb9c930193051666a453d18b | a958177d1474828e1d892dda20f4be68869e0483 | refs/heads/master | 2020-05-30T04:37:52.866679 | 2016-10-04T07:30:42 | 2016-10-04T07:30:42 | 30,531,077 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | '''
Created on 28.11.2010
@author: michi
'''
from abc import ABCMeta,abstractmethod
import ems.errorhandler
class ErrorPlugin(object):
    '''
    Abstract base class for plugins attached to an error handler.
    Concrete plugins must implement notify() to receive event callbacks.
    '''
    __metaclass__ = ABCMeta

    @abstractmethod
    def notify(self,caller,eventName,params):
        # Invoked by the owning handler when an event occurs.
        pass

    def getHandler(self):
        # Accessor backing the `handler` property below.
        return self.__handler

    def setHandler(self, value):
        # NOTE(review): ems.errorhandler is a *module*, so this isinstance()
        # call raises TypeError for any value — presumably the intent was a
        # class defined in that module. TODO confirm and fix.
        if not isinstance(value, ems.errorhandler):
            raise TypeError("The Errorhandler has to by class or subclass of ems.errorhandler")
        self.__handler = value

    def delHandler(self):
        del self.__handler

    handler = property(getHandler, setHandler, delHandler, "emitter's docstring")
| [
"mtils@web-utils.de"
] | mtils@web-utils.de |
0d47747a40d0c7c0273029effd0c1b8334da506e | c2ee9d6d84e2270ba4c9d6062460a2be0ff5f19c | /674. Longest Continuous Increasing Subsequence.py | 4e450a4b5cac87731b991e0dd25d8a1e2656db08 | [] | no_license | Peiyu-Rang/LeetCode | 0dd915638e8c41c560952d86b4047c85b599d630 | f79886ed3022664c3291e4e78129bd8d855cf929 | refs/heads/master | 2021-11-27T23:48:39.946840 | 2021-11-09T12:47:48 | 2021-11-09T12:47:48 | 157,296,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 23:00:52 2021
@author: Caven
"""
class Solution:
    def findLengthOfLCIS(self, nums: List[int]) -> int:
        """Return the length of the longest strictly increasing contiguous run.

        Scans once, tracking the length of the current increasing run and the
        best run seen so far. An empty input yields 0.
        """
        best = 0
        run = 0
        for i, value in enumerate(nums):
            if i > 0 and value > nums[i - 1]:
                # Still strictly increasing: extend the current run.
                run += 1
            else:
                # Run broken (or first element): restart at length 1.
                run = 1
            best = max(best, run)
        return best
"prang3@gatech.edu"
] | prang3@gatech.edu |
17d39886af1a592ec0dccba77fce4f04b761ae65 | a979aeeb72f46a74a2d59ae8be88ee1553fe1419 | /learn_jinja2/p02_loader.py | 091acb5be0ba0fc13e952ecdc0269798dc0e3a91 | [] | no_license | MacHu-GWU/learn_jinja2-project | 6426db19c222fd58f7abf906911bd54afce694d6 | ec343516bf98a8b05a717d6030807237e47e8e48 | refs/heads/master | 2021-01-11T17:55:03.757324 | 2017-02-13T21:29:29 | 2017-02-13T21:29:29 | 79,875,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
loader是jinja2中用于从文件中读取模板代码的中间类。
- FileSystemLoader: 指定一个目录, 在目录下根据文件名寻找模板。
- PackageLoader: 指定一个安装好的python包, 在 ``package_name.package_path`` 目录
下寻找模板。
- DictLoader: 使用 ``{key: source}`` 的形式读取模板。
- FunctionLoader: 使用一个函数, 接受key为输入, 返回模板源代码。
ref: http://jinja.pocoo.org/docs/2.9/api/#loaders
""" | [
"husanhe@gmail.com"
] | husanhe@gmail.com |
4eed77ded43f3a75192036d14cd1d98c2408c5b9 | df23ee09fffae3ea6a32925f80996f474aefabb9 | /src/myclips/rete/tests/VariableBindingTest.py | 50ad939a4d62e44007c29295d99499e69bb6b296 | [] | no_license | stefano-bragaglia/myclips | 9e5e985d4b67250723012da4b2ed720e2bfeac72 | bb7b8dc6c1446235777c0e4ebf23e641d99ebc03 | refs/heads/master | 2022-08-01T10:09:45.019686 | 2020-05-27T19:59:47 | 2020-05-27T19:59:47 | 267,410,326 | 0 | 0 | null | 2020-05-27T19:38:28 | 2020-05-27T19:38:28 | null | UTF-8 | Python | false | false | 3,435 | py | '''
Created on 24/lug/2012
@author: Francesco Capozzo
'''
from myclips.rete.tests.BetaTest import BetaTest
import myclips
from myclips.rete.tests.locations import VariableReference
from myclips.rete.tests import getTokenAnchestor
class VariableBindingTest(BetaTest):
'''
Make sure that variable binding is consistent
through multiple variable locations
'''
def __init__(self, reference):
'''
Create a new VariableBindingTest. This test make sure
that if a variable is used in multiple locations,
it has a consistent value across all locations
@param reference: is a location with a reference to a previous binded variable
@type reference: VariableReference
@return: False if test fail, True otherwise
@rtype: Boolean
'''
self._reference = reference
# self._wmePositionIndex = wmePositionIndex
# self._tokenRelativeIndex = tokenRelativeIndex
# self._tokenPositionIndex = tokenPositionIndex # this is an array of position.
# This allow to go deep inside fact-index
# and multifield-index in fact index
@property
def reference(self):
return self._reference
def isValid(self, token, wme):
reference = self._reference
assert isinstance(reference, VariableReference)
try:
# if token relative index is 0, then the test is an intra-element
# test performed in the beta network
# this means that the wme where the variable was found first
# is the same where the variable was found again
if reference.relPatternIndex != 0:
nToken = getTokenAnchestor(token, (-1 * reference.relPatternIndex) - 1)
# get the exact wme value of the token where variable for used first
valueInTokenWme = reference.reference.toValue(nToken.wme)
else:
valueInTokenWme = reference.reference.toValue(wme)
# get the value in current wme there variable must have the same value
valueInWme = reference.toValue(wme)
# when i've found them all
# i can compare them
# for eq or neq based on reference.isNegative value
eqResult = (valueInTokenWme == valueInWme)
return eqResult if reference.isNegative is not True else not eqResult
except KeyError:
# it's ok. If a catch this exception
# means that the wme has not an index at all
# so no value can be tested.
# This make the test fail
return False
except Exception, e:
# Another type of exception catch
# better log this
myclips.logger.warning("Unexpected exception caught in %s: token=%s, wme=%s, exception=%s", self, token, wme, repr(e))
# anyway test failed
return False
def __str__(self, *args, **kwargs):
return str(self._reference)
def __eq__(self, other):
return self.__class__ == other.__class__ \
and self._reference == other._reference
def __neq__(self, other):
return not self.__eq__(other) | [
"ximarx@gmail.com"
] | ximarx@gmail.com |
73467450fa0036f7742a72b47a6eb21901b226b2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2634/59018/259642.py | 59bb88db2941ce3defa4ee3eae4a3aeb0e918c70 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from fractions import Fraction
a1=input()[1:-1].split(',')
a=[int(y) for y in a1]
b=[]
k=int(input())
for j in range(len(a)-1):
for i in range(j+1,len(a)):
b.append(Fraction(a[j],a[i]))
b.sort()
print(b) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
d5f98c309f1abe4ac6a2df5d704373d1c64f5461 | 08e0fdf4c9b516b96e65b94c2cc9dbda61af6f50 | /screensaver.py | 61830f6042b24fb282196a4463be0bd8895846e4 | [
"CC0-1.0"
] | permissive | von/scripts | 1d1f8e7310ee5f0f48141a199225ef00513216ff | bef4acf5c5e99a74e1759045c13496708f5430d4 | refs/heads/main | 2023-04-06T03:34:19.770846 | 2023-04-01T23:50:25 | 2023-04-01T23:50:59 | 1,017,631 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | #!/usr/bin/env python3
"""Manage the OSX screensaver
OSX seems to only accept following values for screensaver idle delay:
60, 120, 300, 600, 1200, 1800, 6000
"""
import argparse
import subprocess
import sys
import time
# Default time to delay screensaver in seconds.
# NOTE: must be one of the idle-time values OSX actually honours
# (60/120/300/600/1200/1800/6000 — see set_idleTime); anything else
# silently becomes 1200.
DEFAULT_DELAY = 1200
# Default time to suspend the screensaver, in seconds (one hour).
DEFAULT_SUSPEND_TIME = 3600
def killall_cfprefsd():
    """Restart cfprefsd so values written with `defaults` are picked up.

    Returns the `killall` exit status (0 on success).
    Kudos: https://superuser.com/a/914884"""
    return subprocess.call(["killall", "cfprefsd"])
def set_idleTime(seconds):
    """Set the screensaver idle delay via `defaults -currentHost write`.

    0 disables the screensaver.
    Otherwise, OSX seems to only accept the following values:
    60, 120, 300, 600, 1200, 1800, 6000
    Anything else defaults to 1200.

    Returns 0 on success, or the exit status of the failing command.
    """
    rc = subprocess.call(["defaults",
                          "-currentHost",
                          "write",
                          "com.apple.screensaver",
                          "idleTime",
                          "-int",
                          str(seconds)
                          ])
    if rc:
        return rc
    # cfprefsd caches preferences; restart it so the new value takes effect.
    rc = killall_cfprefsd()
    return rc
def get_idleTime():
    """Get idleTime for screensaver in seconds (as an int).

    Raises subprocess.CalledProcessError if `defaults read` fails.
    """
    time_str = subprocess.check_output(
        ["defaults",
         "-currentHost",
         "read",
         "com.apple.screensaver",
         "idleTime"
         ])
    # check_output returns bytes; int() accepts bytes directly. The local
    # `time` shadows the time module inside this function (harmless here).
    time = int(time_str.strip())
    return time
def cmd_disable(args):
    """Handler for `disable`: turn the screensaver off (idleTime 0)."""
    args.print_func("Disabling screensaver")
    return set_idleTime(0)
def cmd_enable(args):
    """Handler for `enable`: set the screensaver delay to args.delay seconds.

    See set_idleTime() for the values OSX accepts."""
    delay = args.delay
    args.print_func("Enabling screensaver (delay: {}s)".format(delay))
    return set_idleTime(delay)
def cmd_get(args):
    """Handler for `get`: print the current screensaver idle time (seconds)."""
    args.print_func(get_idleTime())
    return 0
def cmd_suspend(args):
    """Handler for `suspend`: disable the screensaver for args.time seconds,
    then restore the previously configured delay."""
    suspend_time = args.time
    # Remember the current delay so it can be restored afterwards.
    delay = get_idleTime()
    args.print_func(
        "Suspending screensaver for {} seconds".format(suspend_time))
    rc = set_idleTime(0)
    if rc:
        return rc
    time.sleep(suspend_time)
    args.print_func(
        "Restoring screensaver ({}s)".format(delay))
    rc = set_idleTime(delay)
    return(rc)
def main(argv=None):
    """Parse command-line arguments and dispatch to the chosen sub-command.

    Args:
        argv: full argument vector (argv[0] is the program name); defaults
            to sys.argv when None.

    Returns:
        The sub-command handler's return value (a process exit status).
    """
    # Do argv default this way, as doing it in the functional
    # declaration sets it at compile time.
    if argv is None:
        argv = sys.argv
    # Argument parsing
    parser = argparse.ArgumentParser(
        description=__doc__,  # printed with -h/--help
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # To have --help print defaults with trade-off it changes
        # formatting, use: ArgumentDefaultsHelpFormatter
    )
    # TODO: If no command given, cmd_func stays None and the dispatch below
    # raises TypeError. Should print help instead.
    parser.set_defaults(
        cmd_func=None,
        print_func=print,
    )
    parser.add_argument("--version", action="version", version="%(prog)s 1.0")
    subparsers = parser.add_subparsers(help="Commands")
    # disable command
    parser_disable = subparsers.add_parser("disable",
                                           help="disable screensaver")
    parser_disable.set_defaults(cmd_func=cmd_disable)
    # enable command, with an optional positional delay in seconds
    parser_enable = subparsers.add_parser("enable",
                                          help="enable screensaver")
    parser_enable.set_defaults(
        cmd_func=cmd_enable,
        delay=DEFAULT_DELAY
    )
    parser_enable.add_argument("delay",
                               metavar="seconds",
                               nargs='?',
                               type=int,
                               # OSX seems to only accept these values
                               # anything else defaults to 1200
                               choices=[60, 120, 300, 600,
                                        1200, 1800, 6000])
    # 'get' command: display idle time
    parser_get = subparsers.add_parser("get",
                                       help="get screensave idle time")
    parser_get.set_defaults(cmd_func=cmd_get)
    # suspend command, with an optional positional duration in seconds
    parser_suspend = subparsers.add_parser("suspend",
                                           help="suspend screensaver")
    parser_suspend.set_defaults(
        cmd_func=cmd_suspend,
        time=DEFAULT_SUSPEND_TIME,
    )
    parser_suspend.add_argument("time",
                                metavar="seconds",
                                nargs='?',
                                type=int)
    # Bug fix: parse_args() was called with no arguments, which always read
    # sys.argv and silently ignored an explicitly passed argv. Slicing off
    # argv[0] (the program name) keeps the default path identical.
    args = parser.parse_args(argv[1:])
    return args.cmd_func(args)
return args.cmd_func(args)
if __name__ == "__main__":
sys.exit(main())
| [
"von@vwelch.com"
] | von@vwelch.com |
f49abc3e33c7bf8bfd68d36cf057d21a6a3eb7a1 | 07b249d8b26fc49f1268798b3bd6bdcfd0b86447 | /0x11-python-network_1/10-my_github.py | f3d952b775ddf198c746692937a4ff8fe84c66f8 | [] | no_license | leocjj/holbertonschool-higher_level_programming | 544d6c40632fbcf721b1f39d2453ba3d033007d6 | 50cf2308d2c9eeca8b25c01728815d91e0a9b784 | refs/heads/master | 2020-09-28T23:21:13.378060 | 2020-08-30T23:45:11 | 2020-08-30T23:45:11 | 226,889,413 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | #!/usr/bin/python3
"""
script that takes your Github credentials (username and password) and uses
the Github API to display your id
"""
if __name__ == "__main__":
import requests
from sys import argv
r = requests.get('https://api.github.com/user', auth=(argv[1], argv[2]))
print(r.json().get('id'))
| [
"leocj@hotmail.com"
] | leocj@hotmail.com |
270847f71701b79d022b44eb6aa6262c706cf026 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/YoloV3_ID1790_for_PyTorch/mmdet/models/losses/ae_loss.py | 1cedb5ed11153305ccd17816717d025324bbb5ff | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,350 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss including two parts: pull loss and push loss.
    Pull loss makes embedding vectors from same object closer to each other.
    Push loss distinguish embedding vector from different objects, and makes
    the gap between them is large enough.

    During computing, usually there are 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corner of the only object.
        - more than one objects in image: pull loss is computed by corner pairs
            from each object, push loss is computed by each object with all
            other objects. We use confusion matrix with 0 in diagonal to
            compute the push loss.

    Args:
        tl_preds (tensor): Embedding feature map of left-top corner.
        br_preds (tensor): Embedding feature map of bottom-right corner.
        match (list): Downsampled coordinates pair of each ground truth box.
    """
    if len(match) == 0:  # no object in image
        # Keep the autograd graph connected while contributing nothing.
        return tl_preds.sum() * 0., tl_preds.sum() * 0.

    tl_embeds, br_embeds = [], []
    for (tl_y, tl_x), (br_y, br_x) in match:
        tl_embeds.append(tl_preds[:, tl_y, tl_x].view(-1, 1))
        br_embeds.append(br_preds[:, br_y, br_x].view(-1, 1))
    tl_cat = torch.cat(tl_embeds)
    br_cat = torch.cat(br_embeds)
    # Mean embedding of each corner pair (averaging commutes with cat).
    mean_cat = (tl_cat + br_cat) / 2.0

    assert tl_cat.size() == br_cat.size()
    # num is object number in image, dim is dimension of embedding vector
    num, dim = tl_cat.size()
    pull_loss = ((tl_cat - mean_cat).pow(2) +
                 (br_cat - mean_cat).pow(2)).sum() / num

    margin = 1  # exp setting of CornerNet, details in section 3.3 of paper
    # Pairwise mean-embedding differences: the confusion matrix of push loss.
    pairwise = mean_cat.expand((num, num, dim)).permute(1, 0, 2) - mean_cat
    # Zero the diagonal so an object is not pushed away from itself.
    off_diagonal = 1 - torch.eye(num).type_as(mean_cat)
    penalties = off_diagonal * (margin - pairwise.sum(-1).abs())

    if num > 1:  # more than one object in current image
        push_loss = F.relu(penalties).sum() / (num * (num - 1))
    else:
        push_loss = tl_preds.sum() * 0.
    return pull_loss, push_loss
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501

    Args:
        pull_weight (float): Loss weight for corners from same object.
        push_weight (float): Loss weight for corners from different object.
    """

    def __init__(self, pull_weight=0.25, push_weight=0.25):
        super(AssociativeEmbeddingLoss, self).__init__()
        self.pull_weight = pull_weight
        self.push_weight = push_weight

    def forward(self, pred, target, match):
        """Compute weighted (pull, push) losses summed over the batch.

        Args:
            pred: batched top-left corner embedding maps.
            target: batched bottom-right corner embedding maps (despite the
                name, this is the second prediction passed to
                ``ae_loss_per_image`` as ``br_preds``, not a ground truth).
            match: per-image corner coordinate pairs.
        """
        batch = pred.size(0)
        pull_all, push_all = 0.0, 0.0
        for i in range(batch):
            pull, push = ae_loss_per_image(pred[i], target[i], match[i])
            pull_all += self.pull_weight * pull
            push_all += self.push_weight * push
        return pull_all, push_all
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
818560c8ec120e81b3fed0fc5e3cb1e91747f0ba | f05286a2e25950c32c3773f399983b25c87b76b6 | /Setup/SpyderShortcuts.py | fc37c8fb49a51083fab7e60d5ac62258181b1488 | [] | no_license | ajpiter/PythonProTips | cdeaf65771a3f7b21ecd946463e4605a061b9b90 | 5ca6238dfcf248251d6933a313af0ac831ec0117 | refs/heads/master | 2021-07-16T05:30:32.996954 | 2021-04-05T07:44:52 | 2021-04-05T07:44:52 | 92,957,252 | 0 | 1 | null | 2021-01-20T04:21:20 | 2017-05-31T14:53:43 | Python | UTF-8 | Python | false | false | 1,925 | py | #Shortcuts in Spyder
Ctrl-Enter* executes the current cell (menu entry Run ‣ Run cell). A cell is defined as the code between two lines which start with the characters #%%, # %% or # <codecell>.
Shift-Enter* executes the current cell and advances the cursor to the next cell (menu entry Run ‣ Run cell and advance).
Cells are useful for breaking large files or long blocks of code into more manageable chunks. Like those in an IPython notebook, each cell can be run independently.
Alt-Up* moves the current line up. If multiple lines are highlighted, they are moved up together. Alt-Down* works correspondingly, moving line(s) down.
Ctrl-LeftMouseButton or Alt-G* on a function/method in the Editor opens a new Editor tab showing the definition of that function.
Shift-Ctrl-Alt-M* maximizes the current window (or changes the size back to normal if pressed in a maximized window).
Ctrl-Shift-F* activates the Find in Files pane, allowing grep-like searches across all files in a specified scope.
Ctrl - = will increase the font size in the Editor or the Console, whereas Ctrl - - will decrease it.
The font face and size for other parts of the UI can be set under Preferences ‣ General ‣ Appearance ‣ Fonts.
Ctrl-S* in the Editor saves the file currently being edited. This also forces various warning triangles in the left column of the Editor to be updated (otherwise they update every 2.5 seconds by default, which is also configurable).
Ctrl-S* in the Console saves the current IPython session as an HTML file, including any figures that may be displayed inline. This is useful as a quick way of recording what has been done in a session.
(It is not currently possible to load this saved record back into the session -- if you need functionality like this, look for the IPython Notebook).
Ctrl-I* when pressed while the cursor is on an object opens documentation for that object in the help pane.
| [
"noreply@github.com"
] | ajpiter.noreply@github.com |
dd99712e45f55c8a48dd060561422da34ed7e605 | 3433314089e976a121e0a4ff7320d1214faabc8b | /test_autoarray/plot/mapper_rectangular/image_pixel_indexes.py | 0c1ee0a36e3763d8e3a91f0d7aff5ec9206804bc | [
"MIT"
] | permissive | Sketos/PyAutoArray | ab7a63543a35401560ee575c4a8ede7a2561d743 | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | refs/heads/master | 2021-02-12T19:06:17.247806 | 2020-04-10T13:15:00 | 2020-04-10T13:15:00 | 244,619,959 | 0 | 0 | MIT | 2020-03-03T17:21:03 | 2020-03-03T11:35:40 | Python | UTF-8 | Python | false | false | 666 | py | import autoarray as aa
import autoarray.plot as aplt
grid_7x7 = aa.grid.uniform(shape_2d=(7, 7), pixel_scales=0.3)
grid_3x3 = aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0)
rectangular_grid = aa.grid_rectangular.overlay_grid(grid=grid_3x3, shape_2d=(3, 3))
rectangular_mapper = aa.mapper(grid=grid_7x7, pixelization_grid=rectangular_grid)
aplt.mapper_obj(mapper=rectangular_mapper, image_pixel_indexes=[0, 1])
aplt.mapper_obj(mapper=rectangular_mapper, image_pixel_indexes=[[0, 1]])
aplt.mapper_obj(mapper=rectangular_mapper, image_pixel_indexes=[[0, 1], [2]])
aplt.mapper_obj(
mapper=rectangular_mapper, image_pixel_indexes=[[(0, 0), (0, 1)], [(1, 2)]]
)
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
548f5e85c5fe66810e02b1afadde3bb1c33ad383 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/formatter/spaceWithingCallParentheses.py | 3e69dacbe4e4cc78a924dcb3996d43b67ad7fe7a | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 20 | py | func(1, 2)
empty()
| [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
cbd326fd284186fd098598ff3f968b2aa2c2310d | 08120ee05b086d11ac46a21473f3b9f573ae169f | /gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/document_renderers/renderer.py | 82b830eae3f231cbb79c97f339a7a7155eff86c8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | harrystaley/TAMUSA_CSCI4349_Week9_Honeypot | 52f7d5b38af8612b7b0c02b48d0a41d707e0b623 | bd3eb7dfdcddfb267976e3abe4c6c8fe71e1772c | refs/heads/master | 2022-11-25T09:27:23.079258 | 2018-11-19T06:04:07 | 2018-11-19T06:04:07 | 157,814,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud SDK markdown document renderer base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import io
from googlecloudsdk.core import log
from googlecloudsdk.core.resource import resource_printer
import six
from six.moves import range # pylint: disable=redefined-builtin
# Font Attributes.
BOLD, ITALIC, CODE = list(range(3))
class TableColumnAttributes(object):
"""Markdown table column attributes.
Attributes:
align: Column alignment, one of {'left', 'center', 'right'}.
label: Column heading label string.
width: Minimum column width.
"""
def __init__(self, align='left', label=None, width=0):
self.align = align
self.label = label
self.width = width
class TableAttributes(object):
"""Markdown table attributes.
Attributes:
box: True if table and rows framed by box.
columns: The list of column attributes.
heading: The number of non-empty headings.
"""
def __init__(self, box=False):
self.box = box
self.heading = 0
self.columns = []
def AddColumn(self, align='left', label='', width=0):
"""Adds the next column attributes to the table."""
if label:
self.heading += 1
self.columns.append(
TableColumnAttributes(align=align, label=label, width=width))
def GetPrintFormat(self):
"""Constructs and returns a resource_printer print format."""
fmt = ['table']
attr = []
if self.box:
attr += 'box'
if not self.heading:
attr += 'no-heading'
if attr:
fmt += '[' + ','.join(attr) + ']'
fmt += '('
for index, column in enumerate(self.columns):
if index:
fmt += ','
fmt += '[{}]:label={}:align={}'.format(
index, repr(column.label or '').lstrip('u'), column.align)
if column.width:
fmt += ':width={}'.format(column.width)
fmt += ')'
return ''.join(fmt)
@six.add_metaclass(abc.ABCMeta)
class Renderer(object): # pytype: disable=ignored-abstractmethod
r"""Markdown renderer base class.
The member functions provide an abstract document model that matches markdown
entities to output document renderings.
Attributes:
_blank: True if the output already contains a blank line. Used to avoid
sequences of 2 or more blank lines in the output.
_font: The font attribute bitmask.
_indent: List of left indentations in characters indexed by _level.
_lang: ```lang\n...\n``` code block language. None if not in code block,
'' if in code block with no explicit lang specified.
_level: The section or list level counting from 0.
_out: The output stream.
_title: The document tile.
_width: The output width in characters.
"""
def __init__(self, out=None, title=None, width=80):
self._blank = True
self._font = 0
self._indent = []
self._lang = None
self._level = 0
self._out = out or log.out
self._title = title
self._width = width
def Blank(self):
"""The last output line is blank."""
self._blank = True
def Content(self):
"""Some non-blank line content was added to the output."""
self._blank = False
def HaveBlank(self):
"""Returns True if the last output line is blank."""
return self._blank
def Entities(self, buf):
"""Converts special characters to their entity tags.
This is applied after font embellishments.
Args:
buf: The normal text that may contain special characters.
Returns:
The escaped string.
"""
return buf
def Escape(self, buf):
"""Escapes special characters in normal text.
This is applied before font embellishments.
Args:
buf: The normal text that may contain special characters.
Returns:
The escaped string.
"""
return buf
def Finish(self):
"""Finishes all output document rendering."""
return None
def Font(self, unused_attr, unused_out=None):
"""Returns the font embellishment string for attr.
Args:
unused_attr: None to reset to the default font, otherwise one of BOLD,
ITALIC, or CODE.
unused_out: Writes tags line to this stream if not None.
Returns:
The font embellishment string.
"""
return ''
def SetLang(self, lang):
"""Sets the ```...``` code block language.
Args:
lang: The language name, None if not in a code block, '' is no explicit
language specified.
"""
self._lang = lang
def Line(self):
"""Renders a paragraph separating line."""
pass
def Link(self, target, text):
"""Renders an anchor.
Args:
target: The link target URL.
text: The text to be displayed instead of the link.
Returns:
The rendered link anchor and text.
"""
if text:
if target and '://' in target:
# Show non-local targets.
return '{0} ({1})'.format(text, target)
return text
if target:
return target
return '[]()'
def TableLine(self, line, indent=0):
"""Adds an indented table line to the output.
Args:
line: The line to add. A newline will be added.
indent: The number of characters to indent the table.
"""
self._out.write(indent * ' ' + line + '\n')
def Table(self, table, rows):
"""Renders a table.
Nested tables are not supported.
Args:
table: A TableAttributes object.
rows: A list of rows where each row is a list of column strings.
"""
self.Line()
indent = self._indent[self._level].indent + 2
buf = io.StringIO()
resource_printer.Print(rows, table.GetPrintFormat(), out=buf)
for line in buf.getvalue().split('\n')[:-1]:
self.TableLine(line, indent=indent)
self.Content()
self.Line()
| [
"staleyh@gmail.com"
] | staleyh@gmail.com |
8a9acded12be8b653aa5df5f824ffced7e8b7321 | 9a7a7e43902b6bc5a9e96933da8814acf3f318a3 | /Demo_Pytest/test_case/test_case2/test_demo1.py | 16ff01ee52ffb99c288240934acec7c8936d1bd5 | [] | no_license | liuchangfu/python_script | 9684d512f4bb09f37585e3fc56329be2ea8d6eb5 | 73f0e71364fc2271626e0deff54b4079ad92390c | refs/heads/master | 2020-03-15T16:05:47.624545 | 2018-06-08T10:44:17 | 2018-06-08T10:44:17 | 132,226,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | import pytest
def func(x):
return x+1
def test_func1():
assert func(3) == 4
def test_func2():
assert func(3) == 3
def test_func3():
assert func(3) != 2
if __name__ == '__main__':
pytest.main() | [
"shift_1220@163.com"
] | shift_1220@163.com |
be9534885c37fcd145ac76851d1034085cff3e71 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/numpy/core/tests/test_overrides.py | d7809428f80a8bd0c97c84402be95d52b083fb73 | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a916bc00e3819de8d2e997d6eef3f783e8e2a05748c01049de162b3d95f2ee4b
size 13196
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
4c4156b93204bb083c1571b0060b3fe1f1a79885 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/catboost/test.py | 41cd5bcf429609927b891844fb882126cd2aaa59 | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | import catboost
| [
"kamadorueda@gmail.com"
] | kamadorueda@gmail.com |
d9ed2fd3233c96c9b3ace22bf68412e6cc5af7a6 | 60618d48e09a140926d97b01cb9b6f76fcc65703 | /others/Card/cards_main.py | be2e8585610bcbc712cae5e93d806cbde9aa1693 | [] | no_license | Incipe-win/Python | ca8f36cc8785eb13512f71a3cf10149d4e1b855e | 5bab36b90591c74dedb6ead3484a279b90a1bcbd | refs/heads/master | 2021-01-07T08:11:42.293541 | 2020-12-06T09:17:02 | 2020-12-06T09:17:02 | 241,629,236 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | #! /usr/bin/python3
from Card import cards_tools
while True:
cards_tools.show_menu()
choose = input("请输入您的选择:")
if choose in ["1", "2", "3"]:
# 1. 新增名片
if choose == "1":
print("您的选择是: %s" % cards_tools.action[int(choose)])
cards_tools.new_card()
# 2. 显示全部
elif choose == "2":
print("您的选择是: %s" % cards_tools.action[int(choose)])
cards_tools.show_all()
# 3. 查询名片
elif choose == "3":
print("您的选择是: %s" % cards_tools.action[int(choose)])
cards_tools.search_card()
elif choose == "0":
print("您的选择是: %s" % cards_tools.action[int(choose)])
print("欢迎您的使用,祝您生活愉快!")
break
else:
print("输入错误,请重新输入!", end="\n\n")
continue
| [
"whc_9_13@163.com"
] | whc_9_13@163.com |
353c1f2652ec393eed4ab2a925dfb5c730c2ffa6 | 88a2f57b7d660228ca1ac922f0f582910bcacb3d | /algorithm/day03/부분집합.py | 4009ee3e894b07da049d639b1bab33c87493d2b5 | [] | no_license | chelseashin/TIL | adc5ed0bd4ba084e85b74baa9699096a7af5585e | 376b56844985b3ff43b94fa18086a449e6deac69 | refs/heads/master | 2022-12-10T02:13:39.680936 | 2020-11-19T13:18:30 | 2020-11-19T13:18:30 | 162,103,813 | 2 | 0 | null | 2022-12-08T04:53:38 | 2018-12-17T09:11:23 | Jupyter Notebook | UTF-8 | Python | false | false | 954 | py | # 부분집합
bit = [0,0,0,0]
for i in range(2):
bit[0] = i
for j in range(2):
bit[1] = j
for k in range(2):
bit[2] = k
for l in range(2):
bit[3] = l
print(bit)
# 부분집합 2
arr = [1, 2, 3]
n = len(arr)
for i in range(1 << n): # 1<<n : 부분 집합의 개수
for j in range(n): # 원소의 수만큼 비트를 비교함
if i & (1 << j): # i의 j번째 비트가 1이면 j번재 원소 출력
print(arr[j], end = ', ')
print()
print()
# 부분집합(Subset Sum) 문제
arr = [-7, -3, -2, 5, 8]
sum = 0
cnt = 0
for i in range(1, 1 << len(arr)):
sum = 0
for j in range(len(arr)):
if i & (1 << j):
sum += arr[j]
if sum == 0:
cnt += 1
for j in range(len(arr)):
if i & (1 << j):
print(arr[j], end =" ")
print()
print("개수 : {}".format(cnt)) | [
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
c52109bbe3a17f7efa023a3985d47abef966079d | faefc32258e04fa8ed404f129c6e635345ad2cd7 | /permute_data.py | fb109475cc5fbbf6730cacd3d23a3be2cbe057f5 | [] | no_license | lodhaz/Poisson-Equation-Solving-with-DL | 1c1e309abb186c5b081a4ebae83d3652884dd831 | 38dbc2e7334d71d7c3120a5d2f7452b82d904cef | refs/heads/master | 2020-04-06T15:07:17.232859 | 2018-10-26T07:23:19 | 2018-10-26T07:23:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # coding: utf-8
# Author: Zhongyang Zhang
# Email : mirakuruyoo@gmail.com
import h5py
import numpy as np
import pickle
root = '/Volumes/Takanashi/Datasets_Repo/POISSON/'#'./TempData/'
DATA_PATH = [root + 'train_data_2.mat', root + 'test_data_2.mat']
train_data = h5py.File(DATA_PATH[0], 'r')
test_data = h5py.File(DATA_PATH[1], 'r')
train_data = dict((key, value) for key, value in train_data.items() if key == 'X_2_train' or key == 'Y_train')
test_data = dict((key, value) for key, value in test_data.items() if key == 'X_2_test' or key == 'Y_test')
train_data_X = np.transpose(train_data['X_2_train'], (3, 2, 1, 0))
train_data_Y = np.transpose(train_data['Y_train'], (2, 1, 0))
test_data_X = np.transpose(test_data['X_2_test'], (3, 2, 1, 0))
test_data_Y = np.transpose(test_data['Y_test'], (2, 1, 0))
train_pairs = [(x, y.T.reshape(-1)) for x, y in zip(train_data_X, train_data_Y)]
test_pairs = [(x, y.T.reshape(-1)) for x, y in zip(test_data_X, test_data_Y)]
pickle.dump(train_pairs, open(root+'train_data_2.pkl', 'wb+'))
pickle.dump(test_pairs, open(root+'test_data_2.pkl', 'wb+'))
| [
"786671043@qq.com"
] | 786671043@qq.com |
f5ff2eeb847505f1ee2df77fa7520501b178d23c | d55f8836d27dcbe56ce62623f1a69f33c0fd950d | /UpWork_Projects/andy_upwork/familyDollar/familyDollar/settings.py | 6582f744414d292a2be638e7fd1276830914c363 | [
"MIT"
] | permissive | SurendraTamang/Web-Scrapping | f12f0f8fcb4b6186ecab38c8036181e4d1560bed | 2bb60cce9010b4b68f5c11bf295940832bb5df50 | refs/heads/master | 2022-11-11T10:32:31.405058 | 2020-06-17T19:34:33 | 2020-06-17T19:34:33 | 273,258,179 | 0 | 1 | null | 2020-06-18T14:20:43 | 2020-06-18T14:20:42 | null | UTF-8 | Python | false | false | 3,387 | py | # -*- coding: utf-8 -*-
# Scrapy settings for familyDollar project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Googlebot'
SPIDER_MODULES = ['familyDollar.spiders']
NEWSPIDER_MODULE = 'familyDollar.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'familyDollar.middlewares.FamilydollarSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_selenium.SeleniumMiddleware': 800,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'familyDollar.pipelines.FamilydollarPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
SELENIUM_DRIVER_NAME = 'chrome'
SELENIUM_DRIVER_EXECUTABLE_PATH = "../chromedriver_windows"
#SELENIUM_DRIVER_ARGUMENTS=['--headless']
SELENIUM_DRIVER_ARGUMENTS=[]
FEED_EXPORT_ENCODING = 'utf-8' | [
"p.byom26@gmail.com"
] | p.byom26@gmail.com |
acc5ce91a9ee2aafef0938a2e4fb9c066bef1e06 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /r8lib/roots_to_r8poly.py | 74d2b4ce32f489ed8923d11cdc132ba501f2d69d | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | #! /usr/bin/env python
#
def roots_to_r8poly ( n, x ):
#*****************************************************************************80
#
## ROOTS_TO_R8POLY converts polynomial roots to polynomial coefficients.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 16 April 2005
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the number of roots specified.
#
# Input, real X(N), the roots.
#
# Output, real C(1:N+1), the coefficients of the polynomial.
#
import numpy as np
#
# Initialize C to (0, 0, ..., 0, 1).
# Essentially, we are setting up a divided difference table.
#
c = np.zeros ( n + 1 )
c[n] = 1.0
#
# Convert to standard polynomial form by shifting the abscissas
# of the divided difference table to 0.
#
for j in range ( 1, n + 1 ):
for i in range ( 1, n + 2 - j ):
c[n-i] = c[n-i] - x[n+1-i-j] * c[n-i+1]
return c
def roots_to_r8poly_test ( ):
#*****************************************************************************80
#
## ROOTS_TO_R8POLY_TEST tests ROOTS_TO_R8POLY.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 11 March 2015
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
from r8poly_print import r8poly_print
from r8vec_print import r8vec_print
n = 5
x = np.array ( [ \
[ 1.0 ], \
[ -4.0 ], \
[ 3.0 ], \
[ 0.0 ], \
[ 3.0 ] ] );
print ( '' )
print ( 'ROOTS_TO_R8POLY_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' ROOTS_TO_R8POLY is given N real roots,' )
print ( ' and constructs the coefficient vector' )
print ( ' of the corresponding polynomial.' )
r8vec_print ( n, x, ' N real roots:' )
c = roots_to_r8poly ( n, x )
r8poly_print ( n, c, ' The polynomial:' )
#
# Terminate.
#
print ( '' )
print ( 'ROOTS_TO_R8POLY_TEST:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
roots_to_r8poly_test ( )
timestamp ( )
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
c0d6f7df93ff44f22b0fbc7c4c22a28f199d0a8d | bc2a96e8b529b0c750f6bc1d0424300af9743904 | /acapy_client/models/v10_present_proof_module_response.py | 9f0dd0cbcdc952d714e52ad3c2b8f6a7160c032a | [
"Apache-2.0"
] | permissive | TimoGlastra/acapy-client | d091fd67c97a57f2b3462353459780281de51281 | d92ef607ba2ff1152ec15429f2edb20976991424 | refs/heads/main | 2023-06-29T22:45:07.541728 | 2021-08-03T15:54:48 | 2021-08-03T15:54:48 | 396,015,854 | 1 | 0 | Apache-2.0 | 2021-08-14T13:22:28 | 2021-08-14T13:22:27 | null | UTF-8 | Python | false | false | 1,247 | py | from typing import Any, Dict, List, Type, TypeVar
import attr
T = TypeVar("T", bound="V10PresentProofModuleResponse")
@attr.s(auto_attribs=True)
class V10PresentProofModuleResponse:
""" """
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
v10_present_proof_module_response = cls()
v10_present_proof_module_response.additional_properties = d
return v10_present_proof_module_response
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| [
"dbluhm@pm.me"
] | dbluhm@pm.me |
194adeed213f29512491a64c1fe37a13dc9a9895 | a35ffe5fd1d4fe8cb68fe8807c80aa7fec219271 | /6. hafta - open_cv_devam/9_kirmizi_pikselleri_yok_etme.py | ccf49a746ef679a6d0ea1fd3ad436d8d2a9ae1e1 | [] | no_license | huseyin1701/goruntu_isleme | 2a3580ee970265094cd73d5b238676c57013f192 | d4a42cb35be175ac5549611858fc2b42d0eaafc6 | refs/heads/master | 2023-04-23T23:13:38.378867 | 2021-05-10T12:48:07 | 2021-05-10T12:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import cv2 as cv
resim = cv.imread("resim/manzara.jpg")
print(resim.shape)
print(resim[0, 0])
for a in resim:
for b in a:
#print(b)
b[2] = 0
cv.imshow("a", resim)
cv.waitKey(0)
| [
"huseyingunes@gmail.com"
] | huseyingunes@gmail.com |
850295881c10098324f479559b5f35013e9d233c | b1bbfe2fa31d761d6a4658b022d344b5a0cb7dd8 | /2-add_two_numbers.py | 36e1dca017b8827f1873a0a36de3dad133a8f487 | [] | no_license | stevestar888/leetcode-problems | f5917efc3516f8e40d5143b4dc10583c1e22dabd | 844f502da4d6fb9cd69cf0a1ef71da3385a4d2b4 | refs/heads/master | 2022-11-12T05:01:02.794246 | 2022-10-28T16:45:48 | 2022-10-28T16:45:48 | 248,663,356 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """
https://leetcode.com/problems/add-two-numbers/
A few tricky cases:
[5]
[5]
[0]
[0]
"""
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
BASE = 10
carry = 0
head = ListNode(0)
curr = head
while l1 or l2 or carry:
l1_num, l2_num = 0, 0
if l1:
l1_num = l1.val
l1 = l1.next
if l2:
l2_num = l2.val
l2 = l2.next
digit = carry + l1_num + l2_num
#if we had a 1 carry into this digit
if carry == 1:
carry -= 1
#if we need to carry into the next digit
if digit >= 10:
digit %= BASE
carry += 1
# print(digit, carry)
digit_node = ListNode(digit)
curr.next = digit_node
curr = curr.next
return head.next
| [
"noreply@github.com"
] | stevestar888.noreply@github.com |
dbc091ff7c675c3e6c98899dae3ee66141845480 | a39ed5db6c75c9ae1f5e05118794c64102dc5f7a | /2022/15_2/solution_test.py | c8b8d15b3ece0d0f33d2976457bdc0fe9d8b446a | [
"MIT"
] | permissive | budavariam/advent_of_code | b656d5caf5d05113b82357754eb225e61e89ac0d | 635be485ec691f9c0cdeb83f944de190f51c1ba3 | refs/heads/master | 2022-12-25T18:12:00.981365 | 2022-12-20T08:20:51 | 2022-12-20T08:20:51 | 114,570,426 | 1 | 1 | MIT | 2022-12-09T09:29:06 | 2017-12-17T21:36:00 | Python | UTF-8 | Python | false | false | 1,143 | py | """ Advent of code 2022 day 15 / 2 """
import unittest
from solution import solution
class MyTest(unittest.TestCase):
"""Unist tests for actual day"""
def test_basic(self):
"""Test from the task"""
self.assertEqual(
solution(
"""Sensor at x=2, y=18: closest beacon is at x=-2, y=15
Sensor at x=9, y=16: closest beacon is at x=10, y=16
Sensor at x=13, y=2: closest beacon is at x=15, y=3
Sensor at x=12, y=14: closest beacon is at x=10, y=16
Sensor at x=10, y=20: closest beacon is at x=10, y=16
Sensor at x=14, y=17: closest beacon is at x=10, y=16
Sensor at x=8, y=7: closest beacon is at x=2, y=10
Sensor at x=2, y=0: closest beacon is at x=2, y=10
Sensor at x=0, y=11: closest beacon is at x=2, y=10
Sensor at x=20, y=14: closest beacon is at x=25, y=17
Sensor at x=17, y=20: closest beacon is at x=21, y=22
Sensor at x=16, y=7: closest beacon is at x=15, y=3
Sensor at x=14, y=3: closest beacon is at x=15, y=3
Sensor at x=20, y=1: closest beacon is at x=15, y=3""",
20,
),
56000011,
)
if __name__ == "__main__":
unittest.main()
| [
"budavariam@gmail.com"
] | budavariam@gmail.com |
843a9896e1ada59e40bd827a2af39e3d13effdba | 9c7581c3b862174878a5e71609f94b3e5a2de5c9 | /CursoEmVideo/Aula22/ex109/titulo.py | 3136d4230f599c8c02894ae254a298636a7cf2f6 | [
"MIT"
] | permissive | lucashsouza/Desafios-Python | 6d9fdc3500e0d01ce9a75201fc4fe88469928170 | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | refs/heads/master | 2020-06-21T16:49:32.884025 | 2019-07-23T01:23:07 | 2019-07-23T01:23:07 | 143,765,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def titulo(mensagem):
print()
print('~' * (len(mensagem)+1))
print(mensagem)
print('~' * (len(mensagem)+1))
print()
| [
"noreply@github.com"
] | lucashsouza.noreply@github.com |
7f0113de4aed2785b90b4ba6789bc0a244c1ed09 | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_sqrt/trend_constant/cycle_5/ar_12/test_artificial_1024_sqrt_constant_5_12_0.py | 9a4bfcc628edeec08dcd54acf3df24173d35377f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 5, transform = "sqrt", sigma = 0.0, exog_count = 0, ar_order = 12);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
8d529d30512884f3d3f678345f7f2c07b0ef5615 | 650076fb94a086e15bdaa5bd2f51ce72df42dce4 | /test/functional/rpc_signrawtransaction.py | 3395fe5a26b85a2f2336d0ede4d5f0e73aa5c243 | [
"MIT"
] | permissive | c0de0x/ErosCore | 548075fe85c46e2bb3946f94361689dbad692da8 | a71767f7ee7105dc83973aac8ac60903b69459c9 | refs/heads/master | 2022-11-25T14:35:59.091923 | 2020-07-30T14:38:39 | 2020-07-30T14:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,422 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import ErosTestFramwork
from test_framework.util import *
class SignRawTransactionsTest(ErosTestFramwork):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'xwMWGTnBNUmGxMm8vfAdbL45bWXyVTYctd': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'xwMWGTnBNUmGxMm8vfAdbL45bWXyVTYctd': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
    def run_test(self):
        """Entry point: run the happy-path signing test, then the
        script-verification-error test (invalid script / missing input)."""
        self.successful_signing_test()
        self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| [
"60665036+ErosCore@users.noreply.github.com"
] | 60665036+ErosCore@users.noreply.github.com |
8a4637739a6eb4ff75ad31da66c99d0566908931 | fae35f9b8061bf3995184efaf34277190a331744 | /satcomum/__init__.py | e86ece2f2468eb7362f0c22773a30ab4194607bd | [
"Apache-2.0"
] | permissive | tiagocardosos/satcomum | 1de27c1b20c59a8059ec7f9f051b695bd9727465 | b42bec06cb0fb0ad2f6b1a2644a1e8fc8403f2c3 | refs/heads/master | 2020-05-05T11:09:21.478347 | 2019-02-19T21:21:18 | 2019-02-19T21:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # -*- coding: utf-8 -*-
#
# satcomum/__init__.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '1.1'
| [
"daniel@base4.com.br"
] | daniel@base4.com.br |
529c198083375fb06105d96692ff3f5250777ec3 | 65b708f0646ea090a4e9bc615cd37fd799bd9bce | /venv/Scripts/pip-script.py | bbde96c7ba7bd14099ef4ca4b4ac8938933fc0e8 | [] | no_license | chrisna2/python-web-scrapping | af803079586c7b798365d23f5667a24d0c6633e8 | 92e74b4985006246f543de87ff26673b94e8c0a8 | refs/heads/master | 2020-07-08T14:40:32.959560 | 2019-08-23T03:19:47 | 2019-08-23T03:19:47 | 203,703,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #!D:\tyn_dev\workspace_pycham\web-scrapping\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script wrapper for pip 19.0.3.
    # Normalize argv[0] by stripping the "-script.py(w)"/".exe" suffix that
    # Windows launchers append, so pip reports its own name correctly.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"chrisna2@hanmail.net"
] | chrisna2@hanmail.net |
14bcbf43eb9678724906eec240db3f26cfa53cd3 | 2a68b03c923119cc747c4ffcc244477be35134bb | /Algorithm/DFS/dice.py | b52118987a96576b1be898e542042e09aed51736 | [] | no_license | QitaoXu/Lintcode | 0bce9ae15fdd4af1cac376c0bea4465ae5ea6747 | fe411a0590ada6a1a6ae1166c86c585416ac8cda | refs/heads/master | 2020-04-24T20:53:27.258876 | 2019-09-24T23:54:59 | 2019-09-24T23:54:59 | 172,259,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | class Solution:
def findTarget(self, dices, sides, target):
print("\nnum of dices = %d, num of sides = %d, target = %d" %(dices, sides, target))
results = []
combination = []
found = set()
self.dfs(dices, 0, sides, combination, target, results, found)
return results
def dfs(self, dices, start_index, sides, combination, target, results, found):
if start_index == dices:
if target == 0:
#
# filter duplicates and
# handle corner case like [4] is not a valid combination
# when dices = 2, sides = 4, target = 4
#
if tuple(sorted(combination)) not in found and len(combination) == dices:
results.append(combination.copy())
found.add(tuple(sorted(combination)))
return
for i in range(start_index, dices):
if target <= 0:
return
for side in range(1, sides + 1):
combination.append(side)
self.dfs(dices, i + 1, sides, combination, target - side, results, found)
combination.pop()
solution = Solution()
print(solution.findTarget(3, 5, 10))
print(solution.findTarget(2, 3, 4))
print(solution.findTarget(2, 4, 4))
print(solution.findTarget(3, 4, 4)) | [
"jeremyxuqitao@outlook.com"
] | jeremyxuqitao@outlook.com |
8082f748ca87aeda70c2749fc297b87eb5d1f36e | 140929bb7a81dd76688d5acc923f28aa0ff539d8 | /env/lib/python3.6/site-packages/retro/retro_env.py | 0c3310e4dd863f8643cf81157f3d632dfc57cacd | [
"Apache-2.0"
] | permissive | boodahDEV/Soni-IA | bd4436895c6c61c191e8968a964667af174121cd | c452c0b3df3a3ced4b5027c2abb4f3c22fd0f948 | refs/heads/master | 2020-05-05T01:27:04.810504 | 2019-05-27T06:48:08 | 2019-05-27T06:48:08 | 179,603,282 | 2 | 1 | Apache-2.0 | 2019-05-27T06:38:43 | 2019-04-05T01:39:46 | Python | UTF-8 | Python | false | false | 11,376 | py | import gc
import gym
import gzip
import gym.spaces
import json
import numpy as np
import os
import retro
import retro.data
#import pygame
#from pygame.locals import *
from gym.utils import seeding
gym_version = tuple(int(x) for x in gym.__version__.split('.'))
__all__ = ['RetroEnv']
class RetroEnv(gym.Env):
    """
    Gym Retro environment class

    Provides a Gym interface to classic video games
    """
    # Standard Gym metadata: supported render modes and nominal console FPS.
    metadata = {'render.modes': ['human', 'rgb_array'],
                'video.frames_per_second': 60.0}

    def __init__(self, game, state=retro.State.DEFAULT, scenario=None, info=None, use_restricted_actions=retro.Actions.FILTERED,
                 record=False, players=1, inttype=retro.data.Integrations.STABLE, obs_type=retro.Observations.IMAGE):
        """Create an emulator for `game` and expose it as a Gym environment.

        Resolves the ROM, save state, info and scenario files via retro's
        data integrations, boots the emulator, and builds action/observation
        spaces based on `use_restricted_actions` and `obs_type`.
        Only one emulator may exist per process (see gc.collect() below).
        """
        if not hasattr(self, 'spec'):
            self.spec = None
        self._obs_type = obs_type
        self.img = None
        self.ram = None
        self.viewer = None
        self.gamename = game
        self.statename = state
        self.initial_state = None
        self.players = players
        # Local `metadata` (file contents) intentionally shadows the class
        # attribute of the same name within this method.
        metadata = {}
        rom_path = retro.data.get_romfile_path(game, inttype)
        metadata_path = retro.data.get_file_path(game, 'metadata.json', inttype)
        if state == retro.State.NONE:
            self.statename = None
        elif state == retro.State.DEFAULT:
            # Pick a default save state from metadata.json, preferring a
            # per-player-count default; missing/bad metadata means no state.
            self.statename = None
            try:
                with open(metadata_path) as f:
                    metadata = json.load(f)
                if 'default_player_state' in metadata and self.players <= len(metadata['default_player_state']):
                    self.statename = metadata['default_player_state'][self.players - 1]
                elif 'default_state' in metadata:
                    self.statename = metadata['default_state']
                else:
                    self.statename = None
            except (IOError, json.JSONDecodeError):
                pass
        if self.statename:
            self.load_state(self.statename, inttype)
        self.data = retro.data.GameData()
        if info is None:
            info = 'data'
        if info.endswith('.json'):
            # assume it's a path
            info_path = info
        else:
            info_path = retro.data.get_file_path(game, info + '.json', inttype)
        if scenario is None:
            scenario = 'scenario'
        if scenario.endswith('.json'):
            # assume it's a path
            scenario_path = scenario
            # NOTE(review): debug print left in — logs the scenario path on
            # every construction.
            print(str(scenario_path)) ##YO
        else:
            scenario_path = retro.data.get_file_path(game, scenario + '.json', inttype)
            print(str(scenario_path)) ##YO
        self.system = retro.get_romfile_system(rom_path)
        # We can't have more than one emulator per process. Before creating an
        # emulator, ensure that unused ones are garbage-collected
        gc.collect()
        self.em = retro.RetroEmulator(rom_path)
        self.em.configure_data(self.data)
        self.em.step()
        core = retro.get_system_info(self.system)
        self.buttons = core['buttons']
        self.num_buttons = len(self.buttons)
        self.button_combos = self.data.valid_actions()
        try:
            assert self.data.load(info_path, scenario_path), 'Failed to load info (%s) or scenario (%s)' % (info_path, scenario_path)
        except Exception:
            # Tear down the emulator so a retry in the same process works.
            del self.em
            raise
        # Build the action space per the requested action restriction mode.
        if use_restricted_actions == retro.Actions.DISCRETE:
            combos = 1
            for combo in self.button_combos:
                combos *= len(combo)
            self.action_space = gym.spaces.Discrete(combos ** players)
        elif use_restricted_actions == retro.Actions.MULTI_DISCRETE:
            self.action_space = gym.spaces.MultiDiscrete([len(combos) if gym_version >= (0, 9, 6) else (0, len(combos) - 1) for combos in self.button_combos] * players)
        else:
            self.action_space = gym.spaces.MultiBinary(self.num_buttons * players)
        kwargs = {}
        if gym_version >= (0, 9, 6):
            kwargs['dtype'] = np.uint8
        # Observation space shape comes from an actual RAM dump or screen grab.
        if self._obs_type == retro.Observations.RAM:
            shape = self.get_ram().shape
        else:
            img = [self.get_screen(p) for p in range(players)]
            shape = img[0].shape
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=shape, **kwargs)
        self.use_restricted_actions = use_restricted_actions
        self.movie = None
        self.movie_id = 0
        self.movie_path = None
        if record is True:
            self.auto_record()
        elif record is not False:
            self.auto_record(record)
        self.seed()
        # Old gym versions used underscore-prefixed method names; alias them.
        if gym_version < (0, 9, 6):
            self._seed = self.seed
            self._step = self.step
            self._reset = self.reset
            self._render = self.render
            self._close = self.close

    def _update_obs(self):
        """Refresh and return the cached observation (RAM dump or screen)."""
        if self._obs_type == retro.Observations.RAM:
            self.ram = self.get_ram()
            return self.ram
        elif self._obs_type == retro.Observations.IMAGE:
            self.img = self.get_screen()
            return self.img
        else:
            raise ValueError('Unrecognized observation type: {}'.format(self._obs_type))
    ##########################################################################################################################
    def action_to_array(self, a):
        """Convert a Gym action `a` into one uint8 button mask per player."""
        actions = []
        for p in range(self.players):
            action = 0
            if self.use_restricted_actions == retro.Actions.DISCRETE:
                # NOTE(review): the DISCRETE decoding below was disabled in
                # this fork (it is a no-op string literal), so DISCRETE
                # actions always decode to 0 — confirm this is intentional.
                '''for combo in self.button_combos:
                    current = a % len(combo)
                    a //= len(combo)
                    action |= combo[current] '''
            elif self.use_restricted_actions == retro.Actions.MULTI_DISCRETE:
                # NOTE(review): upstream slices to (p + 1); with (p + 0) this
                # slice is always empty — confirm intent.
                ap = a[self.num_buttons * p:self.num_buttons * (p + 0)]
                for i in range(len(ap)):
                    # Never reached while `ap` is empty (see slice above).
                    buttons = self.button_combos[i]
                    action |= buttons[ap[i]]
                # print ("\n",ap)
            else:
                # One num_buttons-wide slice of the flat action per player.
                ap = a[self.num_buttons * p:self.num_buttons * (p + 1)]
                for i in range(len(ap)):
                    action |= int(ap[i]) << i
            if self.use_restricted_actions == retro.Actions.FILTERED:
                action = self.data.filter_action(action)
            # Unpack the (possibly filtered) bitmask back into a button array.
            ap = np.zeros([self.num_buttons], np.uint8)
            for i in range(self.num_buttons):
                ap[i] = (action >> i) & 1
            #ap = [0,1,0,0,0,1,0,0,0,0,0,0]
            #print ("\n",p)
            actions.append(ap)
        return actions
    #############################################################################################################################
    def step(self, a):
        """Apply action `a`, advance the emulator one frame, and return
        the usual Gym (observation, reward, done, info) tuple."""
        if self.img is None and self.ram is None:
            raise RuntimeError('Please call env.reset() before env.step()')
        for p, ap in enumerate(self.action_to_array(a)):
            if self.movie:
                # Record each player's key presses into the movie file.
                for i in range(self.num_buttons):
                    self.movie.set_key(i, ap[i], p)
            self.em.set_button_mask(ap, p)
        if self.movie:
            self.movie.step()
        self.em.step()
        self.data.update_ram()
        ob = self._update_obs()
        rew, done, info = self.compute_step()
        return ob, rew, bool(done), dict(info)

    def reset(self):
        """Restore the initial save state, clear inputs, optionally start a
        new .bk2 recording, and return the first observation."""
        if self.initial_state:
            self.em.set_state(self.initial_state)
        for p in range(self.players):
            self.em.set_button_mask(np.zeros([self.num_buttons], np.uint8), p)
        self.em.step()
        if self.movie_path is not None:
            rel_statename = os.path.splitext(os.path.basename(self.statename))[0]
            self.record_movie(os.path.join(self.movie_path, '%s-%s-%06d.bk2' % (self.gamename, rel_statename, self.movie_id)))
            self.movie_id += 1
        if self.movie:
            self.movie.step()
        self.data.reset()
        self.data.update_ram()
        return self._update_obs()

    def seed(self, seed=None):
        """Seed the env's RNG; returns [seed1, seed2] per the Gym convention."""
        self.np_random, seed1 = seeding.np_random(seed)
        # Derive a random seed. This gets passed as a uint, but gets
        # checked as an int elsewhere, so we need to keep it below
        # 2**31.
        seed2 = seeding.hash_seed(seed1 + 1) % 2**31
        return [seed1, seed2]

    def render(self, mode='rgb_array', close=False):
        """Render the current frame as an RGB array or into a pop-up viewer."""
        if close:
            if self.viewer:
                self.viewer.close()
            return
        # Fall back to a fresh screen grab if no observation is cached yet.
        img = self.get_screen() if self.img is None else self.img
        if mode == "rgb_array":
            return img
        elif mode == "human":
            if self.viewer is None:
                from gym.envs.classic_control.rendering import SimpleImageViewer
                self.viewer = SimpleImageViewer()
            self.viewer.imshow(img)
            return self.viewer.isopen ####################################################33img

    def close(self):
        """Release the emulator (guarded: __init__ may have failed early)."""
        if hasattr(self, 'em'):
            del self.em

    def get_action_meaning(self, act):
        """Return the button names pressed by `act` (list per player, or a
        single list when there is only one player)."""
        actions = []
        for p, action in enumerate(self.action_to_array(act)):
            actions.append([self.buttons[i] for i in np.extract(action, np.arange(len(action)))])
        if self.players == 1:
            return actions[0]
        #print(actions)
        return actions

    def get_ram(self):
        """Concatenate all emulator memory blocks (ordered by offset) into a
        single flat uint8 array."""
        blocks = []
        for offset in sorted(self.data.memory.blocks):
            arr = np.frombuffer(self.data.memory.blocks[offset], dtype=np.uint8)
            blocks.append(arr)
        return np.concatenate(blocks)

    def get_screen(self, player=0):
        """Grab the emulator screen, cropped per the game's crop info."""
        img = self.em.get_screen()
        #print("IMAGEN: ",img)
        x, y, w, h = self.data.crop_info(player)
        # Clamp the crop rectangle to the actual frame dimensions.
        if not w or x + w > img.shape[1]:
            w = img.shape[1]
        else:
            w += x
        if not h or y + h > img.shape[0]:
            h = img.shape[0]
        else:
            h += y
        if x == 0 and y == 0 and w == img.shape[1] and h == img.shape[0]:
            return img
        # NOTE(review): hard-coded 500/600 ignore the computed w/h above —
        # looks like a local hack; upstream presumably uses img[y:h, x:w].
        return img[y:500, x:600]

    def load_state(self, statename, inttype=retro.data.Integrations.DEFAULT):
        """Load a gzipped .state save file; it is applied on the next reset()."""
        if not statename.endswith('.state'):
            statename += '.state'
        with gzip.open(retro.data.get_file_path(self.gamename, statename, inttype), 'rb') as fh:
            self.initial_state = fh.read()
        self.statename = statename

    def compute_step(self):
        """Return (reward, done, info) for the current frame; reward is a
        per-player list when there are multiple players."""
        if self.players > 1:
            reward = [self.data.current_reward(p) for p in range(self.players)]
        else:
            reward = self.data.current_reward()
        done = self.data.is_done()
        return reward, done, self.data.lookup_all()

    def record_movie(self, path):
        """Start recording inputs to a .bk2 movie file at `path`."""
        self.movie = retro.Movie(path, True, self.players)
        self.movie.configure(self.gamename, self.em)
        if self.initial_state:
            self.movie.set_state(self.initial_state)

    def stop_record(self):
        """Stop and close any active .bk2 recording."""
        self.movie_path = None
        self.movie_id = 0
        if self.movie:
            self.movie.close()
            self.movie = None

    def auto_record(self, path=None):
        """Enable automatic movie recording on each reset(); defaults to cwd."""
        if not path:
            path = os.getcwd()
        self.movie_path = path
| [
"boodah21@protonmail.com"
] | boodah21@protonmail.com |
b41ea6ae823b3495b06bde530884580ce3d476c5 | f4e69d05d4bea5198f5bd15c968562fac654c88e | /test/test_certificates_api.py | 593599eedf46635c5eac7c1269f77d51ecbbe987 | [] | no_license | krezreb/openapi-client-otoroshi | 2877ae9230b1ca29024880994420101a232cb906 | 0dafc780777857b9a0d0d8264e215bd6e0557224 | refs/heads/master | 2023-05-06T07:23:45.988523 | 2021-05-27T13:00:18 | 2021-05-27T13:00:18 | 371,374,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,969 | py | """
Otoroshi Admin API
Admin API of the Otoroshi reverse proxy # noqa: E501
The version of the OpenAPI document: 1.5.0-alpha.14
Contact: oss@maif.fr
Generated by: https://openapi-generator.tech
"""
import unittest
import openapi_client
from openapi_client.api.certificates_api import CertificatesApi # noqa: E501
class TestCertificatesApi(unittest.TestCase):
    """CertificatesApi unit test stubs"""
    # NOTE: generated by openapi-generator; every test body below is still an
    # unimplemented stub (`pass`) and therefore "passes" without testing
    # anything until it is filled in.
    def setUp(self):
        self.api = CertificatesApi() # noqa: E501
    def tearDown(self):
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_bulk_create_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_create_action
        Create multiple Certs at the same time # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_bulk_delete_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_delete_action
        Delete multiple Certs at the same time # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_bulk_patch_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_patch_action
        Update (using json-patch) multiple Certs at the same time # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_bulk_update_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_bulk_update_action
        Update multiple Certs at the same time # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_create_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_create_action
        Creates a Cert # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_delete_entity_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_delete_entity_action
        Deletes a specific Cert using its id # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_find_all_entities_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_find_all_entities_action
        Find all possible Certs entities # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_find_entity_by_id_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_find_entity_by_id_action
        Find a specific Cert using its id # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_patch_entity_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_patch_entity_action
        Updates (using json-patch) a specific Cert using its id # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_renew_cert(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_renew_cert
        Renew a certificates with the same attributes as the original one # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_certificates_controller_update_entity_action(self):
        """Test case for otoroshi_controllers_adminapi_certificates_controller_update_entity_action
        Updates a specific Cert using its id # noqa: E501
        """
        pass
    def test_otoroshi_controllers_adminapi_templates_controller_initiate_certificate(self):
        """Test case for otoroshi_controllers_adminapi_templates_controller_initiate_certificate
        Creates a new Certificate from a template # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"josephbeeson@gmail.com"
] | josephbeeson@gmail.com |
f2898fb3b0248f525810044f3f4e5a5ce1ec680b | 79acd6922037d309857d95fb4a633788525a0535 | /infrastructure/ansible/roles/dataset_loader/files/selection.set.py | aacaf48079e3a85eeea16a62f5685d58c4c8b3ab | [
"Apache-2.0",
"MPL-2.0",
"BSD-3-Clause",
"CC0-1.0",
"Artistic-2.0",
"CC-BY-SA-4.0",
"MIT",
"ISC",
"BSD-2-Clause",
"OFL-1.1",
"JSON",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"WTFPL",
"LGPL-2.0-or-later",
"X11"
] | permissive | apache/trafficcontrol | 86da3a526e5e0d533547969274cd30d74636d922 | e835435e47175f84a04234d15183ab7b61cc2825 | refs/heads/master | 2023-08-31T08:02:25.363164 | 2023-08-30T23:00:43 | 2023-08-30T23:00:43 | 67,198,520 | 811 | 438 | Apache-2.0 | 2023-09-14T18:12:55 | 2016-09-02T07:00:06 | Go | UTF-8 | Python | false | false | 1,333 | py | #!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is used to provide a round-robin merging of two lists
import sys
import json
# Usage: selection.set.py <cdn_csv> <fqdn_csv> [denormalize]
# Distributes the FQDN list round-robin across the CDN list and prints the
# mapping as JSON.  With no valid arguments, prints an empty JSON object.
if len(sys.argv) < 3 or len(sys.argv) > 4:
    print("{}")
    sys.exit(0)
cdn_csv_list = sys.argv[1].split(',')
fqdn_csv_list = sys.argv[2].split(',')
option = ''
if len(sys.argv) == 4:
    option = sys.argv[3]
# Sort both lists so the assignment is deterministic regardless of input order.
cdn_csv_list.sort()
fqdn_csv_list.sort()
step_size = len(cdn_csv_list)
# Round-robin: CDN i receives FQDNs i, i+step, i+2*step, ... as a CSV string.
out_list_normal = {}
for i, val in enumerate(cdn_csv_list):
    sublist = fqdn_csv_list[i:]
    out_list_normal[val] = ','.join(sublist[::step_size])
# Inverted view: one entry per FQDN mapping back to its CDN (empty names skipped).
out_list_denormal = {}
for val, csvlist in out_list_normal.items():
    for i in csvlist.split(','):
        if i != "":
            out_list_denormal[i] = val
if option == 'denormalize':
    print(json.dumps(out_list_denormal))
else:
    print(json.dumps(out_list_normal))
| [
"noreply@github.com"
] | apache.noreply@github.com |
2414522bbf49b310f6773608c43006d53b555cb4 | 47eaf898a430209658df7973ea6b9b266014aa86 | /cont-attn/train.py | 36e92112ec1187dc91c2b61f7e629f47c2c0f336 | [] | no_license | dhruvramani/language-robotics | 89d4ffc376757207f85c73e5d6a06bd8301507e0 | 54e1db11cb5bbcfa3a3ea60ad42d5a572f1b5fb7 | refs/heads/master | 2023-05-11T21:33:04.894238 | 2020-11-17T04:59:44 | 2020-11-17T04:59:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,749 | py | import os
import sys
import torch
import numpy as np
import torch.nn.functional as F
#from tqdm import tqdm # TODO : Remove TQDM
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models import *
from models import get_similar_traj
# NOTE : If in future, you operate on bigger hardwares - move to PyTorch Lightning
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(config):
    """Train the attention policy on demonstration trajectories.

    Trains either on (instruction, trajectory) pairs with language-based
    retrieval of support trajectories (``config.use_lang_search``) or on
    plain trajectories.  Losses are streamed to TensorBoard and checkpoints
    are written to ``config.models_save_path`` every
    ``config.save_interval_steps`` batches.

    Fixes over the previous revision:
      * ``percept`` is moved to ``device`` (CUDA runs crashed on a device
        mismatch before).
      * ``optimizer.zero_grad()`` is now called in the language-search
        branch too; gradients previously accumulated across all batches.
      * ``loss_avg`` is initialised every epoch for both branches (it was
        undefined at the end-of-epoch print when ``use_lang_search`` was
        False) and accumulated via ``loss.item()`` so no tensors are
        retained.
      * The non-language branch read ``deg.key_vis_obs`` (nonexistent
        attribute); it now uses ``deg.vis_obv_key`` and reshapes frames to
        NCHW exactly like the language branch.
    """
    print("Device - ", device)
    tensorboard_writer = SummaryWriter(logdir=config.tensorboard_path)

    deg = config.deg()
    vobs_dim, dof_dim = deg.obs_space[deg.vis_obv_key], deg.obs_space[deg.dof_obv_key]
    act_dim = deg.action_space

    if config.use_visual_obv:
        # BUGFIX: .to(device) was missing, causing device mismatches on CUDA.
        percept = PerceptionModule(visual_obv_dim=vobs_dim, dof_obv_dim=dof_dim, state_dim=config.vis_emb_dim).to(device)
        # The perception embedding is concatenated onto the proprioceptive state.
        dof_dim += config.vis_emb_dim

    if config.model == 'basic_attn':
        attn_module = BasicAttnModel(dof_dim, act_dim).to(device)
    elif config.model == 'rl_transformer':
        attn_module = RLTransformerEncoder(dof_dim, act_dim).to(device)
    else:
        # Fail fast instead of a confusing NameError further down.
        raise ValueError("Unknown config.model: {}".format(config.model))

    params = list(attn_module.parameters())
    if config.use_visual_obv:
        params += list(percept.parameters())
    print("Number of parameters : {}".format(len(params)))

    optimizer = torch.optim.Adam(params, lr=config.learning_rate)
    mse_loss = torch.nn.MSELoss()

    if config.save_graphs:
        tensorboard_writer.add_graph(attn_module)
    if config.resume:
        if config.use_visual_obv:
            percept.load_state_dict(torch.load(os.path.join(config.models_save_path, 'percept.pth')))
        attn_module.load_state_dict(torch.load(os.path.join(config.models_save_path, '{}.pth'.format(config.model))))
        optimizer.load_state_dict(torch.load(os.path.join(config.models_save_path, 'optimizer.pth')))

    print("Run : `tensorboard --logdir={} --host '0.0.0.0' --port 6006`".format(config.tensorboard_path))

    dataloader = deg.get_instruct_dataloader if config.use_lang_search else deg.get_traj_dataloader
    dataloader = dataloader(batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)
    max_step_size = len(dataloader.dataset)

    for epoch in range(config.max_epochs):
        # BUGFIX: previously only defined in the lang-search branch, so the
        # end-of-epoch print below raised NameError in the other branch.
        loss_avg = 0.0
        if config.use_lang_search:
            for i, instruct_traj in enumerate(dataloader):
                # Retrieve demonstration trajectories similar to the instruction.
                support_trajs = get_similar_traj(config, deg, instruct_traj)
                support_trajs = {key: support_trajs[key].float().to(device) for key in support_trajs.keys()}

                key_dof_obs, key_actions = support_trajs[deg.dof_obv_key], support_trajs['action']
                key_batch_size, key_seq_len = key_dof_obs.shape[0], key_dof_obs.shape[1]
                if config.use_visual_obv:
                    key_vis_obs = support_trajs[deg.vis_obv_key].reshape(key_seq_len * key_batch_size, -1)
                    key_vis_obs = key_vis_obs.reshape(-1, vobs_dim[2], vobs_dim[0], vobs_dim[1])
                    key_dof_obs = key_dof_obs.reshape(key_seq_len * key_batch_size, -1)
                    key_dof_obs = percept(key_vis_obs, key_dof_obs)
                    key_dof_obs = key_dof_obs.reshape(key_seq_len, key_batch_size, -1)
                key_actions = key_actions.reshape(key_seq_len, key_batch_size, -1)

                query_traj = {key: instruct_traj[key].float().to(device) for key in instruct_traj.keys()
                              if key in [deg.vis_obv_key, deg.dof_obv_key, 'action']}
                query_dof_obs, query_actions = query_traj[deg.dof_obv_key], query_traj['action']
                query_batch_size, query_seq_len = query_dof_obs.shape[0], query_dof_obs.shape[1]
                if config.use_visual_obv:
                    query_vis_obs = query_traj[deg.vis_obv_key].reshape(query_seq_len * query_batch_size, -1)
                    query_vis_obs = query_vis_obs.reshape(-1, vobs_dim[2], vobs_dim[0], vobs_dim[1])
                    query_dof_obs = query_dof_obs.reshape(query_seq_len * query_batch_size, -1)
                    query_dof_obs = percept(query_vis_obs, query_dof_obs)
                    query_dof_obs = query_dof_obs.reshape(query_seq_len, query_batch_size, -1)
                query_actions = query_actions.reshape(query_seq_len, query_batch_size, -1)

                # Causal ("no peek") mask: position t may only attend to <= t.
                nopeak_mask = np.triu(np.ones((1, query_seq_len, query_seq_len)), k=1).astype('uint8')
                # BUGFIX: move the mask to the same device as the inputs.
                nopeak_mask = Variable(torch.from_numpy(nopeak_mask) == 0).to(device)

                preds = attn_module(curr_state=query_dof_obs, state_set=key_dof_obs, action_set=key_actions, mask=nopeak_mask)
                loss = mse_loss(preds, query_actions)
                loss_avg += loss.item()
                tensorboard_writer.add_scalar('lang_{}_{}_loss'.format(config.model, "visual" if config.use_visual_obv else "state"), loss, epoch * max_step_size + i)

                # BUGFIX: zero_grad() was missing here, so gradients were
                # accumulated across every batch of the entire run.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if int(i % config.save_interval_steps) == 0:
                    if config.use_visual_obv:
                        torch.save(percept.state_dict(), os.path.join(config.models_save_path, 'percept.pth'))
                    torch.save(attn_module.state_dict(), os.path.join(config.models_save_path, '{}.pth'.format(config.model)))
                    torch.save(optimizer.state_dict(), os.path.join(config.models_save_path, 'optimizer.pth'))
        else:
            # NOTE: originally for testing purposes only.
            for i, trajectory in enumerate(dataloader):
                trajectory = {key: trajectory[key].float().to(device) for key in trajectory.keys()}
                dof_obs, actions = trajectory[deg.dof_obv_key], trajectory['action']
                # Taken from the batch itself because the last batch can be
                # smaller than config.batch_size.
                batch_size, seq_len = dof_obs.shape[0], dof_obs.shape[1]
                if config.use_visual_obv:
                    # BUGFIX: was `deg.key_vis_obs` (no such attribute); also
                    # reshape the frames to NCHW like the branch above.
                    vis_obs = trajectory[deg.vis_obv_key].reshape(seq_len * batch_size, -1)
                    vis_obs = vis_obs.reshape(-1, vobs_dim[2], vobs_dim[0], vobs_dim[1])
                    dof_obs = dof_obs.reshape(seq_len * batch_size, -1)
                    dof_obs = percept(vis_obs, dof_obs)
                    dof_obs = dof_obs.reshape(seq_len, batch_size, -1)
                actions = actions.reshape(seq_len, batch_size, -1)

                # Every timestep of the batch doubles as the retrieval set.
                state_set = dof_obs.reshape(seq_len * batch_size, 1, -1).repeat(1, batch_size, 1)
                action_set = actions.reshape(seq_len * batch_size, 1, -1).repeat(1, batch_size, 1)

                preds = attn_module(curr_state=dof_obs, state_set=state_set, action_set=action_set)
                optimizer.zero_grad()
                loss = mse_loss(preds, actions)
                loss_avg += loss.item()
                tensorboard_writer.add_scalar('{}_loss'.format(config.model), loss, epoch * max_step_size + i)
                loss.backward()
                optimizer.step()

                if int(i % config.save_interval_steps) == 0:
                    if config.use_visual_obv:
                        torch.save(percept.state_dict(), os.path.join(config.models_save_path, 'percept.pth'))
                    torch.save(attn_module.state_dict(), os.path.join(config.models_save_path, '{}.pth'.format(config.model)))
                    torch.save(optimizer.state_dict(), os.path.join(config.models_save_path, 'optimizer.pth'))
        print("Epoch {} | Loss : {}".format(epoch, loss_avg / len(dataloader.dataset)))
if __name__ == '__main__':
from model_config import get_model_args
config = get_model_args()
torch.manual_seed(config.seed)
train(config) | [
"dhruvramani98@gmail.com"
] | dhruvramani98@gmail.com |
dfae5c44fb06d9b66de15c95505e1082411d9afd | 8a84375dac5e6b33215d20e12e0c197aeaa6e83d | /pymoji/__init__.py | c0ca5b641eab739a4af72ee91afc1f188a417cb5 | [
"Apache-2.0"
] | permissive | michaeljoseph/pymoji | 5579af089cabf1784c656e7fddf9d20f9e6f5d6a | 4bf26babc7b968d9a753907d4db5402cfd5c6d63 | refs/heads/master | 2021-01-01T18:12:37.805141 | 2013-12-09T10:42:24 | 2013-12-09T10:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | """Emits HTML from emoji"""
__author__ = 'Michael Joseph'
__email__ = 'michaeljoseph@gmail.com'
__url__ = 'https://github.com/michaeljoseph/pymoji'
__version__ = '0.0.1'
from .emoji import emoji
def pymoji(text):
    """Render `text` as emoji HTML, wrapping bare single-word names in colons.

    A lone word such as "smile" becomes ":smile:" before lookup; text that
    already starts with ':', has matching first/last characters (e.g.
    ":smile:" or a single character), or contains spaces is passed through
    unchanged.

    BUGFIX: the old `first_character_is_colon` variable actually held the
    *opposite* of its name (text[0] != ':'); the renamed variables below
    state the condition correctly without changing behavior.
    """
    single_word = ' ' not in text
    wrapped = text[0] == text[-1:]
    starts_with_colon = text[0] == ':'
    if single_word and not wrapped and not starts_with_colon:
        text = ':%s:' % text
    return emoji(text)
| [
"michaeljoseph+github@gmail.com"
] | michaeljoseph+github@gmail.com |
bb1867412159c6486be01f2224049f1100599ae6 | 2fdea85db7be2d39e52191b5aa444150d5a8e995 | /apps/hbase/src/hbase/hbase_site.py | d4abf7edb18cea66212055a2ad05a2290bf0f813 | [
"Apache-2.0"
] | permissive | bazaarvoice/hue | a464cd28bb181a9977095b05cff31a6c50859bde | 9aa150b0b48e90f236335d49904fef5e49b0d41d | refs/heads/master | 2023-03-18T15:11:29.540137 | 2020-06-16T17:13:37 | 2020-06-17T03:22:15 | 108,895,807 | 3 | 0 | Apache-2.0 | 2020-06-17T03:23:02 | 2017-10-30T19:03:18 | Python | UTF-8 | Python | false | false | 2,525 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os.path
from hadoop import confparse
from desktop.lib.security_util import get_components
from hbase.conf import HBASE_CONF_DIR, USE_DOAS
LOG = logging.getLogger(__name__)
SITE_PATH = None
SITE_DICT = None
_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL = 'hbase.thrift.kerberos.principal'
_CNF_HBASE_AUTHENTICATION = 'hbase.security.authentication'
_CNF_HBASE_IMPERSONATION_ENABLED = 'hbase.thrift.support.proxyuser'
_CNF_HBASE_USE_THRIFT_HTTP = 'hbase.regionserver.thrift.http'
_CNF_HBASE_USE_THRIFT_SSL = 'hbase.thrift.ssl.enabled'
def reset():
  """Discard the cached hbase-site.xml so the next get_conf() re-parses it."""
  global SITE_DICT
  SITE_DICT = None
def get_conf():
  """Return the parsed hbase-site.xml as a ConfParse, parsing lazily on
  first use.  May return None if parsing failed with a non-ENOENT IOError
  (see _parse_site)."""
  if SITE_DICT is None:
    _parse_site()
  return SITE_DICT
def get_server_principal():
  """Return the short name of the HBase Thrift Kerberos principal, or None
  when the principal is unset or cannot be split into components."""
  principal = get_conf().get(_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL, None)
  components = get_components(principal)
  if components is None:
    return None
  return components[0]
def get_server_authentication():
  """Return the configured authentication mechanism, upper-cased,
  defaulting to 'NOSASL' when hbase-site.xml does not set one."""
  mechanism = get_conf().get(_CNF_HBASE_AUTHENTICATION, 'NOSASL')
  return mechanism.upper()
def is_impersonation_enabled():
  """True if Thrift proxy-user impersonation is enabled in hbase-site.xml
  or forced on via the USE_DOAS Hue setting."""
  configured = get_conf().get(_CNF_HBASE_IMPERSONATION_ENABLED, 'FALSE')
  return configured.upper() == 'TRUE' or USE_DOAS.get()
def is_using_thrift_http():
  """True if the Thrift server uses the HTTP transport (hbase-site.xml),
  or when USE_DOAS forces it (doAs requires the HTTP transport)."""
  configured = get_conf().get(_CNF_HBASE_USE_THRIFT_HTTP, 'FALSE')
  return configured.upper() == 'TRUE' or USE_DOAS.get()
def is_using_thrift_ssl():
  """True if the Thrift server is configured for SSL in hbase-site.xml."""
  configured = get_conf().get(_CNF_HBASE_USE_THRIFT_SSL, 'FALSE')
  return configured.upper() == 'TRUE'
def _parse_site():
global SITE_DICT
global SITE_PATH
SITE_PATH = os.path.join(HBASE_CONF_DIR.get(), 'hbase-site.xml')
try:
data = file(SITE_PATH, 'r').read()
except IOError, err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (SITE_PATH, err))
return
data = ""
SITE_DICT = confparse.ConfParse(data)
| [
"romain@cloudera.com"
] | romain@cloudera.com |
5033de95c14e1bda42f174d71402c54e3ecbfec5 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/restaurants/restaurant_utils.py | 227f85a7e1f4623d9c874840e4e304d6fdefbce7 | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,195 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\restaurants\restaurant_utils.py
# Compiled at: 2018-10-01 20:40:25
# Size of source mod 2**32: 5998 bytes
from protocolbuffers import Restaurant_pb2
from business.business_enums import BusinessType
from event_testing.resolver import SingleSimResolver
from restaurants.restaurant_tuning import RestaurantTuning, get_restaurant_zone_director
from tunable_multiplier import TunableMultiplier
import services
class RestaurantUtils:
MEAL_COST_MULTIPLIERS = TunableMultiplier.TunableFactory(description='\n Multipliers used to change the value of things in a menu and for the\n overall cost of the meal.\n \n If any member of the party meets the requirement of the multiplier then\n the multiplier is applied once. The benefit will not be applied for \n each Sim in the group that meets the multiplier tests.\n ')
def get_chef_situation(chef_sim=None):
    """Find the running chef situation (restaurant chef or home chef).

    When ``chef_sim`` is given, only situations that Sim is part of are
    searched; otherwise every running situation in the zone is scanned.
    Returns None when no chef situation is active.
    """
    manager = services.get_zone_situation_manager()
    if chef_sim is None:
        candidates = manager.running_situations()
    else:
        candidates = manager.get_situations_sim_is_in(chef_sim)
    for candidate in candidates:
        # Exact-type match for the restaurant chef; tag match for home chefs.
        is_restaurant_chef = type(candidate) is RestaurantTuning.CHEF_SITUATION
        if is_restaurant_chef or RestaurantTuning.HOME_CHEF_SITUATION_TAG in candidate.tags:
            return candidate
def get_waitstaff_situation(waitstaff_sim=None):
    """Find the running waitstaff situation, or None if there is none.

    When ``waitstaff_sim`` is given, only situations that Sim is part of
    are searched; otherwise every running situation in the zone is scanned.
    """
    manager = services.get_zone_situation_manager()
    if waitstaff_sim is None:
        candidates = manager.running_situations()
    else:
        candidates = manager.get_situations_sim_is_in(waitstaff_sim)
    for candidate in candidates:
        if type(candidate) is RestaurantTuning.WAITSTAFF_SITUATION:
            return candidate
# Build the ShowMenu protobuf sent to the client for a dining group.
# NOTE(review): indentation was stripped from this dump; the nesting shown
# below (in particular which `if` the `else` before the UNOWNED multiplier
# pairs with) should be confirmed against the original source before editing.
def get_menu_message(menu_map, group_sim_ids, chef_order=False, daily_special_ids_map=None, is_recommendation=False):
show_menu_message = Restaurant_pb2.ShowMenu()
menu = Restaurant_pb2.Menu()
# Holiday pricing: scale prices by the active household's holiday multiplier.
active_household = services.active_household()
if active_household is not None:
holiday_multiplier = active_household.holiday_tracker.get_active_holiday_business_price_multiplier(BusinessType.RESTAURANT)
else:
holiday_multiplier = 1.0
# Group-dependent multiplier (each tuned test applies at most once).
tested_meal_cost_multiplier = tested_cost_multipliers_for_group(group_sim_ids)
for course_enum, recipes in menu_map:
course_item = menu.courses.add()
course_item.course_tag = course_enum
# Daily special (if any) for this course; used to tag and discount items.
daily_special_ids = daily_special_ids_map.get(course_enum, None) if daily_special_ids_map else None
for recipe in recipes:
recipe_item = course_item.items.add()
recipe_item.recipe_id = recipe.guid64
is_daily_special = recipe.guid64 == daily_special_ids
# item_type 1 flags a daily special in the client UI; 0 is a regular item.
recipe_item.item_type = 1 if is_daily_special else 0
price = recipe.restaurant_base_price
price *= holiday_multiplier
price *= tested_meal_cost_multiplier
if is_daily_special:
price *= RestaurantTuning.DAILY_SPECIAL_DISCOUNT
else:
# Owned restaurants apply the owner's markup; unowned ones a flat multiplier.
zone_director = get_restaurant_zone_director()
if zone_director:
business_manager = services.business_service().get_business_manager_for_zone()
if business_manager is not None:
price = business_manager.get_value_with_markup(price)
else:
price *= RestaurantTuning.UNOWNED_RESTAURANT_PRICE_MULTIPLIER
# Prices are truncated to whole units.
recipe_item.price_override = int(price)
show_menu_message.menu = menu
show_menu_message.sim_ids.extend(group_sim_ids)
show_menu_message.chef_order = chef_order
show_menu_message.recommend_order = is_recommendation
return show_menu_message
def food_on_table_gen(table_id):
    """Yield every food/drink object slotted on the given dining table.

    Yields nothing when the table no longer exists in the object manager.
    """
    wanted_slot_types = {
        RestaurantTuning.TABLE_FOOD_SLOT_TYPE,
        RestaurantTuning.TABLE_DRINK_SLOT_TYPE,
    }
    table = services.object_manager().get(table_id)
    if table is None:
        return
    for part in table.parts:
        for slot in part.get_runtime_slots_gen(slot_types=wanted_slot_types):
            yield from slot.children
    if False:
        # Unreachable; preserved from the original (decompiler artifact).
        yield None
def tested_cost_multipliers_for_group(group_sim_ids):
    """Compute the combined tested meal-cost multiplier for a dining group.

    Each tuned multiplier is applied at most once: the first Sim in the
    group whose resolver passes the multiplier's tests triggers it, and
    the remaining Sims are not consulted for that multiplier.
    """
    total = RestaurantUtils.MEAL_COST_MULTIPLIERS.base_value
    sim_info_manager = services.sim_info_manager()
    resolvers = {}
    for sim_id in group_sim_ids:
        info = sim_info_manager.get(sim_id)
        if info is not None:
            resolvers[info] = SingleSimResolver(info)
    for multiplier in RestaurantUtils.MEAL_COST_MULTIPLIERS.multipliers:
        for resolver in resolvers.values():
            if multiplier.tests.run_tests(resolver):
                total *= multiplier.multiplier
                break
    return total
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
6b30db7207514a2684ce861ee9668aafabb830eb | 77941c4e6d28e45039f880cfd55e0a7c9b25e1be | /jax_dft/jax_dft/losses_test.py | a425dcc3b79d334a6609c0f0d9e729c3be18aafa | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | ritesh861/google-research | e29c7ba836a91454eec9a1d39e1af62dc6e4860e | 5d901d6895cc254a911a3cdc97487f04487f44ed | refs/heads/master | 2022-12-31T17:54:24.150450 | 2020-10-23T02:12:53 | 2020-10-23T02:18:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,743 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for jax_dft.losses."""
from absl.testing import absltest
from jax.config import config
import jax.numpy as jnp
import numpy as np
from jax_dft.jax_dft import losses
# Set the default dtype as float64
config.update('jax_enable_x64', True)
class LossesTest(absltest.TestCase):
def test_trajectory_mse_wrong_predict_ndim(self):
with self.assertRaisesRegex(
ValueError,
'The size of the shape of predict should be '
'greater or equal to 2, got 1'):
losses.trajectory_mse(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([0.6, 0.6, 0.6, 0.6]),
discount=1.)
def test_trajectory_mse_wrong_predict_target_ndim_difference(self):
with self.assertRaisesRegex(
ValueError,
'The size of the shape of predict should be greater than '
'the size of the shape of target by 1, '
r'but got predict \(2\) and target \(2\)'):
losses.trajectory_mse(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
discount=1.)
def test_density_mse(self):
self.assertAlmostEqual(
float(losses.mean_square_error(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([[0.4, 0.5, 0.2, 0.3], [0.6, 0.6, 0.6, 0.6]]))),
# ((
# (0.4 - 0.2) ** 2 + (0.5 - 0.2) ** 2
# + (0.2 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# ) / 4 + 0) / 2 = 0.0175
0.0175)
# MSE over a batch of scalar energies; expected value worked out inline.
# (Indentation was stripped from this dump: the statements below belong to
# the method body.)
def test_energy_mse(self):
self.assertAlmostEqual(
float(losses.mean_square_error(
target=jnp.array([[0.2, 0.6]]),
predict=jnp.array([[0.4, 0.7]]))),
# ((0.4 - 0.2) ** 2 + (0.7 - 0.6) ** 2) / 2 = 0.025
0.025)
# Discount vector for 4 steps at 0.8: earlier steps are discounted
# geometrically and the final step is weighted 1.0.
# (Indentation was stripped from this dump: the statements below belong to
# the method body.)
def test_get_discount_coefficients(self):
np.testing.assert_allclose(
losses._get_discount_coefficients(num_steps=4, discount=0.8),
[0.512, 0.64, 0.8, 1.])
def test_trajectory_mse_on_density(self):
self.assertAlmostEqual(
float(losses.trajectory_mse(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([
[[0.4, 0.5, 0.2, 0.3],
[0.3, 0.3, 0.2, 0.2],
[0.3, 0.3, 0.3, 0.2]],
[[0.6, 0.6, 0.6, 0.6],
[0.6, 0.6, 0.6, 0.5],
[0.6, 0.6, 0.6, 0.6]]]),
discount=0.6)),
# First sample in the batch:
# (
# (0.4 - 0.2) ** 2 + (0.5 - 0.2) ** 2
# + (0.2 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# ) / 4 * 0.6 * 0.6
# + (
# (0.3 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# + (0.2 - 0.2) ** 2 + (0.2 - 0.2) ** 2
# ) / 4 * 0.6
# + (
# (0.3 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# + (0.3 - 0.2) ** 2 + (0.2 - 0.2) ** 2
# ) / 4 = 0.0231
# Second sample in the batch:
# (
# (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# + (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# ) / 4 * 0.6 * 0.6
# + (
# (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# + (0.6 - 0.6) ** 2 + (0.5 - 0.6) ** 2
# ) / 4 * 0.6
# + (
# (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# + (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# ) / 4 = 0.0015
# Loss:
# (0.0231 + 0.0015) / 2 = 0.0123
0.0123)
def test_trajectory_mse_on_energy(self):
self.assertAlmostEqual(
float(losses.trajectory_mse(
target=jnp.array([0.2, 0.6]),
predict=jnp.array([[0.4, 0.3, 0.2], [0.7, 0.7, 0.7]]),
discount=0.6)),
# First sample in the batch:
# ((0.4 - 0.2) ** 2 * 0.6 * 0.6
# + (0.3 - 0.2) ** 2 * 0.6 + (0.2 - 0.2) ** 2) = 0.0204
# Second sample in the batch:
# ((0.7 - 0.6) ** 2 * 0.6 * 0.6
# + (0.7 - 0.6) ** 2 * 0.6 + (0.7 - 0.6) ** 2) = 0.0196
# Loss:
# (0.0204 + 0.0196) / 2 = 0.02
0.02)
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
a6cf2f9283eb95bbbcd982e940d290cc39549bec | 475e2fe71fecddfdc9e4610603b2d94005038e94 | /Amazon/RepeatedSubstringPattern.py | 493dc5e2605eec318b294b395ed8cf35491aa97f | [] | no_license | sidhumeher/PyPractice | 770473c699aab9e25ad1f8b7b7cd8ad05991d254 | 2938c14c2e285af8f02e2cfc7b400ee4f8d4bfe0 | refs/heads/master | 2021-06-28T20:44:50.328453 | 2020-12-15T00:51:39 | 2020-12-15T00:51:39 | 204,987,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | '''
Created on Dec 10, 2020
@author: sidteegela
'''
def repeatedSubstringPattern(s) -> bool:
    """Return True if ``s`` equals some proper substring repeated 2+ times.

    Uses the classic doubling trick: ``s`` is a repetition of a proper
    substring iff ``s`` occurs inside ``(s + s)`` with the first and last
    characters removed (the removal prevents the trivial self-match).

    The previous character-frequency implementation was incorrect: equal
    letter counts do not imply a repeated pattern (it returned True for
    "aabb"), and a single character was wrongly reported as repeated.

    Runs in O(n) extra space with C-level substring search.
    """
    return s in (s + s)[1:-1]
# Time complexity: O(n)
# Space: O(n)
if __name__ == '__main__':
    # Ad-hoc demo: print the result for a few sample strings.
    s = 'abab'
    print(repeatedSubstringPattern(s))
    s = 'aba'
    print(repeatedSubstringPattern(s))
    s = 'abcabcabcabc'
    print(repeatedSubstringPattern(s))
| [
"sidhumeher@yahoo.co.in"
] | sidhumeher@yahoo.co.in |
3a210565d34f339e83f5478437ec4d9c55ccc821 | 4aa5d0c2960916fc5aecbae393b3a4c82798e65d | /models/config/__init__.py | 6a0d4a85735c64e63a66d28463151644aaf450bf | [] | no_license | PanJinquan/torch-Slim-Detection-Landmark | 51356bf9582cce84dd2be41e75a2e01c9103b790 | 40400a6ff7376f933899e7f4fead9f994c39d3fe | refs/heads/master | 2023-06-28T15:53:22.624102 | 2021-07-29T02:54:30 | 2021-07-29T02:54:30 | 377,344,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # -*-coding: utf-8 -*-
"""
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2021-03-29 17:17:10
"""
| [
"pan_jinquan@163.com"
] | pan_jinquan@163.com |
8b534fd770aad5f4bd416754cc830b08843ce337 | 1f76f04e44f9e65a96e02ef1314cdd60f4b5e934 | /blog/migrations/0001_initial.py | b8513a6184d7a181cedd022179d80c5d2e6d592a | [] | no_license | eclipsical/blog | a37f0d0248a41c6cf0612c28685e24658aa41ccf | 7d45242d58ac84393d29e862b5b5d7482003ae92 | refs/heads/master | 2020-03-30T19:05:45.994484 | 2018-10-05T02:21:07 | 2018-10-05T02:21:07 | 151,527,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.1.2 on 2018-10-04 05:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial schema migration for the blog app: creates the
    # Post model (title, text, timestamps) with a FK to the auth user model.
    # Once applied, do not edit these operations; add a new migration instead.

    initial = True

    dependencies = [
        # Post.author references the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"me@me.com"
] | me@me.com |
3d2364b9ada6318447f8017a3da3808db99cbf44 | 93fce31a2308e23bf36926a0d2967369bd20fefa | /1.10_select_column.py | 3cb9ce96553dfc49597b9c8fe2f91d4a2072040b | [] | no_license | mucollabo/pandasForML | 6e2cc98bc33c346a0f20ba9ec326af503d4d0076 | 24303f0da1271c98717be52a21ba32e435d3851e | refs/heads/master | 2023-08-20T08:27:53.078243 | 2021-10-27T12:22:15 | 2021-10-27T12:22:15 | 291,989,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | import pandas as pd
# DataFrame() 함수로 데이터프레임 변환, 변수 df에 저장
exam_data = {'이름':['서준', '우현', '인아'],
'수학':[90, 80, 70],
'영어':[98, 89, 95],
'음악':[85, 95, 100],
'체육':[100, 90, 90]}
df = pd.DataFrame(exam_data)
print(df)
print(type(df))
print('\n')
# '수학' 점수 데이터만 선택, 변수 math1에 저장
math1 = df['수학']
print(math1)
print(type(math1))
print('\n')
# '영어' 점수 데이터만 선택, 변수 english에 저장
english = df.영어
print(english)
print(type(english))
# '음악', '체육' 점수 데이터를 선택, 변수 music_gym에 저장
music_gym = df[['음악', '체육']]
print(music_gym)
print(type(music_gym))
print('\n')
# '수학' 점수 데이터만 선택, 변수 math2에 저장
math2 = df[['수학']]
print(math2)
print(type(math2)) | [
"mucollabo@gmail.com"
] | mucollabo@gmail.com |
28e42ccfbb3a1ccbfdf2ba32bab2ab46def4b7a0 | c109de66500f74d2527c83feb0343179c1af3568 | /mar19/pm3.py | 47f1073a4281f31c9f7d8fde22e1f71186636857 | [] | no_license | DUanalytics/python20 | aadf3ce6bb149ce8fde146972222875911fa8bda | 4b4e2e29851c550533033a039ae8175da65728cd | refs/heads/master | 2021-04-08T09:56:09.798921 | 2020-04-21T12:04:59 | 2020-04-21T12:04:59 | 248,764,764 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | #Topic:
#-----------------------------
#libraries
from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.objects.log.importer.xes import factory as importer
from pm4py.visualization.petrinet import factory as visualizer
from pm4py.objects.log.importer.csv import factory as csv_importer
event_stream = csv_importer.import_event_stream( os.path.join("pmdata/", "running-example.csv") )
event_stream
event_stream_length = len(event_stream)
print(event_stream_length)
for event in event_stream: print(event)
from pm4py.objects.conversion.log import factory as conversion_factory
log = conversion_factory.apply(event_stream)
from pm4py.objects.log.exporter.csv import factory as csv_exporter
csv_exporter.export(event_stream, "data/outputFile1.csv")
#log = importer.apply('pmdata/running-example.xes')
net, initial_marking, final_marking = alpha_miner.apply(log)
gviz = visualizer.apply(net, initial_marking, final_marking)
visualizer.view(gviz) | [
"dup1966@gmail.com"
] | dup1966@gmail.com |
3cc507441b6d0ecb6bc00051f647692ccfb593ae | 3b9bf497cd29cea9c24462e0411fa8adbfa6ba60 | /leetcode/Problems/915--Partition-Array-into-Disjoint-Intervals-Medium.py | ad0e3e8fc948c8a58b3d68af1c55d3bf29f8cc00 | [] | no_license | niteesh2268/coding-prepation | 918823cb7f4965bec096ec476c639a06a9dd9692 | 19be0766f6b9c298fb32754f66416f79567843c1 | refs/heads/master | 2023-01-02T05:30:59.662890 | 2020-10-17T13:12:34 | 2020-10-17T13:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | class Solution:
def partitionDisjoint(self, A: List[int]) -> int:
maxVals = [A[0]]
minVals = [0 for _ in range(len(A))]
for i in range(1, len(A)):
val = max(maxVals[-1], A[i])
maxVals.append(val)
minVals[-1] = A[-1]
for i in range(len(A)-2, -1, -1):
minVals[i] = min(minVals[i+1], A[i])
# print(maxVals, minVals)
for i in range(len(A)):
if maxVals[i] <= minVals[i+1]:
return i+1
return 1 | [
"akulajayaprakash@gmail.com"
] | akulajayaprakash@gmail.com |
25706eb5c23ab48d24561809b3144d55ad0064c2 | 4094eff8c1777e5bc1c412f18cb1e905ab1de302 | /tests/layout_tests/__init__.py | 31a9cfa80e04414bca678a8b62f75042178aba99 | [
"BSD-3-Clause"
] | permissive | Lunderberg/python-prompt-toolkit | fcbfed4cb0e94c1032916b2766bb635079db54d6 | 7456156e4bdbd3c5ec9e95c78546e6323b77e74f | refs/heads/master | 2020-12-28T22:22:32.082247 | 2015-10-08T14:32:38 | 2015-10-08T14:32:38 | 43,893,510 | 1 | 0 | null | 2015-10-08T14:20:04 | 2015-10-08T14:20:02 | Python | UTF-8 | Python | false | false | 2,074 | py | from __future__ import unicode_literals
#from prompt_toolkit.layout.utils import fit_tokens_in_size
from pygments.token import Token
import unittest
#class FitTokensInSizeTest(unittest.TestCase):
# def setUp(self):
# self.tokens = [(Token, 'Hello world'), (Token, '\n'), (Token, 'line2')]
#
# def test_1(self):
# result = fit_tokens_in_size(self.tokens, width=5, height=3, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l'), (Token, u'l'), (Token, u'o')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n'), (Token, u'e'), (Token, u'2')],
# [(Token, u' ')],
# ])
#
# def test_2(self):
# result = fit_tokens_in_size(self.tokens, width=3, height=3, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n')],
# [(Token, u' ')],
# ])
#
# def test_3(self):
# result = fit_tokens_in_size(self.tokens, width=3, height=2, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n')],
# ])
#
# def test_4(self):
# result = fit_tokens_in_size(self.tokens, width=3, height=1, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l')],
# ])
#
# def test_5(self):
# result = fit_tokens_in_size(self.tokens, width=15, height=4, default_token=Token)
#
# self.assertEqual(result, [
# [(Token, u'H'), (Token, u'e'), (Token, u'l'), (Token, u'l'), (Token, u'o'), (Token, u' '),
# (Token, u'w'), (Token, u'o'), (Token, u'r'), (Token, u'l'), (Token, u'd'), (Token, u' ')],
# [(Token, u'l'), (Token, u'i'), (Token, u'n'), (Token, u'e'), (Token, u'2'), (Token, u' ')],
# [(Token, u' ' * 15)],
# [(Token, u' ' * 15)],
# ])
| [
"jonathan@slenders.be"
] | jonathan@slenders.be |
63acc5bb375a66d9cc31a059f9d4eac63a1eb089 | b4c6200590a093b805036a822b7889c058494b9f | /Gena/hillshade.py | 050ccc8b16fed49e95074784f17deedf51a3cbcc | [
"MIT"
] | permissive | spoddar-JNPR/earthengine-py-notebooks | 2109a52a49357c19f803b76ed635e022ee486ac6 | ff1b5754785d5e25cb11acdbd52b0f31711d061f | refs/heads/master | 2022-12-25T10:34:44.895717 | 2020-10-01T05:38:16 | 2020-10-01T05:38:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py | # %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Gena/hillshade.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Gena/hillshade.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Gena/hillshade.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# Shaded-relief rendering of the ALOS AW3D30 global DEM: colorize elevation,
# convert to HSV, blend a hillshade into the value channel, and convert back
# to RGB for display.
from ee_plugin.contrib import palettes
dem = ee.Image("JAXA/ALOS/AW3D30_V1_1").select('MED')
# Mask out non-positive elevations.
dem = dem.updateMask(dem.gt(0))
palette = palettes.cb['Pastel1'][7]
#palette = ['black', 'white']
rgb = dem.visualize(**{'min': 0, 'max': 5000, 'palette': palette })
hsv = rgb.unitScale(0, 255).rgbToHsv()
# Vertical exaggeration applied to the DEM before hillshading.
extrusion = 30
# Blend weight of the hillshade against the original value channel.
weight = 0.7
hs = ee.Terrain.hillshade(dem.multiply(extrusion), 315, 35).unitScale(10, 250).resample('bicubic')
hs = hs.multiply(weight).add(hsv.select('value').multiply(1 - weight))
# Replace the value band with the blended hillshade, then back to RGB.
hsv = hsv.addBands(hs.rename('value'), ['value'], True)
rgb = hsv.hsvToRgb()
Map.setCenter(0, 28, 2.5)
Map.addLayer(rgb, {}, 'ALOS DEM', True)
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map | [
"giswqs@gmail.com"
] | giswqs@gmail.com |
76bf8415808dbc0b27091908445074c49baab840 | c6f97d8a8c9f50d494f6e4dbcdd824cd63133a95 | /main/forms.py | 2d2769f579e28302a75be19c653b1de44d40e8fd | [] | no_license | Pavlenkovv/taxi | baa1d9add4fc167191f3fa68e218d0992263c2f0 | 9e645fc0a2fa75f4aa1f604b355919f23047baae | refs/heads/main | 2023-04-10T20:43:41.373294 | 2021-04-21T19:52:49 | 2021-04-21T19:52:49 | 359,902,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from django import forms
import re
class OrderForm(forms.Form):
    """Taxi order form: customer contact details plus ride information.

    Field labels are user-facing Ukrainian strings and are kept verbatim.
    """
    customer_name = forms.CharField(max_length=70, required=True, label="Ім'я")
    customer_phone = forms.CharField(max_length=30, required=True, label='Телефон')
    address_from = forms.CharField(max_length=200, required=True, label='Звідки їхати')
    address_to = forms.CharField(max_length=200, required=True, label='Куди їхати')
    in_what_time = forms.TimeField(required=True, label='Коли їхати')

    def clean_customer_name(self):
        """Trim the name; allow only Cyrillic letters, spaces, '-' and '''."""
        name = self.cleaned_data["customer_name"].strip()
        bad_char = re.search(r"[^\u0400-\u0527 \-\']", name, flags=re.IGNORECASE)
        if bad_char is not None:
            raise forms.ValidationError("Name should have cyrillic characters only")
        return name

    def clean_customer_phone(self):
        """Trim the phone; require the +380(XX)XXX-XX-XX format."""
        phone = self.cleaned_data["customer_phone"].strip()
        if not re.match(r"^\+380\(\d{2}\)\d{3}\-\d{2}\-\d{2}$", phone):
            raise forms.ValidationError("Phone should be in +380(ХХ)ХХХ-ХХ-ХХ format")
        return phone
"pavlenko.vyacheslav@gmail.com"
] | pavlenko.vyacheslav@gmail.com |
39494e67be7df7d189cb475268b7807d2c2b24e0 | b2f84608cc28c492430e972028fa0e178865c78c | /source_py2/test_combi/__init__.py | 93535bef676360daad2ef5825782c0dab25df153 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cool-RR/combi | 54efa752403a4acb6933475102702e43de93c81d | 9c5c143a792ffd8fb38b6470f926268c8bacbc31 | refs/heads/master | 2021-09-23T10:02:52.984204 | 2021-09-18T08:45:57 | 2021-09-18T08:45:57 | 25,787,956 | 24 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
import sys
try:
import pathlib
except:
from combi._python_toolbox.third_party import pathlib
def __bootstrap():
'''
Add needed packages in repo to path if we can't find them.
This adds `combi`'s root folder to `sys.path` if it can't
currently be imported.
'''
import os
import sys
import imp
def exists(module_name):
'''
Return whether a module by the name `module_name` exists.
This seems to be the best way to carefully import a module.
Currently implemented for top-level packages only. (i.e. no dots.)
Doesn't support modules imported from a zip file.
'''
assert '.' not in module_name
try:
imp.find_module(module_name)
except ImportError:
return False
else:
return True
if not exists('combi'):
combi_candidate_path = pathlib(__file__).parent.parent.absolute()
sys.path.append(combi_candidate_path)
__bootstrap()
| [
"ram@rachum.com"
] | ram@rachum.com |
fc2e24a083446166a3a346474bce6f3981dec982 | 4a72b43463a9dbc661583d5d0ee264430909dc08 | /dinesh/urls.py | f71d675a3394f2777cd4886963a853f350ae62be | [] | no_license | parvatiandsons2/dinesh | 50da144428572668409a74eac99eac63518d9876 | efab648bdd6af896e6e45a28946754486f6c59e5 | refs/heads/master | 2023-03-30T20:16:42.423599 | 2021-04-08T05:01:46 | 2021-04-08T05:01:46 | 350,943,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('website.urls'))
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"parvatiandsons2@gmail.com"
] | parvatiandsons2@gmail.com |
e18e0324f3f9b1ce2e77f25ce997f19b39d12550 | 5a281cb78335e06c631181720546f6876005d4e5 | /sahara-10.0.0/sahara/tests/unit/db/migration/test_migrations.py | c9f5b1f1d6e0dd0b7d53840c5a1522de8b487693 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 23,009 | py | # Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
"""
import os
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import utils as db_utils
from sahara.tests.unit.db.migration import test_migrations_base as base
class SaharaMigrationsCheckers(object):
    def assertColumnExists(self, engine, table, column):
        """Assert that `table` (reflected via `engine`) has a column `column`."""
        t = db_utils.get_table(engine, table)
        self.assertIn(column, t.c)
    def assertColumnsExist(self, engine, table, columns):
        """Assert that every name in `columns` is a column of `table`."""
        for column in columns:
            self.assertColumnExists(engine, table, column)
    def assertColumnType(self, engine, table, column, column_type):
        """Assert the reflected column's type renders as `column_type`.

        Comparison is on the string form of the SQLAlchemy type, so the
        expected value is dialect-sensitive.
        """
        t = db_utils.get_table(engine, table)
        column_ref_type = str(t.c[column].type)
        self.assertEqual(column_ref_type, column_type)
    def assertColumnCount(self, engine, table, columns):
        """Assert `table` has exactly len(columns) columns (names not checked)."""
        t = db_utils.get_table(engine, table)
        self.assertEqual(len(columns), len(t.columns))
    def assertColumnNotExists(self, engine, table, column):
        """Assert that `table` does NOT have a column named `column`."""
        t = db_utils.get_table(engine, table)
        self.assertNotIn(column, t.c)
    def assertIndexExists(self, engine, table, index):
        """Assert that `table` defines an index named `index`."""
        t = db_utils.get_table(engine, table)
        index_names = [idx.name for idx in t.indexes]
        self.assertIn(index, index_names)
    def assertIndexMembers(self, engine, table, index, members):
        """Assert index `index` on `table` covers exactly `members` (any order)."""
        self.assertIndexExists(engine, table, index)

        t = db_utils.get_table(engine, table)
        index_columns = None
        for idx in t.indexes:
            if idx.name == index:
                index_columns = idx.columns.keys()
                break

        self.assertEqual(sorted(members), sorted(index_columns))
    def test_walk_versions(self):
        # Walk every alembic migration on the test engine, checking each
        # _pre_upgrade_XXX/_check_XXX hook along the way.
        self.walk_versions(self.engine)
def _pre_upgrade_001(self, engine):
# Anything returned from this method will be
# passed to corresponding _check_xxx method as 'data'.
pass
def _check_001(self, engine, data):
job_binary_internal_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'data',
'datasize'
]
self.assertColumnsExist(
engine, 'job_binary_internal', job_binary_internal_columns)
self.assertColumnCount(
engine, 'job_binary_internal', job_binary_internal_columns)
node_group_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'flavor_id',
'image_id',
'plugin_name',
'hadoop_version',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'floating_ip_pool'
]
self.assertColumnsExist(
engine, 'node_group_templates', node_group_templates_columns)
self.assertColumnCount(
engine, 'node_group_templates', node_group_templates_columns)
data_sources_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type',
'url',
'credentials'
]
self.assertColumnsExist(
engine, 'data_sources', data_sources_columns)
self.assertColumnCount(
engine, 'data_sources', data_sources_columns)
cluster_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'cluster_configs',
'default_image_id',
'anti_affinity',
'tenant_id',
'neutron_management_network',
'plugin_name',
'hadoop_version'
]
self.assertColumnsExist(
engine, 'cluster_templates', cluster_templates_columns)
self.assertColumnCount(
engine, 'cluster_templates', cluster_templates_columns)
job_binaries_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'url',
'extra'
]
self.assertColumnsExist(
engine, 'job_binaries', job_binaries_columns)
self.assertColumnCount(
engine, 'job_binaries', job_binaries_columns)
jobs_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type'
]
self.assertColumnsExist(engine, 'jobs', jobs_columns)
self.assertColumnCount(engine, 'jobs', jobs_columns)
templates_relations_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'flavor_id',
'image_id',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_template_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExist(
engine, 'templates_relations', templates_relations_columns)
self.assertColumnCount(
engine, 'templates_relations', templates_relations_columns)
mains_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExist(
engine, 'mains_association', mains_association_columns)
self.assertColumnCount(
engine, 'mains_association', mains_association_columns)
libs_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExist(
engine, 'libs_association', libs_association_columns)
self.assertColumnCount(
engine, 'libs_association', libs_association_columns)
clusters_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'trust_id',
'is_transient',
'plugin_name',
'hadoop_version',
'cluster_configs',
'default_image_id',
'neutron_management_network',
'anti_affinity',
'management_private_key',
'management_public_key',
'user_keypair_id',
'status',
'status_description',
'info',
'extra',
'cluster_template_id'
]
self.assertColumnsExist(engine, 'clusters', clusters_columns)
self.assertColumnCount(engine, 'clusters', clusters_columns)
node_groups_columns = [
'created_at',
'updated_at',
'id',
'name',
'tenant_id',
'flavor_id',
'image_id',
'image_username',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExist(engine, 'node_groups', node_groups_columns)
self.assertColumnCount(engine, 'node_groups', node_groups_columns)
job_executions_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'job_id',
'input_id',
'output_id',
'start_time',
'end_time',
'cluster_id',
'info',
'progress',
'oozie_job_id',
'return_code',
'job_configs',
'extra'
]
self.assertColumnsExist(
engine, 'job_executions', job_executions_columns)
self.assertColumnCount(
engine, 'job_executions', job_executions_columns)
instances_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'node_group_id',
'instance_id',
'instance_name',
'internal_ip',
'management_ip',
'volumes'
]
self.assertColumnsExist(engine, 'instances', instances_columns)
self.assertColumnCount(engine, 'instances', instances_columns)
self._data_001(engine, data)
def _data_001(self, engine, data):
datasize = 512 * 1024 # 512kB
data = os.urandom(datasize)
t = db_utils.get_table(engine, 'job_binary_internal')
engine.execute(t.insert(), data=data, id='123', name='name')
new_data = engine.execute(t.select()).fetchone().data
self.assertEqual(data, new_data)
engine.execute(t.delete())
    def _check_002(self, engine, data):
        """Placeholder for migration 002 (no schema assertions)."""
        # currently, 002 is just a placeholder
        pass
    def _check_003(self, engine, data):
        """Placeholder for migration 003 (no schema assertions)."""
        # currently, 003 is just a placeholder
        pass
    def _check_004(self, engine, data):
        """Placeholder for migration 004 (no schema assertions)."""
        # currently, 004 is just a placeholder
        pass
    def _check_005(self, engine, data):
        """Placeholder for migration 005 (no schema assertions)."""
        # currently, 005 is just a placeholder
        pass
    def _check_006(self, engine, data):
        """Placeholder for migration 006 (no schema assertions)."""
        # currently, 006 is just a placeholder
        pass
def _pre_upgrade_007(self, engine):
desc = 'magic'
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='pname',
hadoop_version='1', management_private_key='2',
management_public_key='3', status_description=desc)
def _check_007(self, engine, data):
t = db_utils.get_table(engine, 'clusters')
res = engine.execute(t.select().where(t.c.id == '123')).first()
self.assertEqual('magic', res['status_description'])
engine.execute(t.delete())
# check that status_description can keep 128kb.
# MySQL varchar can not keep more then 64kb
desc = 'a' * 128 * 1024 # 128kb
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='plname',
hadoop_version='hversion', management_private_key='1',
management_public_key='2', status_description=desc)
new_desc = engine.execute(t.select()).fetchone().status_description
self.assertEqual(desc, new_desc)
engine.execute(t.delete())
def _check_008(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'security_groups')
self.assertColumnExists(engine, 'node_groups', 'security_groups')
self.assertColumnExists(engine, 'templates_relations',
'security_groups')
    def _check_009(self, engine, data):
        """Migration 009 adds 'rollback_info' to clusters."""
        self.assertColumnExists(engine, 'clusters', 'rollback_info')
def _check_010(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'auto_security_group')
self.assertColumnExists(engine, 'templates_relations',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'open_ports')
    def _check_011(self, engine, data):
        """Migration 011 adds 'sahara_info' to clusters."""
        self.assertColumnExists(engine, 'clusters', 'sahara_info')
def _check_012(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'availability_zone')
self.assertColumnExists(engine, 'node_groups', 'availability_zone')
self.assertColumnExists(engine, 'templates_relations',
'availability_zone')
def _check_014(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates', 'volume_type')
self.assertColumnExists(engine, 'node_groups', 'volume_type')
self.assertColumnExists(engine, 'templates_relations', 'volume_type')
def _check_015(self, engine, data):
provision_steps_columns = [
'created_at',
'updated_at',
'id',
'cluster_id',
'tenant_id',
'step_name',
'step_type',
'completed',
'total',
'successful',
'started_at',
'completed_at',
]
events_columns = [
'created_at',
'updated_at',
'id',
'node_group_id',
'instance_id',
'instance_name',
'event_info',
'successful',
'step_id',
]
self.assertColumnCount(engine, 'cluster_provision_steps',
provision_steps_columns)
self.assertColumnsExist(engine, 'cluster_provision_steps',
provision_steps_columns)
self.assertColumnCount(engine, 'cluster_events', events_columns)
self.assertColumnsExist(engine, 'cluster_events', events_columns)
def _check_016(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'is_proxy_gateway')
self.assertColumnExists(engine, 'node_groups', 'is_proxy_gateway')
self.assertColumnExists(engine, 'templates_relations',
'is_proxy_gateway')
    def _check_017(self, engine, data):
        """Migration 017 drops 'progress' from job_executions."""
        self.assertColumnNotExists(engine, 'job_executions', 'progress')
def _check_018(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates',
'volume_local_to_instance')
self.assertColumnExists(engine, 'node_groups',
'volume_local_to_instance')
self.assertColumnExists(engine, 'templates_relations',
'volume_local_to_instance')
def _check_019(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates', 'is_default')
self.assertColumnExists(engine, 'cluster_templates', 'is_default')
def _check_020(self, engine, data):
self.assertColumnNotExists(engine, 'cluster_provision_steps',
'completed')
self.assertColumnNotExists(engine, 'cluster_provision_steps',
'completed_at')
self.assertColumnNotExists(engine, 'cluster_provision_steps',
'started_at')
    def _check_021(self, engine, data):
        """Migration 021 adds 'data_source_urls' to job_executions."""
        self.assertColumnExists(engine, 'job_executions', 'data_source_urls')
def _check_022(self, engine, data):
columns = [
'created_at',
'updated_at',
'id',
'job_id',
'tenant_id',
'name',
'description',
'mapping_type',
'location',
'value_type',
'required',
'order',
'default'
]
self.assertColumnCount(engine, 'job_interface_arguments', columns)
self.assertColumnsExist(engine, 'job_interface_arguments', columns)
def _check_023(self, engine, data):
self.assertColumnExists(engine, 'clusters',
'use_autoconfig')
self.assertColumnExists(engine, 'cluster_templates',
'use_autoconfig')
self.assertColumnExists(engine, 'node_group_templates',
'use_autoconfig')
self.assertColumnExists(engine, 'node_groups',
'use_autoconfig')
self.assertColumnExists(engine, 'templates_relations',
'use_autoconfig')
def _check_024(self, engine, data):
tables = [
'node_group_templates',
'node_groups',
'templates_relations',
'clusters',
'cluster_templates'
]
for table in tables:
self.assertColumnExists(engine, table, 'shares')
def _check_025(self, engine, data):
self.assertColumnType(engine, 'instances', 'internal_ip',
'VARCHAR(45)')
self.assertColumnType(engine, 'instances', 'management_ip',
'VARCHAR(45)')
def _check_026(self, engine, data):
tables = [
'clusters',
'cluster_templates',
'node_group_templates',
'data_sources',
'job_executions',
'jobs',
'job_binary_internal',
'job_binaries',
]
for table in tables:
self.assertColumnExists(engine, table, 'is_public')
self.assertColumnExists(engine, table, 'is_protected')
def _check_027(self, engine, data):
self.assertColumnNotExists(engine, 'job_executions',
'oozie_job_id')
self.assertColumnExists(engine, 'job_executions',
'engine_job_id')
    def _check_028(self, engine, data):
        """Migration 028 adds 'storage_devices_number' to instances."""
        self.assertColumnExists(engine, 'instances', 'storage_devices_number')
def _pre_upgrade_029(self, engine):
t = db_utils.get_table(engine, 'node_group_templates')
engine.execute(t.insert(), id='123', name='first', plugin_name='plg',
hadoop_version='1', flavor_id='1', volumes_per_node=0,
is_default=True, is_protected=False)
engine.execute(t.insert(), id='124', name='second', plugin_name='plg',
hadoop_version='1', flavor_id='1', volumes_per_node=0,
is_default=False, is_protected=False)
t = db_utils.get_table(engine, 'cluster_templates')
engine.execute(t.insert(), id='123', name='name', plugin_name='plg',
hadoop_version='1', is_default=True, is_protected=False)
engine.execute(t.insert(), id='124', name='name', plugin_name='plg',
hadoop_version='1', is_default=False,
is_protected=False)
def _check_029(self, engine, data):
t = db_utils.get_table(engine, 'node_group_templates')
res = engine.execute(t.select().where(t.c.id == '123')).first()
self.assertTrue(res['is_protected'])
res = engine.execute(t.select().where(t.c.id == '124')).first()
self.assertFalse(res['is_protected'])
engine.execute(t.delete())
t = db_utils.get_table(engine, 'cluster_templates')
res = engine.execute(t.select().where(t.c.id == '123')).first()
self.assertTrue(res['is_protected'])
res = engine.execute(t.select().where(t.c.id == '124')).first()
self.assertFalse(res['is_protected'])
engine.execute(t.delete())
def _check_030(self, engine, data):
health_check_columns = [
'status',
'name',
'description',
'id',
'verification_id',
'created_at',
'updated_at'
]
verification_columns = [
'status',
'id',
'cluster_id',
'created_at',
'updated_at'
]
self.assertColumnCount(engine, 'cluster_verifications',
verification_columns)
self.assertColumnsExist(engine, 'cluster_verifications',
verification_columns)
self.assertColumnCount(engine, 'cluster_health_checks',
health_check_columns)
self.assertColumnsExist(engine, 'cluster_health_checks',
health_check_columns)
def _check_031(self, engine, data):
plugins_data_columns = [
'name',
'id',
'tenant_id',
'version_labels',
'plugin_labels',
'updated_at',
'created_at'
]
self.assertColumnCount(engine, 'plugin_data',
plugins_data_columns)
self.assertColumnsExist(engine, 'plugin_data',
plugins_data_columns)
    def _check_033(self, engine, data):
        """Migration 033 adds 'anti_affinity_ratio' to clusters."""
        self.assertColumnExists(engine, 'clusters', 'anti_affinity_ratio')
def _check_034(self, engine, data):
self.assertColumnExists(engine, 'node_groups',
'boot_from_volume')
self.assertColumnExists(engine, 'node_group_templates',
'boot_from_volume')
self.assertColumnExists(engine, 'templates_relations',
'boot_from_volume')
def _check_035(self, engine, data):
for col in ['boot_volume_type',
'boot_volume_availability_zone',
'boot_volume_local_to_instance']:
self.assertColumnExists(engine, 'node_groups', col)
self.assertColumnExists(engine, 'node_group_templates', col)
self.assertColumnExists(engine, 'templates_relations', col)
class TestMigrationsMySQL(SaharaMigrationsCheckers,
                          base.BaseWalkMigrationTestCase,
                          base.TestModelsMigrationsSync,
                          test_base.MySQLOpportunisticTestCase):
    """Run the migration checkers against an opportunistic MySQL backend."""
    pass
class TestMigrationsPostgresql(SaharaMigrationsCheckers,
                               base.BaseWalkMigrationTestCase,
                               base.TestModelsMigrationsSync,
                               test_base.PostgreSQLOpportunisticTestCase):
    """Run the migration checkers against an opportunistic PostgreSQL
    backend."""
    pass
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
be30c3872f1815d3af578c8204f8e60513a18891 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03697/s653793193.py | 248b93ad63ebad3fe0b3447475e7ea5825cc23c6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | import math
A,B=list(map(int, input().split()))
if A+B<10:
print(A+B)
else:
print('error')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c51d78ade22a78303932a2159fdb50ace55e9012 | 06bb8e09d8b078707aba33d727876c9f3f24b882 | /class/object_init.py | 167747ecc7c19645d1948140af11607f859d741e | [] | no_license | 82seongkyum/python_lecture | 8a698fdee42d9e110d61a5623afc8ca6dca52411 | f24f684eb440400243b57ea432495493e53f6879 | refs/heads/main | 2023-08-25T20:15:20.653686 | 2021-11-12T09:04:50 | 2021-11-12T09:04:50 | 426,548,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | class Car:
name = ""
speed = 0
def __init__(self, name, speed):
self.name = name
self.speed = speed
def getName(self):
print()
return self.name
def getSpeed(self):
print()
return self.speed | [
"you@example.com"
] | you@example.com |
cc0af1f9c13efdc971dda7d358872093427b70ae | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/matplotlib/2018/4/blocking_input.py | 708ab7b1efa7bff4948a2cd53ffab3fd9b7f997e | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 11,548 | py | """
This provides several classes used for blocking interaction with figure
windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for
interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking
way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for
interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that
will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
import six
import matplotlib.lines as mlines
import logging
_log = logging.getLogger(__name__)
class BlockingInput(object):
    """
    Class that creates a callable object to retrieve events in a
    blocking way.
    """
    def __init__(self, fig, eventslist=()):
        self.fig = fig
        self.eventslist = eventslist

    def on_event(self, event):
        """
        Event handler that will be passed to the current figure to
        retrieve events.
        """
        # Add a new event to list - using a separate function is
        # overkill for the base class, but this is consistent with
        # subclasses.
        self.add_event(event)
        _log.info("Event %i", len(self.events))

        # This will extract info from events.
        self.post_event()

        # Check if we have enough events already.
        if len(self.events) >= self.n and self.n > 0:
            self.fig.canvas.stop_event_loop()

    def post_event(self):
        """For baseclass, do nothing but collect events."""
        pass

    def cleanup(self):
        """Disconnect all callbacks."""
        for cb in self.callbacks:
            self.fig.canvas.mpl_disconnect(cb)
        self.callbacks = []

    def add_event(self, event):
        """For base class, this just appends an event to events."""
        self.events.append(event)

    def pop_event(self, index=-1):
        """
        This removes an event from the event list. Defaults to
        removing last event, but an index can be supplied. Note that
        this does not check that there are events, much like the
        normal pop method. If not events exist, this will throw an
        exception.
        """
        self.events.pop(index)

    def pop(self, index=-1):
        self.pop_event(index)
    pop.__doc__ = pop_event.__doc__

    def __call__(self, n=1, timeout=30):
        """
        Blocking call to retrieve *n* events; returns the list of events.
        """
        if not isinstance(n, int):
            raise ValueError("Requires an integer argument")
        self.n = n
        self.events = []
        self.callbacks = []

        if hasattr(self.fig.canvas, "manager"):
            # Ensure that the figure is shown, if we are managing it.
            self.fig.show()

        # Connect the events to the on_event function call.
        # (Fixed: the loop variable previously reused the name *n*,
        # shadowing the event-count parameter saved to self.n above.)
        for event_name in self.eventslist:
            self.callbacks.append(
                self.fig.canvas.mpl_connect(event_name, self.on_event))

        try:
            # Start event loop.
            self.fig.canvas.start_event_loop(timeout=timeout)
        finally:  # Run even on exception like ctrl-c.
            # Disconnect the callbacks.
            self.cleanup()

        # Return the events in this case.
        return self.events
class BlockingMouseInput(BlockingInput):
    """
    Class that creates a callable object to retrieve mouse clicks in a
    blocking way.

    This class will also retrieve keyboard clicks and treat them like
    appropriate mouse clicks (delete and backspace are like mouse button 3,
    enter is like mouse button 2 and all others are like mouse button 1).
    """
    # Class-level defaults; __init__ overwrites them per instance.
    button_add = 1
    button_pop = 3
    button_stop = 2

    def __init__(self, fig, mouse_add=1, mouse_pop=3, mouse_stop=2):
        BlockingInput.__init__(self, fig=fig,
                               eventslist=('button_press_event',
                                           'key_press_event'))
        self.button_add = mouse_add
        self.button_pop = mouse_pop
        self.button_stop = mouse_stop

    def post_event(self):
        """
        This will be called to process events.
        """
        if len(self.events) == 0:
            _log.warning("No events yet")
        elif self.events[-1].name == 'key_press_event':
            self.key_event()
        else:
            self.mouse_event()

    def mouse_event(self):
        '''Process a mouse click event, dispatching on which button.'''
        event = self.events[-1]
        button = event.button
        if button == self.button_pop:
            self.mouse_event_pop(event)
        elif button == self.button_stop:
            self.mouse_event_stop(event)
        else:
            self.mouse_event_add(event)

    def key_event(self):
        '''
        Process a key click event. This maps certain keys to appropriate
        mouse click events.
        '''
        event = self.events[-1]
        if event.key is None:
            # at least in mac os X gtk backend some key returns None.
            return
        key = event.key.lower()
        if key in ['backspace', 'delete']:
            self.mouse_event_pop(event)
        elif key in ['escape', 'enter']:
            # on windows XP and wxAgg, the enter key doesn't seem to register
            self.mouse_event_stop(event)
        else:
            self.mouse_event_add(event)

    def mouse_event_add(self, event):
        """
        Will be called for any event involving a button other than
        button 2 or 3. This will add a click if it is inside axes.
        """
        if event.inaxes:
            self.add_click(event)
        else:  # If not a valid click, remove from event list
            BlockingInput.pop(self, -1)

    def mouse_event_stop(self, event):
        """
        Will be called for any event involving button 2.
        Button 2 ends blocking input.
        """
        # Remove last event just for cleanliness
        BlockingInput.pop(self, -1)

        # This will exit even if not in infinite mode. This is
        # consistent with MATLAB and sometimes quite useful, but will
        # require the user to test how many points were actually
        # returned before using data.
        self.fig.canvas.stop_event_loop()

    def mouse_event_pop(self, event):
        """
        Will be called for any event involving button 3.
        Button 3 removes the last click.
        """
        # Remove this last event
        BlockingInput.pop(self, -1)

        # Now remove any existing clicks if possible
        if len(self.events) > 0:
            self.pop(event, -1)

    def add_click(self, event):
        """
        Add the coordinates of an event to the list of clicks.
        """
        self.clicks.append((event.xdata, event.ydata))
        _log.info("input %i: %f,%f" %
                  (len(self.clicks), event.xdata, event.ydata))

        # If desired plot up click
        if self.show_clicks:
            line = mlines.Line2D([event.xdata], [event.ydata],
                                 marker='+', color='r')
            event.inaxes.add_line(line)
            self.marks.append(line)
            self.fig.canvas.draw()

    def pop_click(self, event, index=-1):
        """
        This removes a click from the list of clicks. Defaults to
        removing the last click.
        """
        self.clicks.pop(index)

        if self.show_clicks:
            mark = self.marks.pop(index)
            mark.remove()
            self.fig.canvas.draw()
            # NOTE: I do NOT understand why the above 3 lines does not work
            # for the keyboard backspace event on windows XP wxAgg.
            # maybe event.inaxes here is a COPY of the actual axes?

    def pop(self, event, index=-1):
        """
        This removes a click and the associated event from the object.
        Defaults to removing the last click, but any index can be
        supplied.
        """
        self.pop_click(event, index)
        BlockingInput.pop(self, index)

    def cleanup(self, event=None):
        """Remove the click markers from the figure and disconnect
        callbacks."""
        # clean the figure
        if self.show_clicks:
            for mark in self.marks:
                mark.remove()
            self.marks = []

            self.fig.canvas.draw()

        # Call base class to remove callbacks
        BlockingInput.cleanup(self)

    def __call__(self, n=1, timeout=30, show_clicks=True):
        """
        Blocking call to retrieve n coordinate pairs through mouse
        clicks.
        """
        self.show_clicks = show_clicks
        self.clicks = []
        self.marks = []
        BlockingInput.__call__(self, n=n, timeout=timeout)
        return self.clicks
class BlockingContourLabeler(BlockingMouseInput):
    """
    Class that creates a callable object that uses mouse clicks or key
    clicks on a figure window to place contour labels.
    """
    def __init__(self, cs):
        # cs is the ContourSet whose figure we listen on.
        self.cs = cs
        BlockingMouseInput.__init__(self, fig=cs.ax.figure)

    def add_click(self, event):
        """Map an "add" click to placing a contour label."""
        self.button1(event)

    def pop_click(self, event, index=-1):
        """Map a "pop" click to removing the last contour label."""
        self.button3(event)

    def button1(self, event):
        """
        This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
        """
        # Shorthand
        if event.inaxes == self.cs.ax:
            self.cs.add_label_near(event.x, event.y, self.inline,
                                   inline_spacing=self.inline_spacing,
                                   transform=False)
            self.fig.canvas.draw()
        else:  # Remove event if not valid
            BlockingInput.pop(self)

    def button3(self, event):
        """
        This will be called if button 3 is clicked. This will remove
        a label if not in inline mode. Unfortunately, if one is doing
        inline labels, then there is currently no way to fix the
        broken contour - once humpty-dumpty is broken, he can't be put
        back together. In inline mode, this does nothing.
        """
        if self.inline:
            pass
        else:
            self.cs.pop_label()
            self.cs.ax.figure.canvas.draw()

    def __call__(self, inline, inline_spacing=5, n=-1, timeout=-1):
        """Blocking loop: place labels until the user stops (n=-1 means
        unlimited clicks; markers are not drawn)."""
        self.inline = inline
        self.inline_spacing = inline_spacing

        BlockingMouseInput.__call__(self, n=n, timeout=timeout,
                                    show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
    """
    Class that creates a callable object to retrieve a single mouse or
    keyboard click.
    """
    def __init__(self, fig):
        BlockingInput.__init__(
            self, fig=fig,
            eventslist=('button_press_event', 'key_press_event'))

    def post_event(self):
        """
        Record whether the most recent event was a key press.
        """
        if not self.events:
            _log.warning("No events yet")
        else:
            last_event = self.events[-1]
            self.keyormouse = (last_event.name == 'key_press_event')

    def __call__(self, timeout=30):
        """
        Blocking call to retrieve a single mouse or key click.

        Returns True if key click, False if mouse, or None if timeout.
        """
        self.keyormouse = None
        BlockingInput.__call__(self, n=1, timeout=timeout)
        return self.keyormouse
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
363dbc525fdce119551e489406287b849a979d1f | cdc0e3f0725519f526e61fe00e42393c59c0b05c | /src/nsf/transforms/base_test.py | 496ccc5760a2da07ba8b90ee34f1532fea69030c | [] | no_license | conormdurkan/lfi | e525dddd2d4c43065e9586f4a61d55c77591399e | c3919c251084763e305f99df3923497a130371a2 | refs/heads/master | 2021-01-01T21:47:52.650312 | 2020-02-11T12:13:29 | 2020-02-11T12:13:29 | 239,355,208 | 41 | 14 | null | 2020-02-11T13:04:24 | 2020-02-09T18:44:18 | Python | UTF-8 | Python | false | false | 5,176 | py | """Tests for the basic transform definitions."""
import unittest
import numpy as np
import torch
from nsf.transforms import base
from nsf.transforms import standard
from nsf.transforms.transform_test import TransformTest
class CompositeTransformTest(TransformTest):
    """Check that a composite of affine scalings matches the single
    equivalent scaling in both directions."""

    def _make_pair(self):
        # 2.0 * 1.0 * -0.25 == -0.5, so the composite must behave like a
        # single scale-by--0.5 transform.
        composite = base.CompositeTransform([
            standard.AffineScalarTransform(scale=2.0),
            standard.IdentityTransform(),
            standard.AffineScalarTransform(scale=-0.25),
        ])
        reference = standard.AffineScalarTransform(scale=-0.5)
        return composite, reference

    def test_forward(self):
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        composite, reference = self._make_pair()

        outputs, logabsdet = composite(inputs)
        outputs_ref, logabsdet_ref = reference(inputs)

        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)

    def test_inverse(self):
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        composite, reference = self._make_pair()

        outputs, logabsdet = composite.inverse(inputs)
        outputs_ref, logabsdet_ref = reference.inverse(inputs)

        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
class MultiscaleCompositeTransformTest(TransformTest):
    """Tests for MultiscaleCompositeTransform, which splits off part of
    the input at each of its sub-transforms."""

    def create_transform(self, shape, split_dim=1):
        """Build a 4-level multiscale transform of affine scalings;
        add_transform returns the shape passed to the next level."""
        mct = base.MultiscaleCompositeTransform(num_transforms=4, split_dim=split_dim)
        for transform in [
            standard.AffineScalarTransform(scale=2.0),
            standard.AffineScalarTransform(scale=4.0),
            standard.AffineScalarTransform(scale=0.5),
            standard.AffineScalarTransform(scale=0.25),
        ]:
            shape = mct.add_transform(transform, shape)
        return mct

    def test_forward(self):
        # Forward output is flattened to [batch, prod(shape)].
        batch_size = 5
        for shape in [(32, 4, 4), (64,), (65,)]:
            with self.subTest(shape=shape):
                inputs = torch.ones(batch_size, *shape)
                transform = self.create_transform(shape)
                outputs, logabsdet = transform(inputs)
                self.assert_tensor_is_good(outputs, [batch_size] + [np.prod(shape)])
                self.assert_tensor_is_good(logabsdet, [batch_size])

    def test_forward_bad_shape(self):
        # Shape too small to be split 4 times should be rejected when
        # the transforms are added.
        shape = (8,)
        with self.assertRaises(ValueError):
            transform = self.create_transform(shape)

    def test_forward_bad_split_dim(self):
        # split_dim=2 is out of range for a rank-1 event shape.
        batch_size = 5
        shape = [32]
        inputs = torch.randn(batch_size, *shape)
        with self.assertRaises(ValueError):
            transform = self.create_transform(shape, split_dim=2)

    def test_inverse_not_flat(self):
        # inverse() requires flattened inputs; a rank-3 tensor must fail.
        batch_size = 5
        shape = [32, 4, 4]
        inputs = torch.randn(batch_size, *shape)
        transform = self.create_transform(shape)
        with self.assertRaises(ValueError):
            transform.inverse(inputs)

    def test_forward_inverse_are_consistent(self):
        batch_size = 5
        for shape in [(32, 4, 4), (64,), (65,), (21,)]:
            with self.subTest(shape=shape):
                transform = self.create_transform(shape)
                inputs = torch.randn(batch_size, *shape).view(batch_size, -1)
                self.assert_forward_inverse_are_consistent(transform, inputs)
class InverseTransformTest(TransformTest):
    """Check that InverseTransform(scale=-2.0) behaves exactly like a
    direct scale-by--0.5 transform, in both directions."""

    def _make_pair(self):
        wrapped = base.InverseTransform(standard.AffineScalarTransform(scale=-2.0))
        reference = standard.AffineScalarTransform(scale=-0.5)
        return wrapped, reference

    def test_forward(self):
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        transform, reference = self._make_pair()

        outputs, logabsdet = transform(inputs)
        outputs_ref, logabsdet_ref = reference(inputs)

        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)

    def test_inverse(self):
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)
        transform, reference = self._make_pair()

        outputs, logabsdet = transform.inverse(inputs)
        outputs_ref, logabsdet_ref = reference.inverse(inputs)

        self.assert_tensor_is_good(outputs, [batch_size] + shape)
        self.assert_tensor_is_good(logabsdet, [batch_size])
        self.assertEqual(outputs, outputs_ref)
        self.assertEqual(logabsdet, logabsdet_ref)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
"conormdurkan@gmail.com"
] | conormdurkan@gmail.com |
0d86ce0ae62e08aae4be88b40a18e089e43ca219 | a262edcef5d1def670103b849eef9cf203510a40 | /tp_analysis/preprocessing/matrix_operation.py | 1c25ff7fcc74e75b2d0c362819425f7c774816e6 | [] | no_license | clarkwkw/GEStatProj | 19439fc50e673cf77444b1513bb5ac9c71fd0022 | 45f9ca393ae179827906c21c1cd758621cfbf4ce | refs/heads/master | 2020-04-03T22:38:38.823677 | 2018-05-22T07:50:53 | 2018-05-22T07:50:53 | 59,290,283 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | py | import json
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA, TruncatedSVD, IncrementalPCA, SparsePCA
import textbook
import nltk.stem
stemmer = nltk.stem.SnowballStemmer('english')
class StemmedTfidfVectorizer(TfidfVectorizer):
	"""TfidfVectorizer that Snowball-stems each token produced by the
	standard analyzer."""
	def build_analyzer(self):
		base_analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
		def stemmed_analyzer(doc):
			return [stemmer.stem(token) for token in base_analyzer(doc)]
		return stemmed_analyzer
class StemmedCountVectorizer(CountVectorizer):
	"""CountVectorizer that Snowball-stems each token produced by the
	standard analyzer."""
	def build_analyzer(self):
		base_analyzer = super(StemmedCountVectorizer, self).build_analyzer()
		def stemmed_analyzer(doc):
			return [stemmer.stem(token) for token in base_analyzer(doc)]
		return stemmed_analyzer
def batch_data(series, batch_count):
	"""Split *series* into *batch_count* contiguous slices.

	The first ``len(series) % batch_count`` batches get one extra element,
	so sizes differ by at most one. Works on any sliceable sequence.
	"""
	base_size, remainder = divmod(len(series), batch_count)
	batches = []
	lo = 0
	for idx in range(batch_count):
		hi = lo + base_size + (1 if idx < remainder else 0)
		batches.append(series[lo:hi])
		lo = hi
	return batches
def by_predefined_words(train_texts, valid_texts = [], words = None, force_dense = True):
	"""Build count matrices over a fixed vocabulary.

	When *words* is None the top 30 textbook vocabs are used. Returns
	(train_matrix, valid_matrix, words); matrices are densified unless
	force_dense is False.
	"""
	if words is None:
		words = textbook.getTopVocabs("all", 30)
	vocab_index = {word: pos for pos, word in enumerate(words)}
	vectorizer = CountVectorizer(vocabulary = vocab_index)
	train_matrix = vectorizer.transform(train_texts)
	valid_matrix = vectorizer.transform(valid_texts)
	if force_dense:
		train_matrix = train_matrix.todense()
		valid_matrix = valid_matrix.todense()
	return (train_matrix, valid_matrix, words)
def normalize(train_matrix, valid_matrix = None, norm_info = None):
	"""Column-wise z-score normalization, in place.

	When *norm_info* is given (loaded from JSON, so its per-column keys are
	strings), its stored means/stds are applied instead of recomputing.
	Columns with zero std are set to the constant 0.5. Returns
	(train_matrix, valid_matrix, norm_dict) where norm_dict records the
	mean/std used per column plus the column count under 'total'.
	"""
	if norm_info is not None:
		n_cols = norm_info["total"]
	else:
		n_cols = train_matrix.shape[1]
	norm_dict = {'total': train_matrix.shape[1]}
	for col in range(n_cols):
		if norm_info is not None:
			mean = norm_info["%d" % col]["mean"]
			std = norm_info["%d" % col]["std"]
		else:
			mean = np.mean(train_matrix[:, col])
			std = np.std(train_matrix[:, col])
		norm_dict[col] = {"mean": mean, "std": std}
		if std == 0:
			# Degenerate column: map everything to a constant.
			train_matrix[:, col] = 0.5
			if valid_matrix is not None:
				valid_matrix[:, col] = 0.5
		else:
			train_matrix[:, col] = (train_matrix[:, col] - mean) / std
			if valid_matrix is not None:
				valid_matrix[:, col] = (valid_matrix[:, col] - mean) / std
	return train_matrix, valid_matrix, norm_dict
# Perform 3 steps to generate training/ validating texts:
# 1. Construct the bag of words
# Parameters:
# ngram_rng: tuple, the lower and uppper bound of the length of a ngram
# words_src: "textbook"/"samples" / list of strings, the source to consider
# tb_chs: list of textbook chapters/ None, when words_src = "textbook", the chapters of textbook to consider
# selection: None/ "tfidf"/ "idf", strategy to select the bag of words
# select_top, select_bottom: integer, no. of words to select according the top/ bottom values of selection strategy
#
# 2. Dimensionality reduction
# Parameters:
# reduction: None/ "pca"/ "lsa"/ "ipca", strategy for dimensionality reduction
# reduce_n_attr: integer, desired no. of dimensions after reduction
#
# 3. Normalization
# Parameters:
# normalize_flag: boolean, if set to true, columns will be normalized to 0 mean and variance 1
#
# Other parameters:
# save_dir: string/ None, save preprocessing settings to the specified directory if not None
def preprocess(train_texts, valid_texts = [], normalize_flag = False, ngram_rng = (1,1), words_src = None, tb_chs = None, selection = None, select_top = 0, select_bottom = 0, reduction = None, reduce_n_attr = None, stem_words = False, savedir = None):
	"""Vectorize, select, reduce and normalize sample texts.

	See the block comment above for the meaning of each parameter.
	Returns (train_matrix, valid_matrix, words).
	"""
	vectorizer, vect_texts = None, None
	if type(words_src) is list:
		# Explicit word list: just count those words directly.
		train_matrix, valid_matrix, words = by_predefined_words(train_texts, valid_texts, words_src)
	else:
		if words_src == "textbook":
			vect_texts = textbook.getOrderedText(chs = tb_chs)
			# NOTE(review): this branch looks inverted -- when stem_words is
			# False it builds a *Stemmed* vectorizer, the opposite of the
			# "samples" branch below. Confirm intended behavior.
			if stem_words:
				vectorizer = textbook.getTfidfVectorizer(ngram_rng, chs = tb_chs)
			else:
				vectorizer = StemmedTfidfVectorizer(ngram_range = ngram_rng, stop_words = 'english')
				vectorizer.fit(textbook.getOrderedText(tb_chs))
		elif words_src == "samples":
			vect_texts = train_texts
			if stem_words:
				vectorizer = StemmedTfidfVectorizer(ngram_range = ngram_rng, stop_words = 'english')
			else:
				vectorizer = TfidfVectorizer(ngram_range = ngram_rng, stop_words = 'english')
			vectorizer.fit(train_texts)
		elif isinstance(words_src, TfidfVectorizer):
			# Pre-fitted vectorizer supplied by the caller.
			# NOTE(review): vect_texts stays None in this branch, so
			# selection == "tfidf" below would call transform(None) and
			# fail; only "idf"/None selection appears usable here.
			vectorizer = words_src
		else:
			raise Exception("Unexpected value for 'words_src'")
		if selection == "tfidf":
			# tf-idf scores of the reference text (first row only).
			tfidf_matrix = vectorizer.transform(vect_texts).toarray()[0]
		tuples = []  # (word, score, column index)
		for vocab in vectorizer.vocabulary_:
			index = vectorizer.vocabulary_[vocab]
			if selection == "idf":
				score = vectorizer.idf_[index]
			elif selection == "tfidf":
				score = tfidf_matrix[index]
			elif selection is None:
				# Score is unused when no selection is requested.
				score = vectorizer.idf_[index]
			else:
				raise Exception("Unexpected selection type")
			tuples.append((vocab, score, index))
		selected_tuples = []
		if selection is None or select_top + select_bottom >= len(tuples):
			selected_tuples = tuples
		elif select_top + select_bottom > 0:
			# Keep the highest- and lowest-scoring words.
			tuples = sorted(tuples, key = lambda x: x[1], reverse = True)
			selected_tuples = tuples[0:select_top] + tuples[(len(tuples)-select_bottom):]
		else:
			raise Exception("Must specify 'select_top'/'select_bottom' when 'selection' is not None")
		selected_words = [tup[0] for tup in selected_tuples]
		# LSA operates on the sparse matrix directly, so skip densifying.
		train_matrix, valid_matrix, words = by_predefined_words(train_texts, valid_texts, selected_words, force_dense = reduction not in ["lsa"])
	pca_components, norm_info = None, None
	reductor = None
	if reduction is not None:
		if reduction == "pca":
			reductor = PCA(n_components = reduce_n_attr)
		elif reduction == "lsa":
			reductor = TruncatedSVD(n_components = reduce_n_attr)
		elif reduction == "ipca":
			reductor = IncrementalPCA(n_components = reduce_n_attr)
		else:
			raise Exception("Unexpected reduction strategy '%s'"%reduction)
		train_matrix = reductor.fit_transform(train_matrix)
		valid_matrix = reductor.transform(valid_matrix)
		pca_components = reductor.components_
	if normalize_flag:
		train_matrix, valid_matrix, norm_info = normalize(train_matrix, valid_matrix)
	if savedir is not None:
		# Persist settings so the same preprocessing can be replayed later.
		# (Local name shadows the function name; harmless here.)
		preprocess = {
			"words": words,
			"pca": reduction is not None
		}
		if normalize_flag:
			preprocess["norm_info"] = norm_info
		with open(savedir+'/preprocess.json', "w") as f:
			f.write(json.dumps(preprocess, indent = 4))
		if reduction is not None:
			np.save(savedir+"/pca.npy", pca_components)
	return train_matrix, valid_matrix, words
| [
"clarkwkw@yahoo.com.hk"
] | clarkwkw@yahoo.com.hk |
5d0899b6f77856ea65209c4696f20e4176cf521f | 855ff14a494aa47e2ab4c09a58468d99c1eb92f5 | /tests/test_download.py | a663ff12d797820386a8173687d6de5fd601d990 | [
"MIT"
] | permissive | nnnyt/EduData | 9ece284f5143d84ec9eb483fcac3747e2a1fcfba | 1827f12167a68f15776cd303ce550814633f1256 | refs/heads/master | 2020-07-06T00:56:32.627104 | 2019-08-17T05:14:12 | 2019-08-17T05:14:12 | 202,838,340 | 0 | 0 | MIT | 2019-08-17T05:11:15 | 2019-08-17T05:11:15 | null | UTF-8 | Python | false | false | 516 | py | # coding: utf-8
# create by tongshiwei on 2019/7/2
import time
import pytest
from EduData import get_data
from EduData.download_data import url_dict
def test_download(tmp_path):
for url in url_dict:
get_data(url, tmp_path, override=True)
time.sleep(1)
for url in url_dict:
with pytest.raises(FileExistsError):
get_data(url, tmp_path, override=False)
time.sleep(1)
for url in url_dict:
get_data(url, tmp_path, override=True)
time.sleep(1)
| [
"tongsw@mail.ustc.edu.cn"
] | tongsw@mail.ustc.edu.cn |
20f76be639652910aa375130dcb4ae19041a2617 | 23b0203547fdcba7065afba855abb0a3d6d79bc4 | /util/perp.py | 80e33b350269aa15a7fc1a9bb6554e39530a279c | [] | no_license | jielaizhang/archangel | 4336c13fbb67452e5eef868cd7debcb2e5e86a7c | a28cfb509f9ae5e0b0cbd8b25285772da2658c44 | refs/heads/master | 2021-01-17T18:10:53.541526 | 2016-10-28T23:03:22 | 2016-10-28T23:03:22 | 71,165,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | #!/usr/bin/env python
def perp(m,b,x,y):
# find perpenticular distance from line to x,y
if m != 0.:
c=y+x/m
r=(c-b)/(m+1./m)
else:
r=x
s=m*r+b
d=((r-x)**2+(s-y)**2)**0.5
if r <= x:
return d
else:
return -d
| [
"zhang.jielai@gmail.com"
] | zhang.jielai@gmail.com |
2133a820156c371ee986d90af631a6a95f55dabd | 4cdc9ba739f90f6ac4bcd6f916ba194ada77d68c | /剑指offer/第五遍/17.打印从1到最大的n位数.py | bfaf04ffc577d04086babfe256bb04a2bb399b4e | [] | no_license | leilalu/algorithm | bee68690daf836cc5807c3112c2c9e6f63bc0a76 | 746d77e9bfbcb3877fefae9a915004b3bfbcc612 | refs/heads/master | 2020-09-30T15:56:28.224945 | 2020-05-30T03:28:39 | 2020-05-30T03:28:39 | 227,313,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | """
输入数字 n,按顺序打印出从 1 到最大的 n 位十进制数。比如输入 3,则打印出 1、2、3 一直到最大的 3 位数 999。
示例 1:
输入: n = 1
输出: [1,2,3,4,5,6,7,8,9]
说明:
用返回一个整数列表来代替打印
n 为正整数
"""
"""
最大的n位数可能会超出存储范围,因此需要用字符串来保存这些数字,并且模拟加法运算
打印数字时要从第一个非0数字开始
当第一位数字发生进位时,模拟加法结束
"""
class Solution1:
def printNumbers(self, n):
# 首先判断输入是否合法
if n < 1:
return []
res = []
num = ['0'] * n
while not self.Increment(num):
# 打印num,加入res中
number = int(self.PrintNum(num))
res.append(number)
return res
def Increment(self, num):
circle = 0
length = len(num) # n
isOverflow = False
for i in range(length-1, -1, -1):
sumValue = circle + int(num[i]) # 计算每一位的和
# 如果是最后一位,还要加1
if i == length-1:
sumValue += 1
# 计算是否有进位
if sumValue >= 10:
# 如果是第一位要进位,则结束
if i == 0:
isOverflow = True
break
else:
sumValue -= 10
circle = 1
num[i] = str(sumValue)
else:
num[i] = str(sumValue)
break
return isOverflow
def PrintNum(self, num):
for i in range(len(num)):
if num[i] != '0':
return ''.join(num[i:])
class Solution2:
def printNumbers(self, n):
if n < 1:
return []
self.res = []
num = ['0'] * n
for i in range(10):
num[0] = str(i)
self.printNumbersCore(num, n, 0)
# 从1开始打印,0要舍去
return self.res[1:]
def printNumbersCore(self, num, length, index):
if index == length-1:
self.res.append(self.PrintNum(num))
return
for i in range(10):
num[index+1] = str(i)
self.printNumbersCore(num, length, index+1)
def PrintNum(self, num):
for i in range(len(num)):
if num[i] != '0':
return int(''.join(num[i:]))
if __name__ == '__main__':
n = 1
res = Solution2().printNumbers(n)
print(res)
| [
"244492644@qq.com"
] | 244492644@qq.com |
cc4581dcb239c15cbc2e561069528a340c92b34d | f48f9798819b12669a8428f1dc0639e589fb1113 | /programming/misc/googlemock/actions.py | 0e8cff036c9e369f402327537b8b77901e62731c | [] | no_license | vdemir/PiSiPackages-pardus-2011-devel | 781aac6caea2af4f9255770e5d9301e499299e28 | 7e1867a7f00ee9033c70cc92dc6700a50025430f | refs/heads/master | 2020-12-30T18:58:18.590419 | 2012-03-12T03:16:34 | 2012-03-12T03:16:34 | 51,609,831 | 1 | 0 | null | 2016-02-12T19:05:41 | 2016-02-12T19:05:40 | null | UTF-8 | Python | false | false | 1,082 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def setup():
# Remove bundled gtest and use the system one.
# acx_pthread.m4 is needed in configure stage, so keep it.
shelltools.move("gtest", "gtest.bak")
shelltools.makedirs("gtest/m4")
shelltools.copy("gtest.bak/m4/acx_pthread.m4", "gtest/m4")
shelltools.unlinkDir("gtest.bak")
autotools.autoreconf("-vfi")
autotools.configure("--disable-static")
# Remove rpath from speexenc and speexdec
pisitools.dosed("libtool", "^hardcode_libdir_flag_spec=.*", "hardcode_libdir_flag_spec=\"\"")
pisitools.dosed("libtool", "^runpath_var=LD_RUN_PATH", "runpath_var=DIE_RPATH_DIE")
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("CHANGES", "COPYING", "CONTRIBUTORS", "README")
| [
"kaptan@pisipackages.org"
] | kaptan@pisipackages.org |
3e2ffe9fb08e3de17a22adf567ea3bae5d89cad3 | a814debee728e59a7a10d8c12b92c1f3ee97e19d | /Atividade/cadastro_alunos.py | 3549954447d09ca6bc18697b41ec82898a0ea902 | [] | no_license | PedroVitor1995/Algoritmo-ADS-2016.1 | 0ee034d2f03b29d3c8177fb3402f7aeae08d07cf | 8e3b6dfb0db188b9f5d68dcb8619f6636883ab89 | refs/heads/master | 2021-01-01T15:51:56.636502 | 2017-07-19T13:47:36 | 2017-07-19T13:47:36 | 81,328,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | def main():
menu = (' 1 - Cadastrar \n 2 - Listar \n 3 - Remover \n 0 - Sair \n Opcao: ')
alunos = []
while True:
opcao = input(menu)
if opcao == 1:
cadastrar(alunos)
print ('Aluno cadastrado com sucesso!')
elif opcao == 2:
listar(alunos)
elif opcao == 3:
remover(alunos)
elif opcao == 0:
print('Saindo do programa')
reposta = raw_input('Dejesa realmente sair S/N: ')
if reposta == 'N' or reposta == 'n':
continue
else:
break
else:
print ('Opcao invalida.')
print ('Programa finalizado.')
def cadastrar(alunos):
aluno = {}
aluno['Nome'] = raw_input('Nome: ')
while True:
aluno['Idade'] = input('Idade: ')
if aluno['Idade'] > 0:
break
else:
continue
while True:
aluno['Sexo'] = raw_input('Sexo M/F: ')
if aluno['Sexo'] == 'F' or aluno['Sexo'] == 'M' or aluno['Sexo'] == 'f' or aluno['Sexo'] == 'm':
break
else:
continue
alunos.append(aluno)
return alunos
def listar(alunos):
print ('Alunos Cadastrados (%d)') % len(alunos)
for i in range(len(alunos)):
print alunos[i]
def remover(alunos):
listar(alunos)
nome = raw_input('Digite o nome do aluno que deseja remover: ')
quantidade = 0
for i in range(len(alunos)):
if alunos[i]['Nome'] == nome:
quantidade += 1
if quantidade == 1:
for i in range(len(alunos)):
if alunos[i]['Nome'] == nome:
del alunos[i]
print ('Aluno %s removido com sucesso!')%(nome)
break
else:
idade = input('Digite a idade do aluno que dejesa remover: ')
for i in range(len(alunos)):
if alunos[i]['Nome'] == nome and alunos[i]['Idade'] == idade:
del alunos[i]
print ('Aluno %s com idade %d removido com sucesso!') % (nome,idade)
break
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | PedroVitor1995.noreply@github.com |
c0fdef125f38ff9c8ea632183c45e314e09b6c90 | 874cb9539283a5dc2616f3c5ae6ca852a63291ed | /classLaneLineHistory.py | ab77f1ddc45c7d4dfef990769231a7b827d77de0 | [] | no_license | redherring2141/ac_lane_detection | 494ddd89ab41a6e63032325127d63f6aee6f4478 | 132da04ac7becd4d8f7750936272bcbaea56d975 | refs/heads/master | 2022-11-05T00:42:32.124816 | 2020-06-22T02:50:57 | 2020-06-22T02:50:57 | 250,525,327 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import math
import numpy as np
from classLaneLine import LaneLine, create_queue
class LaneLineHistory:
def __init__(self, queue_depth=2, test_points=[50, 300, 500, 700], poly_max_deviation_distance=150):
self.lane_lines = create_queue(queue_depth)
self.smoothed_poly = None
self.test_points = test_points
self.poly_max_deviation_distance = poly_max_deviation_distance
def append(self, lane_line, force=False):
if len(self.lane_lines) == 0 or force:
self.lane_lines.append(lane_line)
self.get_smoothed_polynomial()
return True
test_y_smooth = np.asarray(list(map(lambda x: self.smoothed_poly[0] * x**2 + self.smoothed_poly[1] * x + self.smoothed_poly[2], self.test_points)))
test_y_new = np.asarray(list(map(lambda x: lane_line.polynomial_coeff[0] * x**2 + lane_line.polynomial_coeff[1] * x + lane_line.polynomial_coeff[2], self.test_points)))
dist = np.absolute(test_y_smooth - test_y_new)
#dist = np.absolute(self.smoothed_poly - lane_line.polynomial_coeff)
#dist_max = np.absolute(self.smoothed_poly * self.poly_max_deviation_distance)
max_dist = dist[np.argmax(dist)]
if max_dist > self.poly_max_deviation_distance:
print("**** MAX DISTANCE BREACHED ****")
print("y_smooth={0} - y_new={1} - distance={2} - max-distance={3}".format(test_y_smooth, test_y_new, max_dist, self.poly_max_deviation_distance))
return False
self.lane_lines.append(lane_line)
self.get_smoothed_polynomial()
return True
def get_smoothed_polynomial(self):
all_coeffs = np.asarray(list(map(lambda lane_line: lane_line.polynomial_coeff, self.lane_lines)))
self.smoothed_poly = np.mean(all_coeffs, axis=0)
return self.smoothed_poly | [
"redherring2141@kaist.ac.kr"
] | redherring2141@kaist.ac.kr |
8702836fc9279ef168fe2ff273935ae450062b85 | c1e8ddcfda2586ddc6be93ff60a77428150d6921 | /DiegoRocha/NCtBkjKq.py | c0769eb652b1b3dae72bae2d8f527197b1334206 | [] | no_license | weltonvaz/PythonBR | 740fde6aa0040d13100005669f1a011f52573580 | f2bf033d692aee3f79ff1ec2644799cb7f3f5585 | refs/heads/master | 2020-05-09T20:53:06.416836 | 2017-06-15T21:05:38 | 2017-06-15T21:05:38 | 27,550,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def toByteArray(s, withLen):
if (len(s) > 255):
raise Exception('String precisa ter menos que 255 caracteres')
buffer = bytearray()
if withLen:
buffer.append(len(s))
buffer.extend(s)
return buffer
BIN_TO_ASC = [
'+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8',
'9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
]
def toString(_bytes):
buf = []
try:
_len = len(_bytes)
except:
_len = _bytes.size
flush = False
if (_len % 3 != 0):
_bytes.append(0)
flush = True
counter = last = 0
for i in range(_len):
b = _bytes[i] & 0xFF if i < _len else 0
if counter == 0:
buf.append(BIN_TO_ASC[b >> 2])
last = b
elif counter == 1:
buf.append(BIN_TO_ASC[((last & 0x03) << 4) | ((b & 0xF0) >> 4)])
last = b
elif counter == 2:
buf.append(BIN_TO_ASC[((last & 0x0F) << 2) | ((b & 0xC0) >> 6)])
if ( not (flush and i == _len - 1)):
buf.append(BIN_TO_ASC[b & 0x3F])
last = 0
else:
pass
counter = (counter+1) % 3
return ''.join(buf)
print (toString(146)) | [
"weltonvaz@gmail.com"
] | weltonvaz@gmail.com |
6a8a20770c65bc69e42feab1370ccc2a82883911 | 5ef6b1140f4fe7467595f6f49eeb3ec211424317 | /test/busbooking.py | 676654475e7bdc1d4e4d41316a176d8b48cbf0d2 | [] | no_license | publiccoding/prog_ln | d0ddc5ea69be6de6d2c3da0535f924985fcd2585 | 9c3210d177000d0a05cc9a0f1f281cebb8785adb | refs/heads/master | 2021-09-25T04:15:31.109539 | 2018-10-18T01:04:41 | 2018-10-18T01:04:41 | 117,082,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py |
import sys
#arg1=sys.argv[1]
# Register user for bus booking and stor the data in userdata.txt file
reg = input("Enter your options :\nUser Registartion -> 1 \nUser Login -> 2\n" )
if reg == "1":
uname = input("Enter your username")
pwd = input("Enter your password ")
userloginData = f' {uname} = {pwd} \n'
with open ('userdata.txt','a') as file:
file.write(userloginData)
# User login by validating username and password
elif reg == "2":
username = input("Enter login username")
password = input("Enter password of the user")
logindata = {}
with open('userdata.txt','r') as file:
userdata = file.readlines()
data = [data.strip().split('=') for data in userdata]
for data in data:
logindata[data[0]] = data[1]
if username in logindata:
if logindata[username] == password:
print("login successfully")
exit(0)
print("Login unsuccessfull")
exit(1)
| [
"thimmarayan.krishnappa@gmail.com"
] | thimmarayan.krishnappa@gmail.com |
b21593a469f076810e694af3e1ea5f70b772276d | 2a8a12ee952dcd33edefdb8966a3b575b70b3b25 | /BBS/app01/migrations/0001_initial.py | 3a5ea63a18c50e5f259d734045528373ea292993 | [] | no_license | okada8/python | 288e964aa6fd324fa65569d69e8d58ad51753a63 | ebce77d4ce724676e95aba85da293471f8f41cef | refs/heads/master | 2022-12-02T07:15:42.795504 | 2019-06-13T00:28:15 | 2019-06-13T00:28:15 | 177,994,227 | 3 | 1 | null | 2022-11-22T03:52:30 | 2019-03-27T12:53:51 | Python | UTF-8 | Python | false | false | 8,961 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-05-01 02:08
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('phone', models.BigIntegerField(blank=True, null=True)),
('create_time', models.DateField(auto_now_add=True)),
('avatar', models.FileField(default='avatar/default.jpg', upload_to='avatar/')),
],
options={
'verbose_name': '用户表',
'verbose_name_plural': '用户表',
'db_table': 'bs_user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=32)),
('desc', models.CharField(max_length=256)),
('content', models.TextField()),
('create_time', models.DateField(auto_now_add=True)),
('comment_num', models.IntegerField(default=0)),
('up_num', models.IntegerField(default=0)),
('down_num', models.IntegerField(default=0)),
],
options={
'verbose_name': '文章表',
'verbose_name_plural': '文章表',
'db_table': 'bs_article',
},
),
migrations.CreateModel(
name='Article2Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Article')),
],
),
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site_name', models.CharField(max_length=32)),
('site_title', models.CharField(max_length=32)),
('theme', models.CharField(max_length=32)),
],
options={
'verbose_name': '个人站点表',
'verbose_name_plural': '个人站点表',
'db_table': 'bs_blog',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Blog')),
],
options={
'verbose_name': '文章分类表',
'verbose_name_plural': '文章分类表',
'db_table': 'bs_category',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=128)),
('create_time', models.DateField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Article')),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app01.Comment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '点赞点踩表',
'verbose_name_plural': '点赞点踩表',
'db_table': 'bs_comment',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Blog')),
],
options={
'verbose_name': '文章标签表',
'verbose_name_plural': '文章标签表',
'db_table': 'bs_tag',
},
),
migrations.CreateModel(
name='UpAndDown',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_up', models.BooleanField()),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Article')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '点赞点踩表',
'verbose_name_plural': '点赞点踩表',
'db_table': 'bs_upanddown',
},
),
migrations.AddField(
model_name='article2tag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Tag'),
),
migrations.AddField(
model_name='article',
name='blog',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app01.Blog'),
),
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app01.Category'),
),
migrations.AddField(
model_name='article',
name='tags',
field=models.ManyToManyField(through='app01.Article2Tag', to='app01.Tag'),
),
migrations.AddField(
model_name='userinfo',
name='blog',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='app01.Blog'),
),
migrations.AddField(
model_name='userinfo',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='userinfo',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| [
"1141912008@qq.com"
] | 1141912008@qq.com |
70ef33ff15c709e33a7c4f4f2936bf571dc0ec70 | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N391_Perfect_Rectangle.py | 044dd2bf7f6161f54a015998313d9f3a11947085 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | import collections
"""
1. sum of the area of all small rectangles equals to the final large rectangle's area
2. all lbnodes and runodes should not be duplicated
3. the four nodes for the final rectangle should only show one time
"""
class Solution(object):
def isRectangleCover(self, rectangles):
"""
:type rectangles: List[List[int]]
:rtype: bool
"""
leftb,leftu, rightb, rightu = [], [], [], []
area = 0
nodes = []
lbnodes, runodes = [], []
for i, j, p, q in rectangles:
leftb.append((i, j))
rightu.append((p, q))
area += (p - i) * (q - j)
nodes.extend([(i, j), (p, q), (i, q), (p, j)])
lbnodes.append((i, j))
runodes.append((p, q))
l_b = min(leftb)
r_u = max(rightu)
l_u = (l_b[0], r_u[1])
r_b = (r_u[0], l_b[1])
if len(lbnodes) != len(set(lbnodes)) or len(runodes) != len(set(runodes)):
return False
new_area = (r_u[0] - l_b[0]) * (r_u[1] - l_b[1])
if new_area != area:
return False
counter = collections.Counter(nodes)
if counter[l_b] != 1 or counter[l_u] != 1 or counter[r_b] != 1 or counter[r_u] != 1:
return False
for key, value in counter.items():
if value == 1 and key not in [l_b, r_u, l_u, r_b]:
return False
return True
| [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
dead213f276db1180f44d4940987bca1e0b1b23b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/390/usersdata/317/78498/submittedfiles/ex1.py | 3bd112ad4b74a11944a5f2b00b8a9a4df4aca8e1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = input('Digite a: ')
b = input('Digite b: ')
c = input('Digite c: ')
#COMECE A PARTIR DAQUI!
import math
a = int(input('Digite a: '))
b = int(input('Digite b: '))
c = int(input('Digite c: '))
d = (-b**2) - (4*a*c)
print('o delta é {}'.format(d)) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
cf842d0d2b18248cba2a8b5531313703ef3b37b2 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /LightGBM_sklearn_scipy_numpy/source/sklearn/linear_model/tests/test_logistic.py | 031520362a5286d53d7e98566a22c26b61f61a71 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 49,337 | py | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_lr_liblinear_warning():
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
lr = LogisticRegression(solver='liblinear', n_jobs=2)
assert_warns_message(UserWarning,
"'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = 2.",
lr.fit, iris.data, target)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42),
LogisticRegression(C=len(iris.data), solver='saga', tol=1e-2,
multi_class='ovr', random_state=42)
]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ('Logistic Regression supports only liblinear, newton-cg, '
'lbfgs, sag and saga solvers, got wrong_name')
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']:
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ['sag', 'saga']:
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
max_iter=1000,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
solver=solver,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
    """With dual=True the liblinear result depends on random_state:
    same seed gives the same coef_, different seeds differ."""
    # random_state is relevant for liblinear solver only if dual=True
    X, y = make_classification(n_samples=20, random_state=0)
    # max_iter=1 with a tiny tol guarantees the solver stops early, so the
    # random initialization/order actually shows up in the result.
    lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr1.fit(X, y)
    lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr2.fit(X, y)
    lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
    lr3.fit(X, y)
    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    """The analytic gradient of _logistic_loss_and_grad must match a
    finite-difference estimate, on dense and sparse X, with and without
    an intercept coordinate."""
    X_ref, y = make_classification(n_samples=20, random_state=0)
    n_features = X_ref.shape[1]
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)
        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)
        # Second check that our intercept implementation is good
        # (w has one extra, zero-valued coordinate for the intercept, so
        # the loss must equal the intercept-free loss computed above).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
    """_logistic_grad_hess must agree with _logistic_loss_and_grad, and its
    Hessian-vector product must match a least-squares finite-difference
    estimate, on dense and sparse X."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)
        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)
        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)
        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
    # Second check that our intercept implementation is good
    # NOTE: X here is whichever matrix the loop left bound last (the sparse
    # one) — intentional reuse of the loop variable.
    w = np.zeros(n_features + 1)
    loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
    loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
    grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
    assert_array_almost_equal(loss_interp, loss_interp_2)
    assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    """Basic sanity checks for LogisticRegressionCV: single-C CV matches a
    plain fit, and the fitted attributes have the documented shapes."""
    # test for LogisticRegressionCV object
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    # With a single C to "search" over, CV must reproduce the plain fit.
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)
    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)
    # Shapes: (n_classes_paths, n_folds, n_Cs, n_features) with the
    # default 3-fold CV and the single C above.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1,))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_multinomial_logistic_regression_string_inputs():
    """String class labels must behave exactly like numeric labels for
    LogisticRegression and LogisticRegressionCV (multinomial)."""
    # Test with string labels for LogisticRegression(CV)
    n_samples, n_features, n_classes = 50, 5, 3
    X_ref, y = make_classification(n_samples=n_samples, n_features=n_features,
                                   n_classes=n_classes, n_informative=3,
                                   random_state=0)
    # Map integer labels 0/1/2 onto 'bar'/'baz'/'foo'.
    y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y)
    # For numerical labels, let y values be taken from set (-1, 0, 1)
    y = np.array(y) - 1
    # Test for string labels
    lr = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    lr_cv = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
    lr_str = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    lr_cv_str = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
    lr.fit(X_ref, y)
    lr_cv.fit(X_ref, y)
    lr_str.fit(X_ref, y_str)
    lr_cv_str.fit(X_ref, y_str)
    assert_array_almost_equal(lr.coef_, lr_str.coef_)
    assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
    assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
    assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
    assert_equal(sorted(lr_cv_str.classes_), ['bar', 'baz', 'foo'])
    # The predictions should be in original labels
    assert_equal(sorted(np.unique(lr_str.predict(X_ref))),
                 ['bar', 'baz', 'foo'])
    assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))),
                 ['bar', 'baz', 'foo'])
    # Make sure class weights can be given with string labels
    # ('foo' weighted 0 means it should never be predicted).
    lr_cv_str = LogisticRegression(
        solver='lbfgs', class_weight={'bar': 1, 'baz': 2, 'foo': 0},
        multi_class='multinomial').fit(X_ref, y_str)
    assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz'])
def test_logistic_cv_sparse():
    """Dense and CSR inputs must give LogisticRegressionCV identical fits."""
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    # Sparsify the design matrix so the CSR conversion is meaningful.
    X[X < 1.0] = 0.0
    clf_dense = LogisticRegressionCV(fit_intercept=True)
    clf_dense.fit(X, y)
    clf_sparse = LogisticRegressionCV(fit_intercept=True)
    clf_sparse.fit(sp.csr_matrix(X), y)
    assert_array_almost_equal(clf_sparse.coef_, clf_dense.coef_)
    assert_array_almost_equal(clf_sparse.intercept_, clf_dense.intercept_)
    assert_equal(clf_sparse.C_, clf_dense.C_)
def test_intercept_logistic_helper():
    """Fitting an intercept must be equivalent to appending a column of
    ones — up to the regularization of that extra coordinate, which the
    intercept path must NOT apply."""
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)
    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)
    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
    # Same comparison for the Hessian-vector products (note: `grad` is
    # rebound here to a random probe vector, `hess*` to the products).
    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    """On iris, OvR per-class results must be independent of the other
    classes, attributes must have documented shapes, and multinomial must
    beat OvR in training accuracy."""
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape
    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e, before the classes 0 and 1 are
    # conflated) is used for both clf and clf1
    n_cv = 2
    cv = StratifiedKFold(n_cv)
    precomputed_folds = list(cv.split(train, target))
    # Train clf on the original dataset where classes 0 and 1 are separated
    clf = LogisticRegressionCV(cv=precomputed_folds)
    clf.fit(train, target)
    # Conflate classes 0 and 1 and train clf1 on this modified dataset
    clf1 = LogisticRegressionCV(cv=precomputed_folds)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)
    # Ensure that what OvR learns for class2 is same regardless of whether
    # classes 0 and 1 are separated or not
    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10,))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, n_cv, 10))
    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        # sag/saga need many more iterations and a tighter tol to converge.
        max_iter = 2000 if solver in ['sag', 'saga'] else 15
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=max_iter,
            random_state=42, tol=1e-5 if solver in ['sag', 'saga'] else 1e-2,
            cv=2)
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)
        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10,))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
    """All binary solvers should converge to (almost) the same coef_."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False),
        LogisticRegression(solver='lbfgs', fit_intercept=False),
        LogisticRegression(fit_intercept=False),  # liblinear (default)
        LogisticRegression(solver='sag', fit_intercept=False,
                           random_state=42),
        LogisticRegression(solver='saga', fit_intercept=False,
                           random_state=42),
    ]
    for model in models:
        model.fit(X, y)
    # Compare every unordered pair of fitted solvers.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    """All solvers should agree on the multiclass (OvR) coefficients."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-7
    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol),
        LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol),
        LogisticRegression(fit_intercept=False, tol=tol),  # liblinear
        LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                           max_iter=1000, random_state=42),
        LogisticRegression(solver='saga', fit_intercept=False, tol=tol,
                           max_iter=10000, random_state=42),
    ]
    for model in models:
        model.fit(X, y)
    # Compare every unordered pair of fitted solvers.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    """All LogisticRegressionCV solvers must handle class_weight (dict and
    'balanced') identically, for binary and 3-class problems."""
    for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
        n_classes = len(weight)
        for class_weight in (weight, 'balanced'):
            X, y = make_classification(n_samples=30, n_features=3,
                                       n_repeated=0,
                                       n_informative=3, n_redundant=0,
                                       n_classes=n_classes, random_state=0)
            clf_lbf = LogisticRegressionCV(solver='lbfgs', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight)
            clf_ncg = LogisticRegressionCV(solver='newton-cg', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight)
            clf_lib = LogisticRegressionCV(solver='liblinear', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight)
            clf_sag = LogisticRegressionCV(solver='sag', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight,
                                           tol=1e-5, max_iter=10000,
                                           random_state=0)
            clf_saga = LogisticRegressionCV(solver='saga', Cs=1,
                                            fit_intercept=False,
                                            class_weight=class_weight,
                                            tol=1e-5, max_iter=10000,
                                            random_state=0)
            clf_lbf.fit(X, y)
            clf_ncg.fit(X, y)
            clf_lib.fit(X, y)
            clf_sag.fit(X, y)
            clf_saga.fit(X, y)
            # lbfgs is the reference all other solvers are compared to.
            assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
            assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
            assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
            assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
    """sample_weight must behave consistently: ones == None, solvers agree
    with each other, and class_weight {0:1, 1:2} == doubling the sample
    weights of class 1."""
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)
    # Weight 1 for class 0, weight 2 for class 1.
    sample_weight = y + 1
    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        for solver in ['lbfgs', 'liblinear']:
            clf_sw_none = LR(solver=solver, fit_intercept=False,
                             random_state=42)
            clf_sw_none.fit(X, y)
            clf_sw_ones = LR(solver=solver, fit_intercept=False,
                             random_state=42)
            clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
            assert_array_almost_equal(
                clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42)
        clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42)
        clf_sw_n.fit(X, y, sample_weight=sample_weight)
        clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10,
                        random_state=42)
        # ignore convergence warning due to small dataset
        with ignore_warnings():
            clf_sw_sag.fit(X, y, sample_weight=sample_weight)
        clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
                              random_state=42)
        clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        for solver in ['lbfgs', 'liblinear']:
            clf_cw_12 = LR(solver=solver, fit_intercept=False,
                           class_weight={0: 1, 1: 2}, random_state=42)
            clf_cw_12.fit(X, y)
            clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42)
            clf_sw_12.fit(X, y, sample_weight=sample_weight)
            assert_array_almost_equal(
                clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for l1 penalty and l2 penalty with dual=True.
    # since the patched liblinear code is different.
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l1", tol=1e-5, random_state=42)
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
        random_state=42)
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l2", dual=True, random_state=42)
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
        random_state=42)
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
    """Return the 'balanced' class weights of *y* as a {class: weight} dict."""
    labels = np.unique(y)
    weights = compute_class_weight("balanced", labels, y)
    return {label: weight for label, weight in zip(labels, weights)}
def test_logistic_regression_class_weights():
    """class_weight='balanced' must equal passing the explicitly computed
    balanced weight dict, in both multinomial and binary (OvR) modes."""
    # Multinomial case: remove 90% of class 0
    X = iris.data[45:, :]
    y = iris.target[45:]
    solvers = ("lbfgs", "newton-cg")
    class_weight_dict = _compute_class_weight_dictionary(y)
    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
    # Binary case: remove 90% of class 0 and 100% of class 2
    X = iris.data[45:100, :]
    y = iris.target[45:100]
    solvers = ("lbfgs", "newton-cg", "liblinear")
    class_weight_dict = _compute_class_weight_dictionary(y)
    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_convergence_warnings():
    """A liblinear fit capped at too few iterations must warn."""
    X, y = make_classification(n_samples=20, n_features=20, random_state=0)
    model = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, model.fit, X, y)
    # The solver should have used its full iteration budget.
    assert_equal(model.n_iter_, 2)
def test_logistic_regression_multinomial():
    """Multinomial fits from sag/saga/newton-cg must match the lbfgs
    reference, and the CV path must land close to the reference too."""
    # Tests for the multinomial option in logistic regression
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as a referenced
    solver = 'lbfgs'
    ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
    ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
                               fit_intercept=False)
    ref_i.fit(X, y)
    ref_w.fit(X, y)
    assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
    assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
    for solver in ['sag', 'saga', 'newton-cg']:
        clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=2000, tol=1e-7,
                                   )
        clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=2000, tol=1e-7,
                                   fit_intercept=False)
        clf_i.fit(X, y)
        clf_w.fit(X, y)
        assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
        assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
        # Compare solutions between lbfgs and the other solvers
        assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
        assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
        assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
    """The multinomial Hessian-vector product must match a finite-difference
    (least-squares slope) estimate of the gradient's directional change."""
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot targets: assign each sample to the class maximizing X.w^T.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)
    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Samples with a decision function of exactly zero get the negative class.

    Liblinear itself predicts the positive class at zero; scikit-learn must
    not mirror that. See
    https://github.com/scikit-learn/scikit-learn/issues/3600 and
    https://github.com/scikit-learn/scikit-learn/pull/3623.
    """
    X, y = make_classification(n_samples=5, n_features=5, random_state=0)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)
    # With no intercept, all-zero samples give decision_function == 0.
    zeros = np.zeros((5, 5))
    assert_array_equal(clf.predict(zeros), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """LogisticRegressionCV(solver='liblinear') must accept sparse input."""
    features, labels = make_classification(n_samples=10, n_features=5,
                                           random_state=0)
    model = LogisticRegressionCV(solver='liblinear')
    model.fit(sparse.csr_matrix(features), labels)
def test_saga_sparse():
    # Test LogRegCV with solver='saga' works for sparse matrices
    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
    clf = LogisticRegressionCV(solver='saga')
    clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    """intercept_scaling <= 0 must raise ValueError with the exact
    documented message."""
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    """With fit_intercept=False the fitted intercept is exactly zero."""
    model = LogisticRegression(fit_intercept=False)
    model.fit(X, Y1)
    assert_equal(model.intercept_, 0.)
def test_logreg_l1():
    """liblinear and saga must agree on l1-penalized coefficients, and the
    l1 penalty must zero out noise and constant features."""
    # Because liblinear penalizes the intercept and saga does not, we do not
    # fit the intercept to make it possible to compare the coefficients of
    # the two models at convergence.
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20,
                               random_state=0)
    # Append 3 pure-noise features and 2 constant features.
    X_noise = rng.normal(size=(n_samples, 3))
    X_constant = np.ones(shape=(n_samples, 2))
    X = np.concatenate((X, X_noise, X_constant), axis=1)
    lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
                                      fit_intercept=False,
                                      tol=1e-10)
    lr_liblinear.fit(X, y)
    lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                 fit_intercept=False,
                                 max_iter=1000, tol=1e-10)
    lr_saga.fit(X, y)
    assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
    # Noise and constant features should be regularized to zero by the l1
    # penalty
    assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
    assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
    """Same as test_logreg_l1 but on sparse input, plus a check that saga
    gives identical results on sparse and dense representations."""
    # Because liblinear penalizes the intercept and saga does not, we do not
    # fit the intercept to make it possible to compare the coefficients of
    # the two models at convergence.
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20,
                               random_state=0)
    X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
    X_constant = np.zeros(shape=(n_samples, 2))
    X = np.concatenate((X, X_noise, X_constant), axis=1)
    # Threshold the data so the CSR matrix is actually sparse.
    X[X < 1] = 0
    X = sparse.csr_matrix(X)
    lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
                                      fit_intercept=False,
                                      tol=1e-10)
    lr_liblinear.fit(X, y)
    lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                 fit_intercept=False,
                                 max_iter=1000, tol=1e-10)
    lr_saga.fit(X, y)
    assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
    # Noise and constant features should be regularized to zero by the l1
    # penalty
    assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
    assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
    # Check that solving on the sparse and dense data yield the same results
    lr_saga_dense = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                       fit_intercept=False,
                                       max_iter=1000, tol=1e-10)
    lr_saga_dense.fit(X.toarray(), y)
    assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
def test_logreg_cv_penalty():
    """LogisticRegressionCV must pass the penalty through to the final fit."""
    X, y = make_classification(n_samples=50, n_features=20, random_state=0)
    cv_model = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    plain_model = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    cv_model.fit(X, y)
    plain_model.fit(X, y)
    # Same effective l1 penalty => same number of non-zero coefficients.
    assert_equal(np.count_nonzero(cv_model.coef_),
                 np.count_nonzero(plain_model.coef_))
def test_logreg_predict_proba_multinomial():
    """Multinomial (softmax) probabilities must achieve a lower log-loss
    than OvR probabilities and than softmax coefficients pushed through
    the OvR sigmoid normalization."""
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function.
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    """n_iter_ must report exactly max_iter when the solver is forced to
    exhaust its iteration budget (tol=1e-15 prevents early convergence)."""
    # Test that the maximum number of iteration is reached
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0
    solvers = ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']
    for max_iter in range(1, 5):
        for solver in solvers:
            for multi_class in ['ovr', 'multinomial']:
                # liblinear does not support the multinomial objective.
                if solver == 'liblinear' and multi_class == 'multinomial':
                    continue
                lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                        multi_class=multi_class,
                                        random_state=0, solver=solver)
                lr.fit(X, y_bin)
                assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    """Check that ``n_iter_`` has the documented shape for every solver.

    For LogisticRegression (OvR): shape ``(n_classes,)`` — liblinear reports
    a single count. For LogisticRegressionCV: ``(n_classes, n_folds, n_Cs)``,
    collapsing to ``(1, ...)`` for binary targets and for multinomial.
    """
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0
    n_Cs = 4
    n_cv_fold = 2
    for solver in ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']:
        # OvR case
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))
        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
        # multinomial case
        n_classes = 1
        if solver in ('liblinear', 'sag', 'saga'):
            # Skip only the multinomial checks for these solvers.
            # BUGFIX: this used to `break`, which aborted the whole solver
            # loop at 'liblinear' so sag/saga/lbfgs were never tested at all.
            continue
        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))
        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
def test_warm_start():
    """A 1-iteration refit on the same data must stay close to the converged
    solution when warm_start=True and move far from it when False."""
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    # Warm starting does not work with liblinear solver.
    X, y = iris.data, iris.target
    solvers = ['newton-cg', 'sag', 'saga', 'lbfgs']
    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    with ignore_warnings(category=ConvergenceWarning):
                        clf.fit(X, y)
                        coef_1 = clf.coef_
                        # Refit with a single iteration from either the
                        # converged coefficients (warm) or scratch (cold).
                        clf.max_iter = 1
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    # 2.0 is an empirical threshold separating the two cases.
                    if warm_start:
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        assert_greater(cum_diff, 2.0, msg)
def test_saga_vs_liblinear():
    """saga and liblinear must converge to the same coefficients for l1 and
    l2 penalties, on dense (binary iris) and sparse synthetic data."""
    iris = load_iris()
    X, y = iris.data, iris.target
    # Replicate the data so both solvers have enough samples to converge.
    X = np.concatenate([X] * 10)
    y = np.concatenate([y] * 10)
    # Binary subproblem with labels in {-1, +1}.
    X_bin = X[y <= 1]
    y_bin = y[y <= 1] * 2 - 1
    X_sparse, y_sparse = make_classification(n_samples=50, n_features=20,
                                             random_state=0)
    X_sparse = sparse.csr_matrix(X_sparse)
    for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
        for penalty in ['l1', 'l2']:
            n_samples = X.shape[0]
            # alpha=1e-3 is time consuming
            for alpha in np.logspace(-1, 1, 3):
                # C = 1 / (n_samples * alpha) converts the per-sample
                # regularization strength into sklearn's C parameterization.
                saga = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='saga',
                    multi_class='ovr',
                    max_iter=200,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)
                liblinear = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='liblinear',
                    multi_class='ovr',
                    max_iter=200,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)
                saga.fit(X, y)
                liblinear.fit(X, y)
                # Convergence for alpha=1e-3 is very slow
                assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
def test_dtype_match():
    """float32 input must be fitted in float32 (no silent upcast), for
    dense and sparse X, and float32/float64 fits must agree numerically."""
    # Test that np.float32 input data is not cast to np.float64 when possible
    X_32 = np.array(X).astype(np.float32)
    y_32 = np.array(Y1).astype(np.float32)
    X_64 = np.array(X).astype(np.float64)
    y_64 = np.array(Y1).astype(np.float64)
    X_sparse_32 = sp.csr_matrix(X, dtype=np.float32)
    # Only newton-cg preserves float32 at this point.
    for solver in ['newton-cg']:
        for multi_class in ['ovr', 'multinomial']:
            # Check type consistency
            lr_32 = LogisticRegression(solver=solver, multi_class=multi_class)
            lr_32.fit(X_32, y_32)
            assert_equal(lr_32.coef_.dtype, X_32.dtype)
            # check consistency with sparsity
            lr_32_sparse = LogisticRegression(solver=solver,
                                              multi_class=multi_class)
            lr_32_sparse.fit(X_sparse_32, y_32)
            assert_equal(lr_32_sparse.coef_.dtype, X_sparse_32.dtype)
            # Check accuracy consistency
            lr_64 = LogisticRegression(solver=solver, multi_class=multi_class)
            lr_64.fit(X_64, y_64)
            assert_equal(lr_64.coef_.dtype, X_64.dtype)
            assert_almost_equal(lr_32.coef_, lr_64.coef_.astype(np.float32))
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
6c3535ee17c4fa616fa64f8e4dae8094aeda358f | 092dd56a1bf9357466c05d0f5aedf240cec1a27b | /tests/pytests/problems/TestProblemDefaults.py | 1744a84c983e9cb7877f2d329d7c84617e83f61c | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | c5f872c6afff004a06311d36ac078133a30abd99 | refs/heads/main | 2023-08-24T18:27:30.877550 | 2023-06-21T22:03:01 | 2023-06-21T22:03:01 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 1,130 | py | #!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
# @file tests/pytests/problems/TestProblemDefaults.py
#
# @brief Unit testing of Python ProblemDefaults object.
import unittest
from pylith.testing.UnitTestApp import TestComponent
from pylith.problems.ProblemDefaults import (ProblemDefaults, problem_defaults)
class TestProblemDefaults(TestComponent):
    """Unit testing of ProblemDefaults object.
    """
    # Component class under test and its factory function, consumed by the
    # generic TestComponent machinery.
    _class = ProblemDefaults
    _factory = problem_defaults
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestProblemDefaults))
    unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.