Dataset schema (one column per field; length/class summaries as given):

| column | dtype | details |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, includes nulls (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |

Each record below gives these fields pipe-delimited in the order above, with the full `content` field printed between the `extension` cell and the `authors` cell.
| 1ab8aa09439b7feb4e64e7287cafca2b7cd40a94 | cc2fd2bd9947d97cbd351553dad75044f6978cf9 | /driver2.py | e63c6f4cb68c2d2442c635a3df87c770a4d7fbfe | [] | no_license | jsitaraman/MDOF | 7dc1178254e14dabb86c4670f8f619546fe75b5f | 96162e408c887ccf8954e117b5bff016bd359110 | refs/heads/main | 2022-12-24T13:44:51.690443 | 2020-10-08T00:36:51 | 2020-10-08T00:36:51 | 301,932,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py |
#
# driver code for sizing a simple
# rotor.
#
# Design Variables are
# [ Cl -> mean Lift Coefficient,
# R -> Radius of the rotor,
# Omega -> Rotor frequency
# sigma -> Rotor solidity]
#
# Objective:
# Maximize pay-load with a constraint of maximum power available
#
import numpy as np
from AeroSolver import AeroModel
from WeightSolver import WeightModel
from MDOF import MDOFinputs
from MDOF import FunctionsAndConstraints
from MDOF import optimizer
#
modelParams={'units': 'SI',
'rho':1.2256,
'bladeDensity':600,
'emptyWeight':49000,
'payLoad':0,
'grossWeight':82000}
designvar=['Cl','R','Omega','sigma']
#
inputObject=MDOFinputs(designvar)
inputs=inputObject.inputVar()
aero=AeroModel('momentumTheory',modelParams)
weight=WeightModel('simpleWeight',modelParams)
#
# create the input to output mapping
#
# input--->----- Aero Model
# | |
# ----- Weight Model ---> output
#
x1=aero.getModel(inputs)
outputs=weight.getModel(x1)
#
# create the objective function and
# constraints from vehicle response
#
fc=FunctionsAndConstraints(inputObject,inputs,outputs)
objective=fc.get('function','PayLoad',fsign=-1)
gradient=fc.get('gradient','PayLoad',fsign=-1)
ineqconstraint=[]
ineqconstraintgrad=[]
eqconstraint=[]
eqconstraintgrad=[]
eqconstraint.append(fc.get('constraint','Power',constraintValue=1e6))
eqconstraintgrad.append(fc.get('constraintgrad','Power',constraintValue=1e6))
#
# initialize optimizer object
# provide it the objectives, gradients and constraints
#
opt2=optimizer(objective,gradient,eqconstraint,eqconstraintgrad,ineqconstraint,ineqconstraintgrad)
#
# starting values and
# bounds
#
x0=np.array([0.6,7.5,25.0,0.08],'d')
lb=[0.1,6.0,10.0,0.06]
ub=[1.0,9.0,30.0,0.12]
#
# perform actual optimization
# functions and gradients are only
# evaluated here
#
x=opt2.optimize(x0,lb,ub,1.0,method='SLSQP')
#
print('designNames :',designvar)
print('values :',x)
#
resp=inputObject.getResponse()
print('stateNames :',resp['varNames'])
print('values :',resp['values'])
#
| ["jaina@onyx.erdc.hpc.mil"] | jaina@onyx.erdc.hpc.mil |
| 657691f0e148e8ba93dc360f4d8b4aad40fc643d | b249363c964248fbf8e567e14ecd7c1fc7fde5b2 | /server/newscraper.py | 376c3bd9cd2c28bb5f7a900018ff55e13babbd86 | [] | no_license | Justintlai/watersports | fec7e8d9c75ab5b6085f83273207bf851bab5afb | 0b443299884e4f558c6a7d802365b3c843a4f7de | refs/heads/master | 2021-07-11T21:28:39.253446 | 2017-10-09T00:12:23 | 2017-10-09T00:12:23 | 106,175,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,080 | py |
import random
import time
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from pws import Bing
from pws.google import strip_tags
word_file = "words.txt"
WORDS = open(word_file).read().splitlines()
def main():
# c = gnp_fixed.get_google_news_query("earth")
# print(c)
# return
# cnn_paper = newspaper.build('http://cnn.com/search/?q=trump')
r = Bing.search_news(10, 0, True, 'h', query='github')
print(r)
return
for article in cnn_paper.articles:
print(article.url)
if "trump" in str(article).lower():
article.download()
article.parse()
article.nlp()
print(article.summary)
break
def get_articles_test(topic, skipNum):
r = [get_article_test(topic, skipNum) for x in range(0, 10)]
return sorted(r, key=getKey, reverse=True)
def generate_news_url(query, num, start, recent, country_code):
query = '+'.join(query.split())
url = 'https://www.google.com/search?q=' + query + '&num=' + num + '&start=' + start
url += '&tbm=nws#q=' + query + '&tbas=0&tbs=sbd:1&tbm=nws'
if recent in ['h', 'd', 'w', 'm', 'y']:
url += '&tbs=qdr:' + recent
if country_code is not None:
url += '&gl=' + country_code
return url
def get_info(i):
pass
def convert_to_epoch_time(param):
time_ago = [int(s) for s in param.split(" ") if s.isdigit()][0]
if "second" in param:
multiplier = 1
elif "minute" in param:
multiplier = 60
elif "hour" in param:
multiplier = 60 * 60
elif "day" in param:
multiplier = 60 * 60 * 24
elif "week" in param:
multiplier = 60 * 60 * 24 * 7
elif "month" in param:
multiplier = 60 * 60 * 24 * 30
elif "year" in param:
multiplier = 60 * 60 * 24 * 365.25
else:
try:
pattern = '%d %b %Y'
return int(time.mktime(time.strptime(param, pattern)))
except Exception as e:
print(e)
raise Exception("Unexpected time duration! {}".format(str(param)))
seconds_ago = multiplier * time_ago
now_epoch_time = int(time.time())
return now_epoch_time - seconds_ago
def scrape_news_result(soup):
raw_results = soup.find_all('div', {'class': 'g'})
results = []
for result in raw_results:
link = result.find('a').get('href')[7:]
raw_link_text = result.find('a')
link_text = strip_tags(str(raw_link_text))
raw_link_info = result.find('div', attrs={'class': 'st'})
link_info = strip_tags(str(raw_link_info))
raw_source = result.find('span', attrs={'class': 'f'})
raw_source = strip_tags(str(raw_source)).split(' - ')
source = raw_source[0]
time = convert_to_epoch_time(raw_source[1])
additional_links = dict()
# Crazy hack! Fix it. + Buggy!
try:
raw_a_links = result.find_all('a')[1:]
if raw_a_links:
raw_source = list(map(strip_tags, list(map(str, result.find_all('span', attrs={'class': 'f'})[1:]))))
for idx in range(len(raw_a_links) - 1):
additional_links[strip_tags(str(raw_a_links[idx]))] = (
raw_a_links[idx].get('href'), raw_source[idx])
except Exception as e:
print(e)
temp = {'link': link,
'link_text': link_text,
'link_info': link_info,
'additional_links': additional_links,
'source': source,
'time': time,
}
results.append(temp)
return results
def scrape_news_result_bing(soup):
raw_results = soup.find_all('div', attrs={'class': 'newsitem'})
results = []
for result in raw_results:
link = result.find('a').get('href')
raw_link_text = result.find('a')
link_text = strip_tags(str(raw_link_text))
        additional_links = dict()  # For consistency
raw_link_info = result.find('span', attrs={'class': 'sn_snip'})
link_info = strip_tags(str(raw_link_info))
raw_source = result.find('cite', attrs={'class': 'sn_src'})
source = strip_tags(str(raw_source))
raw_time = result.find('span', attrs={'class': 'sn_tm'})
time = convert_to_epoch_time(strip_tags(str(raw_time)))
temp = {'link': link,
'link_text': link_text,
'link_info': link_info,
'additional_links': additional_links,
'source': source,
'time': time,
}
results.append(temp)
return results
def generate_news_url_bing(query, first, recent, country_code):
"""(str, str) -> str
A url in the required format is generated.
"""
query = '+'.join(query.split())
url = 'http://www.bing.com/news/search?q=' + query + '&first' + first
if recent in ['h', 'd', 'w', 'm',
                  'y']:  # A True/False would be enough. This is just to maintain consistency with google.
url = url + '&qft=sortbydate%3d%221%22'
if country_code is not None:
url += '&cc=' + country_code
return url
def search_news(query, num=10, start=0, recent=None, country_code=None):
# url = generate_news_url_bing(query, str(start), recent, country_code)
    url = generate_news_url(query, str(num), str(start), recent, country_code)
soup = BeautifulSoup(requests.get(url).text, "html.parser")
if "Our systems have detected unusual traffic from your computer network." in str(soup):
pass
results = scrape_news_result(soup)
# results = scrape_news_result_bing(soup)
# raw_total_results = soup.find('div', attrs={'class': 'sd'}).string
# total_results = int(str(raw_total_results).replace(",","").replace("About ","").replace(" results","").strip())
temp = {'results': results,
'url': url,
'num': num,
'start': start,
'search_engine': 'google',
'total_results': 0,
'country_code': country_code,
}
return temp
def getKey(item):
return item["time"]
def get_articles(topic, skipNum):
r = search_news(str(topic), 10, skipNum)
return sorted(r["results"], key=getKey, reverse=True)
def get_random_date():
year = random.choice(range(2001, 2017))
month = random.choice(range(1, 13))
day = random.choice(range(1, 29))
t = datetime(year, month, day)
birth_date = (t - datetime(1970, 1, 1)).total_seconds()
return str(birth_date)
def get_article_test(topic, skipNum):
temp = {'link': str(skipNum),
'link_text': "".join([random.choice(WORDS) + " " for x in range(0, 10)]),
'link_info': "link_info",
'additional_links': "additional_links",
'source': str(topic),
'time': get_random_date(),
}
return temp
if __name__ == "__main__":
start = "1232131"
end = "1232131"
r = get_articles("slack", 0)
for i in r:
print(i["time"])
pass
| ["alexanderfanthome@googlemail.com"] | alexanderfanthome@googlemail.com |
| d5c6bf8fc78a0d43c339a77d1a7de8e8d671ddf9 | 3448030a8da01a4c66ecbf52372bb71e929cd873 | /seed2xprv.py | 1dbe7932ab0ce4e2fdfb6c9e0ad1a2ef2a9a86cb | ["MIT"] | permissive | brunokrauss/crypto-key-derivation | 193662816a4a3f1e27a0144157fe1a056ec59c23 | 78285fca158aaf31a276406286d265318ee359ee | refs/heads/master | 2023-06-24T21:30:54.786702 | 2021-07-06T12:34:23 | 2021-07-06T12:34:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py |
#!./venv/bin/python
from lib import mbp32, utils
xprv = mbp32.XKey.from_seed(bytes.fromhex(utils.one_line_from_stdin()))
print(xprv.to_xkey().decode('ascii'))
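# A usage sketch (not part of the file): the script reads one line of
# hex-encoded seed from stdin and prints the BIP32 root key. Assuming lib.mbp32
# implements BIP32 correctly, the standard test-vector seed should reproduce
# the well-known master xprv:
#   $ echo 000102030405060708090a0b0c0d0e0f | ./seed2xprv.py
#   xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi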
| ["errge@nilcons.com"] | errge@nilcons.com |
| ba6306487c83faeec431bad2bb06879a4608adfe | 84cd1e3493cf0020d39a38c90c5eb7e7b103cf54 | /card.py | 51012e2d75aa1f6030fccf472e6a64e272e98c51 | [] | no_license | jbrunsting/poker-player | cf7c8b12a56ce1d52d330bb1ceeb42814476ad90 | 0e81c9f6bacfbd17bda16b573e3b0641c14e72cc | refs/heads/master | 2022-04-21T13:16:48.133194 | 2020-04-20T16:52:11 | 2020-04-20T16:52:11 | 254,972,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py |
SUIT_INDICES = "shdc"
MIN_CARD = 2
MAX_CARD = 14
NUM_SUITS = 4
class Card:
def __init__(self, suit, val):
self.suit = suit
self.val = val
def __str__(self):
unicode_card = ord('🂠')
if self.val <= 10:
unicode_card += self.val
elif self.val == 11:
unicode_card += 11
elif self.val == 12:
unicode_card += 13
elif self.val == 13:
unicode_card += 14
elif self.val == 14:
unicode_card += 1
unicode_card += self.suit * 16
return chr(unicode_card)
def __eq__(self, other):
return self.suit == other.suit and self.val == other.val
def __lt__(self, other):
if self.val == other.val:
return self.suit < other.suit
return self.val < other.val
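# A usage sketch (not part of the file), assuming suits are indexed 0-3 in the
# order of SUIT_INDICES ("shdc"): U+1F0A0 is the card back, each suit occupies
# a block of 16 code points, and the value offsets skip the Knight card.
if __name__ == "__main__":
    print(Card(0, 14))  # ace of spades (val 14 maps to offset +1)
    print(Card(1, 12))  # queen of hearts (offset +13 skips the Knight at +12)
    print(sorted([Card(0, 14), Card(2, 3)])[0].val)  # 3: lower value sorts first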
| ["jbrunsting@uwaterloo.ca"] | jbrunsting@uwaterloo.ca |
| 7b96378092ff574b8986da643f93a35f77f65f61 | ab43c27eeac4866aef9b9282d912f24b8238803e | /app/views/__init__.py | 8db42f13b7048140839fd65a2b555069cc2f1a09 | [] | no_license | RubenVanEldik/solar | 31c3018322841f271fa70883eb78759141345dd9 | 71d48fea12791a72558ddc0d512a1f6b7e8f4574 | refs/heads/master | 2021-10-18T22:33:05.663817 | 2019-02-14T19:59:07 | 2019-02-14T19:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py |
from .charts import *
from .index import *
from .json import *
from .settings import *
from .user_management import *
| ["74729@protonmail.com"] | 74729@protonmail.com |
| 962d3aac4478494866be2adb37058437ec68edc0 | 7922540e2993b3f0ee27649becc6881991f26f8c | /cryptosite/urls.py | eabd33a490445bd108023a6544aaa120d5743498 | [] | no_license | anirban-21/Cryptosite | 044cbf5f0a0a23362b1fe561bfd9cd312120fb6b | 86ec45bdd0e9a2c20f03196c3efe01badedd9238 | refs/heads/master | 2022-12-25T05:32:29.508577 | 2020-10-08T09:29:36 | 2020-10-08T09:29:36 | 302,286,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py |
"""cryptosite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('crypto.urls')),
]
| ["anirbanraha08@gmail.com"] | anirbanraha08@gmail.com |
| a772928bcbefc46afefe57c8cef08919a0e31326 | 8942d1537271a4f6707db6e81a786ca13451e469 | /_examples/example_pipelines/skullstrip_afni_ants_example/porcupine_generated_pipeline.py | 6ead358190e3d19af7f227b9bb95aa7b885fddfc | ["MIT"] | permissive | GiraffeTools/Documentation | ee78b565ba489e20172e23b88e1cbf6424e2f467 | 40fe4a141a444aa5f016a5d5073ddffba77187d0 | refs/heads/master | 2022-11-07T16:18:36.793918 | 2020-08-16T20:44:37 | 2020-08-16T20:44:37 | 174,414,952 | 1 | 1 | MIT | 2022-10-19T22:26:58 | 2019-03-07T20:26:15 | Python | UTF-8 | Python | false | false | 3,347 | py |
#This is a Nipype generator. Warning, here be dragons.
import sys
import nipype
import nipype.pipeline as pe
import nipype.interfaces.io as io
import nipype.interfaces.ants as ants
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
WorkingDirectory = "~/Porcupipelines/ThisStudy"
#Generic datagrabber module that wraps around glob in an
NodeHash_30bb950 = pe.Node(io.S3DataGrabber(outfields=['outfiles']), name = 'NodeName_30bb950')
NodeHash_30bb950.inputs.anon = True
NodeHash_30bb950.inputs.bucket = 'openneuro'
NodeHash_30bb950.inputs.bucket_path = 'ds000101/ds000101_R2.0.0/uncompressed/'
NodeHash_30bb950.inputs.local_directory = '/tmp'
NodeHash_30bb950.inputs.sort_filelist = True
NodeHash_30bb950.inputs.template = 'sub-01/anat/sub-01_T1w.nii.gz'
#Wraps command **N4BiasFieldCorrection**
NodeHash_1ea4b50 = pe.Node(interface = ants.N4BiasFieldCorrection(), name = 'NodeName_1ea4b50')
NodeHash_1ea4b50.inputs.copy_header = False
NodeHash_1ea4b50.inputs.dimension = 3
NodeHash_1ea4b50.inputs.num_threads = 4
NodeHash_1ea4b50.inputs.save_bias = True
#Wraps command **3dUnifize**
NodeHash_291d6d0 = pe.Node(interface = afni.Unifize(), name = 'NodeName_291d6d0')
NodeHash_291d6d0.inputs.outputtype = 'NIFTI_GZ'
#Wraps command **3dSkullStrip**
NodeHash_1ddfa30 = pe.Node(interface = afni.SkullStrip(), name = 'NodeName_1ddfa30')
NodeHash_1ddfa30.inputs.outputtype = 'NIFTI_GZ'
#Wraps command **3dcalc**
NodeHash_3bd6370 = pe.Node(interface = afni.Calc(), name = 'NodeName_3bd6370')
NodeHash_3bd6370.inputs.expr = 'a*step(b)'
NodeHash_3bd6370.inputs.outputtype = 'NIFTI_GZ'
#Wraps command **fslmaths**
NodeHash_49ddb10 = pe.Node(interface = fsl.Threshold(), name = 'NodeName_49ddb10')
NodeHash_49ddb10.inputs.args = '-bin'
NodeHash_49ddb10.inputs.thresh = 1.e-3
#Wraps command **3dUnifize**
NodeHash_229c200 = pe.Node(interface = afni.Unifize(), name = 'NodeName_229c200')
NodeHash_229c200.inputs.gm = True
NodeHash_229c200.inputs.outputtype = 'NIFTI_GZ'
#Generic datasink module to store structured outputs
NodeHash_3207070 = pe.Node(interface = io.DataSink(), name = 'NodeName_3207070')
NodeHash_3207070.inputs.base_directory = '/tmp'
#Create a workflow to connect all those nodes
analysisflow = nipype.Workflow('MyWorkflow')
analysisflow.connect(NodeHash_30bb950, 'outfiles', NodeHash_1ea4b50, 'input_image')
analysisflow.connect(NodeHash_1ea4b50, 'output_image', NodeHash_291d6d0, 'in_file')
analysisflow.connect(NodeHash_291d6d0, 'out_file', NodeHash_1ddfa30, 'in_file')
analysisflow.connect(NodeHash_1ea4b50, 'bias_image', NodeHash_3207070, 'bias_image')
analysisflow.connect(NodeHash_291d6d0, 'out_file', NodeHash_3bd6370, 'in_file_a')
analysisflow.connect(NodeHash_1ddfa30, 'out_file', NodeHash_3bd6370, 'in_file_b')
analysisflow.connect(NodeHash_3bd6370, 'out_file', NodeHash_49ddb10, 'in_file')
analysisflow.connect(NodeHash_3bd6370, 'out_file', NodeHash_229c200, 'in_file')
analysisflow.connect(NodeHash_49ddb10, 'out_file', NodeHash_3207070, 'out_mask')
analysisflow.connect(NodeHash_229c200, 'out_file', NodeHash_3207070, 'out_file')
#Run the workflow
plugin = 'MultiProc' #adjust your desired plugin here
plugin_args = {'n_procs': 1} #adjust to your number of cores
analysisflow.write_graph(graph2use='flat', format='png', simple_form=False)
analysisflow.run(plugin=plugin, plugin_args=plugin_args)
| ["timvanmourik@gmail.com"] | timvanmourik@gmail.com |
| b2cfeb0e568ca923f191916dc1ad85a9a2cad65e | 499b280cdaf8714aae193fd9b1aac95926def3b4 | /2017038076_조준희 2주차과제.py | 906467128e359f1fea673d747c8a1161ab7eb171 | [] | no_license | Joonehee-JO/python | 2972b52bb2472decffb12a4d13b556ac86adc75b | 857d15153f10a583bc4ad6a17ecf4f09fdcb68c5 | refs/heads/master | 2022-11-05T05:41:57.374225 | 2020-06-23T12:58:40 | 2020-06-23T12:58:40 | 264,381,305 | 1 | 0 | null | 2020-05-16T07:16:20 | 2020-05-16T07:16:19 | null | UTF-8 | Python | false | false | 218 | py |
mem = int(input("참석자의 수를 입력하세요 : "))   # "Enter the number of attendees: "
chicken = mem * 1   # one chicken per person
beer = mem * 2      # two beers per person
cake = mem * 4      # four cakes per person
print("치킨의 수 : %d\n" %chicken)   # "Number of chickens: %d"
print("맥주의 수 : %d\n" %beer)      # "Number of beers: %d"
print("케잌의 수 : %d\n" %cake)      # "Number of cakes: %d"
| ["noreply@github.com"] | noreply@github.com |
| 5d241edba0322488b4b7f84cee1a16c8cd0b1bd6 | cdd0fa35e6867932d9821b54f3e9897306139d1a | /myPracticeProblems/ordered_dict.py | ac21f387d95bb5f5a10a305313ea69109d20cc7d | [] | no_license | jisshub/python-development | cfd4246981999d5bc8cfe4cc15a57ebfada2691e | 392e7362bf8e83930d410984e985d73a0a2f40d1 | refs/heads/master | 2021-01-05T02:25:12.896814 | 2020-03-23T16:05:25 | 2020-03-23T16:05:25 | 240,844,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py |
from collections import OrderedDict
ordered_dict = OrderedDict()
# Re-assigning the same key overwrites the value: an OrderedDict keeps one
# entry per key, ordered by first insertion.
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
ordered_dict["jissmon"] = 33
print(ordered_dict)  # OrderedDict([('jissmon', 33)])
new_dict = dict()
new_dict["a"] = 44
new_dict["a"] = 44  # overwrites the previous "a"
new_dict["b"] = 44
print(new_dict)  # {'a': 44, 'b': 44}
| ["jissmon476@gmail.com"] | jissmon476@gmail.com |
| 88844da5196efc28ee33108e41edf931aadaebd1 | cf54ddb10342bfac8f868eddbfcba11f9729200f | /regex.py | 6c2685af92327db7e3c69460aa3e061f6e3419ab | [] | no_license | winters23/projects-python | 589a88a7d2828685807685646c1b58e1ce32dfac | d920cc62cd1ccc79d43a6869916713045509fd9e | refs/heads/master | 2020-04-23T06:38:55.883746 | 2019-03-22T08:06:29 | 2019-03-22T08:06:29 | 170,980,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py |
import re
txt="The rain is in Spain"
x=re.search("^The.*Spain$",txt)
print(x)
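# A short follow-up sketch (not in the original file): re.search returns a
# Match object, whose span and matched text can be inspected directly.
if x:
    print(x.span())   # (0, 20)
    print(x.group())  # The rain is in Spain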
| ["jain.diksha2398@gmail.com"] | jain.diksha2398@gmail.com |
| b1a1e15b3a0558a5a77872235e3522ea33bab5cc | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /rcc/models/jaxb_element.py | 49e4e3b8f1e30a23cafa6a6b5a8c3fbc12ef4791 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,874 | py |
# coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from rcc.configuration import Configuration
class JAXBElement(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'QName',
'value': 'object',
'nil': 'bool',
'global_scope': 'bool',
'type_substituted': 'bool'
}
attribute_map = {
'name': 'name',
'value': 'value',
'nil': 'nil',
'global_scope': 'globalScope',
'type_substituted': 'typeSubstituted'
}
def __init__(self, name=None, value=None, nil=None, global_scope=None, type_substituted=None, local_vars_configuration=None): # noqa: E501
"""JAXBElement - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._value = None
self._nil = None
self._global_scope = None
self._type_substituted = None
self.discriminator = None
if name is not None:
self.name = name
if value is not None:
self.value = value
if nil is not None:
self.nil = nil
if global_scope is not None:
self.global_scope = global_scope
if type_substituted is not None:
self.type_substituted = type_substituted
@property
def name(self):
"""Gets the name of this JAXBElement. # noqa: E501
:return: The name of this JAXBElement. # noqa: E501
:rtype: QName
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this JAXBElement.
:param name: The name of this JAXBElement. # noqa: E501
:type: QName
"""
self._name = name
@property
def value(self):
"""Gets the value of this JAXBElement. # noqa: E501
:return: The value of this JAXBElement. # noqa: E501
:rtype: object
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this JAXBElement.
:param value: The value of this JAXBElement. # noqa: E501
:type: object
"""
self._value = value
@property
def nil(self):
"""Gets the nil of this JAXBElement. # noqa: E501
:return: The nil of this JAXBElement. # noqa: E501
:rtype: bool
"""
return self._nil
@nil.setter
def nil(self, nil):
"""Sets the nil of this JAXBElement.
:param nil: The nil of this JAXBElement. # noqa: E501
:type: bool
"""
self._nil = nil
@property
def global_scope(self):
"""Gets the global_scope of this JAXBElement. # noqa: E501
:return: The global_scope of this JAXBElement. # noqa: E501
:rtype: bool
"""
return self._global_scope
@global_scope.setter
def global_scope(self, global_scope):
"""Sets the global_scope of this JAXBElement.
:param global_scope: The global_scope of this JAXBElement. # noqa: E501
:type: bool
"""
self._global_scope = global_scope
@property
def type_substituted(self):
"""Gets the type_substituted of this JAXBElement. # noqa: E501
:return: The type_substituted of this JAXBElement. # noqa: E501
:rtype: bool
"""
return self._type_substituted
@type_substituted.setter
def type_substituted(self, type_substituted):
"""Sets the type_substituted of this JAXBElement.
:param type_substituted: The type_substituted of this JAXBElement. # noqa: E501
:type: bool
"""
self._type_substituted = type_substituted
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JAXBElement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, JAXBElement):
return True
return self.to_dict() != other.to_dict()
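# A minimal usage sketch (not part of the generated file; it assumes the rcc
# package is importable so the default Configuration() can be constructed):
#
#     element = JAXBElement(name='example', value={'answer': 42}, nil=False)
#     element.to_dict()
#     # {'name': 'example', 'value': {'answer': 42}, 'nil': False,
#     #  'global_scope': None, 'type_substituted': None}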
| ["thomas.yu@sagebase.org"] | thomas.yu@sagebase.org |
| ce72e0f8bc1244eb6aab5eb1508279184c6fec5c | 1e5bf133026ff7afa9659652cc3ec1023f98dc1c | /326 - Power of Three/PythonSolution2.py | 14af1e155eee448238449ceaea5fa410eb42ddab | [] | no_license | DishantK1807/Leetcode-Practice | 51ca7d38889e1e2351968b8802185117ab629e78 | 094b33b38f81ce2b5572cdfecc1f5fb5bbd55816 | refs/heads/master | 2022-04-08T14:01:24.534693 | 2020-03-11T22:13:54 | 2020-03-11T22:13:54 | 232,390,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py |
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        # 1853020188851841 == 3**32; because 3 is prime, its divisors are
        # exactly the powers of 3, so n divides it iff n is a power of three.
        return n > 0 and 1853020188851841 % n == 0
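# For comparison, a loop-based check (not from the repo) gives the same answer
# without the precomputed 3**32 constant:
def is_power_of_three(n: int) -> bool:
    # divide out factors of 3; n is a power of three exactly when we reach 1
    if n < 1:
        return False
    while n % 3 == 0:
        n //= 3
    return n == 1

assert is_power_of_three(27) and not is_power_of_three(45)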
| ["dishant.khanna1807@gmail.com"] | dishant.khanna1807@gmail.com |
| 8dbddfb4997cc91e26fe303a38174baf39a53b42 | 53eaf9bb812f6f9317a143a08f60bb5515e7de8c | /setup.py | c401f0283d85762481b013d5265207f65a2113b7 | [] | no_license | JeffMv/jmm-util-libs | 103d77972ee7d3f196593396b4a78a6ce7db6617 | 137696ec9126ea35512dcb29500d3d7c6ffdde56 | refs/heads/master | 2023-07-11T20:04:18.782120 | 2023-07-01T17:44:45 | 2023-07-01T17:44:45 | 209,101,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,046 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
# import sys
# from shutil import rmtree
from setuptools import find_packages, setup, Command
# from jmm import version
# Package meta-data.
NAME = 'jmm'
DESCRIPTION = 'A collection of personal utility functions.'
URL = 'https://github.com/JeffMv/jmm-util-libs'
EMAIL = 'jeffrey.mvutu@gmail.com'
AUTHOR = 'Jeffrey Mvutu Mabilama'
REQUIRES_PYTHON = '>=3.0.0'
VERSION = "0.1.3.2.0"
# What packages are required for this module to be executed?
REQUIRED = [
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
'conversion': ['pandas'],
'selenium': ['selenium', 'requests'],
'parsing': ['bs4', 'lxml'],
'advanced': ['PIL'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
long_description = '\n' + fh.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as fh:
exec(fh.read(), about)
else:
about['__version__'] = VERSION
# class UploadCommand(Command):
# """Support setup.py upload."""
# description = 'Build and publish the package.'
# user_options = []
# @staticmethod
# def status(s):
# """Prints things in bold."""
# print('\033[1m{0}\033[0m'.format(s))
# def initialize_options(self):
# pass
# def finalize_options(self):
# pass
# def run(self):
# try:
# self.status('Removing previous builds…')
# rmtree(os.path.join(here, 'dist'))
# except OSError:
# pass
# self.status('Building Source and Wheel (universal) distribution…')
# os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
# self.status('Uploading the package to PyPI via Twine…')
# os.system('twine upload dist/*')
# self.status('Pushing git tags…')
# os.system('git tag v{0}'.format(about['__version__']))
# os.system('git push --tags')
# sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
# include_package_data=True,
# license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
# 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
# 'Programming Language :: Python :: Implementation :: CPython',
# 'Programming Language :: Python :: Implementation :: PyPy'
],
# # $ setup.py publish support.
# cmdclass={
# 'upload': UploadCommand,
# },
)
| ["jeffrey.mvutu@gmail.com"] | jeffrey.mvutu@gmail.com |
| b68437cefbc5ec307c1eb1133f394abe68e27b9f | b2dbc5643ed240f462090aaa796e528a39175de6 | /FCN_8s_modified_code/read_MITSceneParsingData.py | cf47c498dd8f91c5d5393b68ca9dcdb392aeb543 | [] | no_license | wonikjang/Projects_Python | 20c889c315d625d855ab55358958ceff590ebea8 | d5bd73f03f224ac00f845bf16dac86e56be5b3be | refs/heads/master | 2021-09-20T17:22:55.483973 | 2018-08-13T09:03:51 | 2018-08-13T09:03:51 | 89,209,677 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,335 | py |
__author__ = 'charlie'
import numpy as np
import os
import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob
import TensorflowUtils as utils
# DATA_URL = 'http://sceneparsing.csail.mit.edu/data/ADEChallengeData2016.zip'
DATA_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
def read_dataset(data_dir):
pickle_filename = "MITSceneParsing.pickle"
pickle_filepath = os.path.join(data_dir, pickle_filename)
if not os.path.exists(pickle_filepath):
utils.maybe_download_and_extract(data_dir, DATA_URL, is_zipfile=True)
SceneParsing_folder = os.path.splitext(DATA_URL.split("/")[-1])[0]
result = create_image_lists(os.path.join(data_dir, SceneParsing_folder))
print ("Pickling ...")
with open(pickle_filepath, 'wb') as f:
pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
else:
print ("Found pickle file!")
with open(pickle_filepath, 'rb') as f:
result = pickle.load(f)
training_records = result['training']
validation_records = result['validation']
del result
return training_records, validation_records
def create_image_lists(image_dir):
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
directories = ['training', 'validation']
image_list = {}
for directory in directories:
file_list = []
image_list[directory] = []
# file_glob = os.path.join(image_dir, "images", directory, '*.' + 'jpg')
file_glob = image_dir + "/images/" + directory + '/*.' + 'jpg'
        ### ====== Modification : \ or \\ --> /
file_globbed = glob.glob(file_glob)
file_globbed_slash = [ file_glo.replace("\\","/") for file_glo in file_globbed ]
file_list.extend(file_globbed_slash)
if not file_list:
print('No files found')
else:
for f in file_list:
filename = os.path.splitext(f.split("/")[-1])[0]
                ### ====== Modification : \ or \\ --> /
# annotation_file = os.path.join(image_dir, "annotations", directory, filename + '.png')
annotation_file = image_dir + "/annotations/" + directory + '/'+ filename + '.png'
if os.path.exists(annotation_file):
record = {'image': f, 'annotation': annotation_file, 'filename': filename}
image_list[directory].append(record)
else:
print("Annotation file not found for %s - Skipping" % filename)
random.shuffle(image_list[directory])
no_of_images = len(image_list[directory])
print ('No. of %s files: %d' % (directory, no_of_images))
return image_list
#directory = 'training'
#image_dir = 'Data_zoo/MIT_SceneParsing/ADEChallengeData2016'
#import os
#
#
#file_glob = image_dir + "/images/" + directory + '/*.' + 'jpg'
#file_glob
#globbed = glob.glob(file_glob)
#globbed
#
#filename = os.path.splitext(file_glob.split("/")[-1])[0]
#
#
#
##f : Data_zoo/MIT_SceneParsing/ADEChallengeData2016\images\training\ADE_train_00009281.jpg
#f : Data_zoo/MIT_SceneParsing/ADEChallengeData2016/images/training/ADE_train_00009281.jpg
#
#filename : ADEChallengeData2016\images\training\ADE_train_00009281   # f with the ".jpg" removed
#
#
#
#annotation_file : Data_zoo/MIT_SceneParsing/ADEChallengeData2016\
# annotations\
# training\
#
# ADEChallengeData2016\images\training\ADE_train_00009281.png
#
#
#Annotation file not found for ADEChallengeData2016\images\training\ADE_train_00009281 - Skipping
#
#
#image_dir : Data_zoo/MIT_SceneParsing/ADEChallengeData2016
#f : Data_zoo/MIT_SceneParsing/ADEChallengeData2016/images/training\ADE_train_00007983.jpg
# filename : training\ADE_train_00007983
# annotation_file : Data_zoo/MIT_SceneParsing/ADEChallengeData2016/annotations/training/training\ADE_train_00007983.png
#
#
#Annotation file not found for training\ADE_train_00007983 - Skipping
| ["noreply@github.com"] | noreply@github.com |
| 39b00d8dc0c6e5dbaab58ed8f4cb471672f8834e | 8d9af8b801aecdba9c1387ccae4b69b52ed8ccad | /read_reddit.py | 1887b3c89da5301a2348aa3b4a4ec372f83a47de | [] | no_license | beaugaines/mongo_for_devs | f4ab616f2b908c100fc130f9290bab6363a5f773 | 08c154700098538e6084a55c17fcee9e2b67abd5 | refs/heads/master | 2018-12-27T22:45:13.577163 | 2013-05-12T20:39:48 | 2013-05-12T20:39:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py |
import json
import urllib2
import pymongo
from pymongo import MongoClient
# connect to db
connection = MongoClient('localhost', 27017)
# connect to reddit db - which does not exist, so - POOF! - it will be created
db = connection.reddit
stories = db.stories
# get the phlegm clot of information that is the Reddit homepage
# had to add a Bob Dobbs header, kept getting 429 otherwise
url = 'http://www.reddit.com/r/technology/.json'
hdrs = { 'User-Agent' : 'Bob Dobbs' }
req = urllib2.Request(url, headers=hdrs)
reddit_page = urllib2.urlopen(req)
# parse json into python objects
parsed = json.loads(reddit_page.read())
# iterate through the items on the page
for item in parsed['data']['children']:
# insert items into Mongo
stories.insert(item['data'])
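# A rough Python 3 port of the above (the original targets Python 2): urllib2
# became urllib.request, and pymongo's insert() was superseded by insert_one().
# Sketch only; the endpoint and headers are kept from the original.
#
#     import json
#     import urllib.request
#     from pymongo import MongoClient
#
#     stories = MongoClient('localhost', 27017).reddit.stories
#     req = urllib.request.Request('http://www.reddit.com/r/technology/.json',
#                                  headers={'User-Agent': 'Bob Dobbs'})
#     with urllib.request.urlopen(req) as resp:
#         parsed = json.loads(resp.read())
#     for item in parsed['data']['children']:
#         stories.insert_one(item['data'])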
| ["beaugaines@gmail.com"] | beaugaines@gmail.com |
| 5a22824ddb5f687f87dce78032033bddc8a463e1 | 79c0b1ed8aff76df7ea783e0f290576e48f52a29 | /itertools/repeat.py | 26886dd51523e340ca16da10cad58ebce09b24b3 | [] | no_license | meghnavarma0/DSA-Python | 8e57f145c4bba1e786457b0a1f68009a4750d42f | 6b584ffc3c6d8cf4a66e373d723506647b81cc9b | refs/heads/master | 2021-05-22T14:51:51.102164 | 2020-08-21T03:27:09 | 2020-08-21T03:27:09 | 252,970,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py |
from itertools import repeat
a = list(repeat(15, 7))
print(a)
| ["meghnavarma0@gmail.com"] | meghnavarma0@gmail.com |
| 79a2bc716579e8f0dc9a8c32e78c97fd20ea8542 | a81c14aa16d358c3508abe71aa72506b10bb0d06 | /models/stock_loader.py | 7cc370f4cbaf90933b280820a506ce3416a73474 | [] | no_license | willwallis/StockNotify | d046d5b0a108f008f89982afc5f93b83128522a1 | 38cfffbeec52079e06d54d1b35db63be0378bf03 | refs/heads/master | 2020-05-18T13:35:36.373138 | 2015-07-28T22:03:40 | 2015-07-28T22:03:40 | 39,859,842 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py |
#!/usr/bin/env python
# Importing some of Google's AppEngine modules:
from google.appengine.ext import webapp
import os
import csv
# Import modules used by this controller
from models import *
# CLASS TO LOAD LIST OF US STOCKS
class LoadStocks(webapp.RequestHandler):
def get(self):
inputid = self.request.get('action')
if inputid == 'Load':
inputfile = os.path.join(os.path.dirname(__file__), '../data' ,self.request.get('file'))
reader = csv.reader(open(inputfile, 'rb'), delimiter=',', quotechar='"')
counter = 0
for row in reader:
stockrecord = USStockList()
stockrecord.exchange = row[0]
stockrecord.symbol = row[1]
stockrecord.name = row[2]
stockrecord.comboname = row[3]
stockrecord.put()
counter = counter + 1
self.response.out.write('%s records added' % (str(counter)))
elif inputid == 'Delete':
USStocks = db.GqlQuery("SELECT * "
"FROM USStockList ")
counter = 0
for record in USStocks:
record.delete()
counter = counter + 1
self.response.out.write('%s records deleted' % (str(counter)))
elif inputid == 'Count':
USStocks = db.GqlQuery("SELECT * "
"FROM USStockList ")
counter = USStocks.count(limit=10000)
self.response.out.write('%s records' % (str(counter)))
else:
self.response.out.write('Please add an ?Action of Load or Delete')
| ["will@knewto.com"] | will@knewto.com |
| 20f48de587f36ac22f7b751403edee7311221783 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/python/ops/linalg/linalg.py | 22c87ea697b7d702dec0fb5fe037ea1157fdaf58 | ["Apache-2.0"] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.linalg namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.linalg.linalg_impl import *
from tensorflow.python.ops.linalg.linear_operator import *
from tensorflow.python.ops.linalg.linear_operator_composition import *
from tensorflow.python.ops.linalg.linear_operator_diag import *
from tensorflow.python.ops.linalg.linear_operator_full_matrix import *
from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
# pylint: enable=wildcard-import
# Linear algebra ops.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
# pylint: disable=protected-access
slogdet = gen_linalg_ops._log_matrix_determinant
# pylint: disable=protected-access
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
# Seal API.
del absolute_import
del array_ops
del division
del gen_linalg_ops
del linalg_ops
del math_ops
del ops
del print_function
del special_math_ops
| ["hanshuobest@163.com"] | hanshuobest@163.com |
| 46a6be98cd37c203fd6efd53b180795a67a6b079 | ecff7ab1d962ff895b3e9a0b4239329dd03ce966 | /webpage_text/__init__.py | b20daaa188f87b44418af0b010d45a46826360d1 | ["MIT"] | permissive | MSLNZ/pr-webpage-text | ea91e138b3e476688a07210e2b0625cb23538ff8 | 7790e8bbeb5cfbb9c0d7ac508903acd7414ff9d5 | refs/heads/main | 2022-09-15T12:26:29.947169 | 2022-08-05T21:21:26 | 2022-08-05T21:21:26 | 227,973,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,399 | py |
import os
import re
import sys
import argparse
import configparser
from gevent import monkey
monkey.patch_all()
import gevent
from gevent import pywsgi
import requests
from flask import (
Flask,
Markup,
render_template,
request,
send_from_directory,
)
gevent.get_hub().NOT_ERROR += (KeyboardInterrupt,)
PORT = 1683
endpoint_dict = {}
default_dict = {}
default_endpoint = 'defaults'
app = Flask(__name__)
@app.errorhandler(404)
def page_not_found(*args):
return render_template('page_not_found.html', names=endpoint_dict.keys(), url=request.host_url), 404
@app.route('/favicon.ico')
def favicon():
return send_from_directory('static', 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/<name>', methods=['GET', 'PUT'])
def page(name):
if name not in endpoint_dict:
return page_not_found()
if request.method == 'PUT':
data = request.json
data['text'] = Markup(re.sub(r'\n|\\n', '<br>', data['text']))
endpoint_dict[name].update(data)
return render_template('page.html', title=name, **endpoint_dict[name])
@app.route('/'+default_endpoint, methods=['GET'])
def defaults():
return default_dict
def run(*args):
"""Run the web server.
This function is only meant to be called from the command line via the
`webpage-text` entry point (see setup.py).
"""
host = '0.0.0.0'
text = ''
size = 100
refresh = 1.0
use_flask = False
enable_log = False
parser = argparse.ArgumentParser(description='Start a web server to display text on a web page.')
parser.add_argument(
'-c', '--config',
help='path to a configuration file (INI format)'
)
parser.add_argument(
'-H', '--host', default=host,
help='hostname or IP address of the server [default={}]'.format(host)
)
parser.add_argument(
'-p', '--port', default=PORT, type=int,
help='port to run the server on [default={}]'.format(PORT)
)
parser.add_argument(
'-e', '--endpoints', nargs='*',
help='the names of the URL endpoints'
)
parser.add_argument(
'-t', '--text', default=text, nargs='*',
help='initial text to display at each endpoint [default={!r}]'.format(text)
)
parser.add_argument(
'-s', '--size', default=size, type=int,
help='font size (in px) of the text [default={}]'.format(size)
)
parser.add_argument(
'-r', '--refresh', default=refresh, type=float,
help='number of seconds for a web browser to wait before automatically '
'refreshing the web page [default={}]'.format(refresh)
)
parser.add_argument(
'-l', '--log', action='store_true', help='show INFO log messages from the gevent WSGI server'
)
parser.add_argument(
'-f', '--flask', action='store_true', help='use the flask development server in debug mode'
)
if not args:
args = sys.argv[1:]
args = parser.parse_args(args)
if args.config is not None:
if not os.path.isfile(args.config):
sys.exit('FileNotFoundError: ' + args.config)
ini = configparser.ConfigParser()
ini.read(args.config)
host = ini.get('server', 'host', fallback=host)
port = ini.getint('server', 'port', fallback=PORT)
endpoints = [e.strip() for e in ini.get('server', 'endpoints', fallback='').split(',') if e.strip()]
use_flask = ini.getboolean('server', 'use_flask', fallback=use_flask)
enable_log = ini.getboolean('server', 'enable_log', fallback=enable_log)
text = ini.get('text', 'initial', fallback=text)
size = ini.getint('text', 'size', fallback=size)
refresh = ini.getfloat('text', 'refresh', fallback=refresh)
else:
host = args.host
port = args.port
endpoints = args.endpoints
use_flask = args.flask
enable_log = args.log
text = ' '.join(args.text) if args.text else args.text
size = args.size
refresh = args.refresh
if not endpoints:
sys.exit('You must specify at least 1 endpoint')
for endpoint in endpoints:
if endpoint == default_endpoint:
sys.exit('The name of an endpoint cannot be {!r} because this name is reserved'.format(default_endpoint))
print('Added endpoint http://{}:{}/{}'.format(host, port, endpoint))
endpoint_dict[endpoint] = {'text': text, 'size': size, 'refresh': refresh}
default_dict['size'] = size
default_dict['refresh'] = refresh
if use_flask:
# use the development server from flask
app.run(host=host, port=port, debug=True)
else:
print('Server running on http://{}:{}/ (Press CTRL+C to quit)'.format(host, port))
log = 'default' if enable_log else None
server = pywsgi.WSGIServer((host, port), application=app.wsgi_app, log=log)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def put(text, endpoint, host='127.0.0.1', port=PORT, size=None, refresh=None):
"""Update the text that is displayed on a web page.
The URL of the web page to update follows the ``http://host:port/endpoint`` nomenclature.
Parameters
----------
text : str
The text to display on the web page.
endpoint : str
The endpoint of the web page's URL.
host : str, optional
The hostname or IP address of the web server.
port : int, optional
The port number of the web server.
size : int, optional
The font size of the `text`.
refresh : float, optional
        The number of seconds a web browser will wait before it automatically refreshes.
"""
url = 'http://{}:{}/'.format(host, port)
try:
default = default_dict[url]
except KeyError:
default = requests.get(url+default_endpoint).json()
default_dict[url] = {'size': default['size'], 'refresh': default['refresh']}
if size is None:
size = default['size']
if refresh is None:
refresh = default['refresh']
reply = requests.put(url+endpoint.lstrip('/'), json={'text': text, 'size': size, 'refresh': refresh})
if not reply.ok:
matches = re.findall(r'/(\w+)</p>', reply.content.decode())
raise ValueError('Invalid endpoint {!r}. Must be one of: {}'.format(endpoint, ', '.join(matches)))
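# A usage sketch (not part of the module): assuming a server was started with
# an endpoint named 'status' (e.g. `webpage-text --endpoints status`), another
# process could update the displayed text with:
#
#     from webpage_text import put
#     put('experiment running', 'status', size=80, refresh=2.0)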
| ["joe.borbely@gmail.com"] | joe.borbely@gmail.com |
| 2a07c90c6668c494a7b8505df7af945936e34846 | eb137779fddf4a6be05bdf4b569ac359f6ce39d0 | /src/utils/forecast_type.py | e17da491ee322a8bc8edfd169a18931e60bffde2 | ["MIT"] | permissive | cmu-delphi/flu-contest | 4ad767f151776eccb7f5dfe6b47261b6c8cf6e50 | 23e1b41313f8863a7732b5861df7c70edd1ee3ad | refs/heads/main | 2022-12-06T23:05:59.795151 | 2020-09-05T19:20:13 | 2020-09-05T19:20:13 | 75,351,258 | 0 | 3 | MIT | 2020-09-05T19:20:15 | 2016-12-02T02:09:19 | Python | UTF-8 | Python | false | false | 47 | py |
class ForecastType:
WILI = 1
HOSP = 2
| ["jiaxians@andrew.cmu.edu"] | jiaxians@andrew.cmu.edu |
| 42ced21b32a7c64522b3bdfd8516de69b7762605 | 0688175e67a23facf6ca86b51fd776bcdd6a78d9 | /getIssue.py | d0b5f2e48e192b7fe39e37b69a1b762a56ebd037 | [] | no_license | MG1333051/IssueSummary | e10d1ecc15bb64e0f004e9a71009229243160c30 | d068763dd240399045011d4cfa744977c59dc9c6 | refs/heads/master | 2021-01-12T21:45:15.184634 | 2015-11-17T01:55:55 | 2015-11-17T01:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,792 | py |
'''
@author: Lenovo
'''
import github
import csv
import urllib
import re
gh = github.GitHub()
#print(gh.users('michaelliao').get())
headers = ['number', 'id', 'reporter', 'created_at', 'updated_at', 'closed_at', 'state',
'locked', 'assignee', 'milestone', 'comments', 'label_name', 'title', 'pull_request',
'user', 'labels', 'html_url', 'labels_url', 'url', 'events_url', 'diff', 'patch',
'comments_url', 'body']
patch_headers = ['number', 'ncommit', 'hash', 'author', 'date', 'subject', 'nfiles', 'ninsertions',
'ndeletions', 'file','changes', 'insertions', 'deletions', 'locations', 'roots']
def parseDiffFile(diff):
location = ''
root = ''
for line in diff:
if line.startswith('@@'):
location = location + line.split('@@')[1] + ';'
root = root + line.split('@@')[2][1:-1] + ';'
return location, root
def parseCommit(commit):
hash = commit[0][5:12]
# print('hash: ', hash)
author = commit[1][5:]
# print('author: ', author)
date = commit[2][5:]
# print('date: ', date)
subject = commit[3][5:]
index = 4
for i in range(index, len(commit)):
if commit[i]=='---':
break
else:
subject += commit[i]
# print('subject: ', subject)
index = i+1
# print(i)
nfiles = 0
ninsertions = 0
ndeletions = 0
files = []
changes = []
insertions = []
deletions = []
# print('get changed files.')
for j in range(index, len(commit)):
# print('j:', j)
if '|' not in commit[j]:
temp = commit[j].strip().split(',')
for str in temp:
if 'changed' in str:
nfiles = re.findall(r"\d+\.?\d*",str)[0]
if 'insertions' in str:
ninsertions = re.findall(r"\d+\.?\d*",str)[0]
if 'deletions' in str:
ndeletions = re.findall(r"\d+\.?\d*",str)[0]
break
else:
temp = commit[j].split('|')
files.append(temp[0].strip())
changes.append(re.findall(r"\d+\.?\d*",temp[1]))
# print(re.findall(r"\d+\.?\d*",temp[1]))
insertions.append(temp[1].count('+'))
deletions.append(temp[1].count('-'))
index = j + 2
file_index = []
for k in range(index,len(commit)):
if commit[k].startswith('diff --'):
file_index.append(k)
file_index.append(k)
# print ('number of files: ', len(file_index)-1)
locations = []
roots = []
for fi in range(0,(len(file_index)-1)):
# print(commit[file_index[fi]:file_index[fi+1]])
# print('parse file: ', fi+1)
location, root = parseDiffFile(commit[file_index[fi]:file_index[fi+1]])
locations.append(location)
roots.append(root)
results = {'hash':hash, 'author':author, 'date':date, 'subject':subject, 'nfiles':nfiles,
'ninsertions':ninsertions, 'ndeletions':ndeletions,
'files':files, 'changes':changes, 'insertions':insertions,
'deletions':deletions, 'locations':locations, 'roots':roots}
return results
def getPatch(patch):
patch_content = urllib.urlopen(patch)
content = patch_content.read().decode("utf8")
lines = content.splitlines()
# print('patch content length: ', len(lines))
commit_index = []
for n in range(0,len(lines)):
if lines[n].startswith('From '):
commit_index.append(n)
commit_index.append(n)
# print ('number of commits: ', len(commit_index)-1)
patch = []
for ci in range(0, (len(commit_index)-1)):
# print(commit_index[ci+1] - commit_index[ci])
# print(len(lines[commit_index[ci]:commit_index[ci+1]]))
# print('parse commit :' , ci+1)
patch.append(parseCommit(lines[commit_index[ci]:commit_index[ci+1]]))
return patch
with open('G:/numpy_patch.csv', 'ab') as patch_file:
pf_csv = csv.DictWriter(patch_file, patch_headers)
pf_csv.writeheader()
with open('G:/numpy.csv','ab') as f:
f_csv = csv.DictWriter(f, headers)
f_csv.writeheader()
for i in range(1,124):
print('page: ', i)
issues = gh.repos('numpy')('numpy').issues.get(state='closed', page=i)
for issue in issues:
try:
print('issue: ', issue['number'])
reporter = issue['user']['login']
label_names = []
for label in issue['labels']:
label_names.append(label['name'])
sep =';'
label_name = sep.join(label_names)
                diff_url = ''
                patch_url = ''
if 'pull_request' in issue.keys():
diff_url = issue['pull_request']['diff_url']
patch_url = issue['pull_request']['patch_url']
patch = getPatch(patch_url)
# print('writing_number of commits: ', len(patch))
for commit in patch:
commit_basic = {'hash':commit['hash'], 'author':commit['author'], 'date':commit['date'],
'subject':commit['subject'], 'nfiles':commit['nfiles'],
'ninsertions':commit['ninsertions'], 'ndeletions':commit['ndeletions']}
# print('writing_number of files: ', len(commit['files']))
files = commit['files']
changes = commit['changes']
insertions = commit['insertions']
deletions = commit['deletions']
locations = commit['locations']
roots = commit['roots']
for nn in range(0, len(commit['files'])):
# print('writing file: ', nn+1, files[nn])
commit_file = {'file':files[nn], 'changes':changes[nn],'insertions':insertions[nn],
'deletions':deletions[nn], 'locations':locations[nn], 'roots':roots[nn]}
commit = {'number':issue['number'], 'ncommit':len(patch)}
commit.update(commit_basic)
commit.update(commit_file)
# print(commit)
with open('G:/numpy_patch.csv', 'ab') as p:
p_csv = csv.DictWriter(p, patch_headers)
p_csv.writerow(commit)
issue_part = {'reporter':reporter, 'label_name':label_name,
'diff':diff_url, 'patch':patch_url}
issue_all = {}
issue_all.update(issue)
issue_all.update(issue_part)
f_csv.writerow(issue_all)
except Exception as e:
print(issue['number'], ': ', e)
with open ('G:/exception.txt', 'a') as ef:
ef.write(str(issue['number']) + ': ' + str(e) + '\t\n')
| ["xiaobena@live.cn"] | xiaobena@live.cn |
| f8c4c166b16e951159465c40816bd0caddb19816 | 95e6314483e6f238628299150317cf2255437924 | /bin/process3_calcFCPV.py | d5ffdc95537af0fde0969398936db27841058420 | ["MIT"] | permissive | AWHKU/CombiPIPE | 53d526cd13690ec56cd8417b129ab3e1cd0ef488 | 87c8aa0bf0747ee159b66db3211fb2acf81787b1 | refs/heads/master | 2023-08-15T16:27:32.360598 | 2021-10-21T13:38:51 | 2021-10-21T13:38:51 | 258,078,043 | 0 | 1 | MIT | 2021-10-21T13:38:52 | 2020-04-23T02:53:19 | Python | UTF-8 | Python | false | false | 2,235 | py |
import sys, os, math
from scipy import stats
import numpy as np
dir,sampleInfo,nwise=sys.argv[1],sys.argv[2],int(sys.argv[3])
def grabInfo(sampleInfo):
file=open(sampleInfo,"r")
refd={}
for ln in file:
ln=ln.strip("\r\n").split(",")
if ln[-1] != "":
if ln[-1] not in refd:
refd[ln[-1]]=[ln[-2]]
else:
refd[ln[-1]].append(ln[-2])
return refd
refd=grabInfo(sampleInfo)
filelist=[fn for fn in os.listdir(dir) if fn.startswith("BC")]
def CPMd(filelist,nwise):
cpmd={}
for fn in filelist:
file=open("/"+dir+"/"+fn,"r")
#file=open(dir+"/"+fn,"r")
samp=fn.split("_")[1].split(".")[0]
for y in range (8):
file.readline()
for ln in file:
ln=ln.strip("\r\n").split(",")
if ln[nwise] not in cpmd:
cpmd[ln[nwise]]={}
cpmd[ln[nwise]][samp]=ln[-1]
else:
cpmd[ln[nwise]][samp]=ln[-1]
return cpmd
cpmd=CPMd(filelist,nwise)
def lgFC(cpmd,refd):
tot=len(refd["0"])*len(refd["1"])
fcd={}
for com in cpmd:
sfc=0
for a in refd["0"]:
for b in refd["1"]:
if a in cpmd[com] and b in cpmd[com]:
key=b+"_"+a
fc=float(cpmd[com][a])-float(cpmd[com][b])
sfc+=fc
if com not in fcd:
fcd[com]={}
fcd[com][key]=fc
else:
fcd[com][key]=fc
if com in fcd:
avfc=sfc/float(len(fcd[com]))
fcd[com]["avg"]=avfc
return fcd
fcd=lgFC(cpmd,refd)
def npval(cpmd,refd):
pvd={}
for com in cpmd:
pvd[com]=[[],[]]
for a in refd["0"]:
if a in cpmd[com]:
pvd[com][0].append(float(cpmd[com][a]))
for i in refd["1"]:
if i in cpmd[com]:
pvd[com][1].append(float(cpmd[com][i]))
for com in pvd:
a=np.array(pvd[com][0])
b=np.array(pvd[com][1])
(ts,pv)=stats.ttest_ind(a,b,equal_var=False)
        # note: this computes -ln(pv + 1), not the -log10(p-value) that the CSV header implies
        npv=-(math.log(pv+1))
pvd[com].append(npv)
return pvd
pvd=npval(cpmd,refd)
outfile=open("FCPV.csv","w")
row="key"
for i in range(nwise):
row+=",guideRNA"+str(nwise-i)
row+=",log2FC,-log10pval"
outfile.write(row+"\r\n")
for key in pvd:
row=key.strip("_")
k=key.split("_")
for i in k:
row+=","+i
if key in fcd and key in pvd:
row+=","+str(fcd[key]["avg"])+","+str(pvd[key][-1])
outfile.write(row+"\r\n")
elif key in fcd and key not in pvd:
row+=","+str(fcd[key]["avg"])+",nan"
outfile.write(row+"\r\n")
outfile.close()
| ["noreply@github.com"] | noreply@github.com |
| 1a53e8becfa201f64650948dfd44782938e5f479 | a4a28e2289fe2b41e3c89f57a3c160bb79520f90 | /TLSW_pred/pythonhttp.py | b633584baf90f87c0b264fdcf061cc4002ed9ca1 | ["Apache-2.0"] | permissive | allinpaybusiness/ACS | 69d1e3aca5b1e81f5fa41209c44746cb06a1819a | b9c8fa2a8e316ade366f261b0b7dc54a61fa11ba | refs/heads/master | 2021-01-19T21:28:58.539487 | 2017-06-07T02:51:10 | 2017-06-07T02:51:10 | 82,506,792 | 0 | 14 | null | 2017-06-07T07:39:35 | 2017-02-20T02:10:16 | Python | UTF-8 | Python | false | false | 850 | py |
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 14:34:07 2017
@author: s
"""
import socket
import re
HOST, PORT = '', 8888
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(10)
print("Serving HTTP on port %s ..." % PORT)
while True:
client_connection, client_address = listen_socket.accept()
request = client_connection.recv(1024)
# request.split("\r\n")
st = re.split(" /|,|\r\n".encode('utf-8'),request)
print(st[1])
# request.split("\r\n".encode('utf-8')), request)
http_response = """\
HTTP/1.1 200 OK
Hello, World!
"""
client_connection.sendall(http_response.encode('utf-8'))
client_connection.close()
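# Quick smoke test (hypothetical, assuming the server is running locally):
#   curl http://localhost:8888/hello
# The split above then prints the token after the first " /" (roughly the
# request path, e.g. b'hello HTTP/1.1'), and the client receives "Hello, World!".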
|
[
"fengyz@allinpay.com"
] |
fengyz@allinpay.com
|
14d5c5fbf7781549cf6e357efcc0716b5f1896ae
|
72e8e6829ab51ef041f9388f09e303a0cb07ff58
|
/CNN/ProgressBar.py
|
cdee2977e2c7ffbc081afd704f699487ac023d66
|
[] |
no_license
|
jgslunde/Projects
|
4c3eefdcf4984e52ed7725a5606ff08f10ff528e
|
ca927120e8524f24ea194edcf5ab476290ae8e72
|
refs/heads/master
| 2021-09-23T23:39:00.053312
| 2018-09-29T09:47:35
| 2018-09-29T09:47:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
import time
import sys
import subprocess
class ProgressBar:
def __init__(self, total, update_freq):
self.total = total
self.update_freq = update_freq
        rows, columns = subprocess.check_output(['stty', 'size']).split()  # terminal size
        self.rows = int(rows.decode("utf-8"))
        self.columns = int(columns.decode("utf-8"))
        self.bar_width = (self.columns - 12)
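    # Note (not in the original): 'stty size' assumes a POSIX terminal; on
    # Python 3.3+, shutil.get_terminal_size() is a portable alternative.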
def update_progress_bar(self, progress):
sys.stdout.write("\b"*(self.bar_width+12))
current_bars = ((progress*self.bar_width) + self.total//2 )//self.total
sys.stdout.write(" %5.1f%%" %(float(progress)/(self.total)*100))
sys.stdout.write(" [%s]" % (" "*self.bar_width))
sys.stdout.flush()
sys.stdout.write("\b"*(self.bar_width+1))
sys.stdout.write(":"*current_bars)
sys.stdout.flush()
if (self.total - progress) < self.update_freq:
sys.stdout.write("\n")
if __name__ == "__main__":
N = 345345
update_freq = N//100
    start_time1 = time.perf_counter()  # time.clock() no longer exists on Python 3.8+
for i in range(N+1):
x = N/(i-2.5)
    end_time1 = time.perf_counter()
    bar = ProgressBar(N, update_freq)
    start_time2 = time.perf_counter()
for i in range(N+1):
x = N/(i-2.5)
if i%update_freq == 0:
bar.update_progress_bar(i)
    end_time2 = time.perf_counter()
print("%d loops.\nNo bar = %.6f seconds\nWith bar = %.6f seconds" % (N, (end_time1-start_time1), (end_time2-start_time2)))
|
[
"jonas.s.lunde@outlook.com"
] |
jonas.s.lunde@outlook.com
|
09267857397c18219dcb468ef2b121a2fea8f574
|
c83e356d265a1d294733885c373d0a4c258c2d5e
|
/mayan/apps/locales/managers.py
|
b80a4b8368ef07497f74fee837058582ac4e31a0
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3
|
4160809d2c96707a196b8c94ea9e4df1a119d96a
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
refs/heads/master
| 2023-08-21T23:36:41.230179
| 2021-10-02T03:51:12
| 2021-10-02T03:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from django.contrib.auth import get_user_model
from django.db import models
class UserLocaleProfileManager(models.Manager):
def get_by_natural_key(self, user_natural_key):
User = get_user_model()
try:
user = User.objects.get_by_natural_key(user_natural_key)
except User.DoesNotExist:
raise self.model.DoesNotExist
return self.get(user__pk=user.pk)
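# Hypothetical usage, assuming a UserLocaleProfile model wired to this manager:
#   profile = UserLocaleProfile.objects.get_by_natural_key('alice')
# The manager resolves the natural key (typically the username) to a User row
# first, then returns that user's locale profile.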
|
[
"79801878+Meng87@users.noreply.github.com"
] |
79801878+Meng87@users.noreply.github.com
|
5682f517f5c1795e283d9fbc3d17cb77b2c67060
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/Wyx_w_M_w_Sob_to_Wz_focus/IN_Sob_k15_EroM/Sob_k15_s001_EroM/pyr_Tcrop255_p60_j15/pyr_5s/L3/step10_a.py
|
0112794823a31b19968d9437b4bb2fa90a4fd65b
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,850
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                           ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                    ### split the path so we can find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")              ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] used to strip the leading "step1x_"; keeping a meaningful name later seemed fine, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] used to strip the leading "mask_" (added because a Python module name cannot start with a digit); the automatic ordering turned out to be acceptable, so it was changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)  ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_5side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the folder ONE LEVEL ABOVE result_dir! A nested exp_dir works fine too~
For example, with exp_dir = "6_mask_unet/your_chosen_name", every result_dir lives under:
    6_mask_unet/your_chosen_name/result_a
    6_mask_unet/your_chosen_name/result_b
    6_mask_unet/your_chosen_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_W_gt_W_ch_norm_v2
use_loss_obj = [G_sobel_k15_erose_M_loss_info_builder.set_loss_target("UNet_Wz").copy()]
#############################################################
### Build an empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
##################################
### 1side1
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side2
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_2__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_2__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side3
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_3__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_3__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_3__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side4
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_4__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_4__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_4__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_1side_4__2side_4__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Run this file directly (press F5 or type `python step10_b1_exp_obj_load_and_train_and_test.py` with nothing after it!) so execution does not fall through to the code below meant for step10_b_subprocess.py~~~
ch032_1side_1__2side_1__3side_1_4side_1_5s1.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to typing `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` at the cmd prompt
eval(sys.argv[1])
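# Hypothetical invocation matching the eval(sys.argv[1]) hook above (the exp name
# is one of the builders defined in this file):
#   python step10_a.py "ch032_1side_2__2side_1__3side_1_4side_1_5s1.build().run()"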
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
a161394d8fa001e2248fd31803456fde533a463f
|
3ca80a9a55652e21a7c69008842e86ac72f744d3
|
/real_python_courses/Class_str_repr.py
|
cd0b1a98ca16820e38536896cb95a5832737b7bc
|
[] |
no_license
|
pshapard/New_projects
|
29ef9f7793e137124d33ca461e3dd686023789c5
|
8cf0e244cde07dd0f750de6a064deba970c0cf7a
|
refs/heads/master
| 2022-02-11T06:13:35.278903
| 2022-01-25T19:21:14
| 2022-01-25T19:21:14
| 253,964,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
class Car:
def __init__(self, color, mileage):
self.color = color
self.mileage = mileage
    def __repr__(self):
        # an unambiguous representation that mirrors the constructor call
        return f'Car({self.color!r}, {self.mileage!r})'

my_car = Car('red', 37281)
print(my_car)
#__str__ ==> easy to read, for human consuption
#__repr__ ==> unambiguous, more for developers to read
import datetime
today = datetime.date.today()
print(str(today))
print(repr(today))
print(today)
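# A minimal sketch (not part of the original file) showing both hooks side by
# side; the Bike class is hypothetical, named to avoid clashing with Car above.
class Bike:
    def __init__(self, color):
        self.color = color
    def __str__(self):
        return f'a {self.color} bike'   # friendly, for print()/str()
    def __repr__(self):
        return f"Bike({self.color!r})"  # unambiguous, for repr()/debugging
print(str(Bike('blue')))    # a blue bike
print(repr(Bike('blue')))   # Bike('blue')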
|
[
"pshapard@outlook.com"
] |
pshapard@outlook.com
|
ce7a32bc88de6857019ede124e9f27c3f04d7544
|
490f7e5712b2f297adce60ff6c448243076b092b
|
/third_party/onnx/onnx/backend/test/case/node/pow.py
|
8eb61089119a1f48348fb6d45846d85f7cd77273
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] |
permissive
|
olibd/pytorchDocker
|
8725993df15fa28b528ef42f98f2014e4206de0d
|
5ae4c37d4f081128dc1e6d24b158adebd23a3c10
|
refs/heads/v0.4.0DockerBuild
| 2022-12-25T06:11:37.436842
| 2019-12-09T19:13:48
| 2019-12-09T19:23:43
| 226,938,896
| 0
| 1
|
NOASSERTION
| 2022-12-13T23:17:18
| 2019-12-09T18:23:06
|
C++
|
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Pow(Base):
@staticmethod
def export():
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = np.power(x, y) # expected output [1., 32., 729.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_example')
x = np.arange(60).reshape(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = np.power(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_pow')
@staticmethod
def export_pow_broadcast():
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
broadcast=1,
)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([2]).astype(np.float32)
z = np.power(x, y) # expected output [1., 4., 9.]
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_bcast')
node = onnx.helper.make_node(
'Pow',
inputs=['x', 'y'],
outputs=['z'],
broadcast=1,
axis=0,
)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
y = np.array([2, 3]).astype(np.float32)
z = np.array([[1, 4, 9], [64, 125, 216]]).astype(np.float32)
expect(node, inputs=[x, y], outputs=[z],
name='test_pow_bcast_axis0')
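# Note (not in the original): the explicit `broadcast`/`axis` attributes used in
# export_pow_broadcast belong to older ONNX opsets; from opset 7 onward, Pow
# broadcasts numpy-style without them.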
|
[
"olibd@hotmail.com"
] |
olibd@hotmail.com
|
cd5ab0ff640c9c8555b6af3aad71c70091b91ec4
|
2760effda15d884af413ca2a35809d03fabea377
|
/lc-1222.py
|
fb44d86b4ecfc652aaac148671173ef0b40bbe00
|
[] |
no_license
|
UtsavRaychaudhuri/leetcode
|
31943b98ad89d96d72ee4b6b1d1c8d70429d1e1f
|
77a13580fd6231830558b1cf8c84f8b3b62b99d0
|
refs/heads/master
| 2020-11-27T18:02:23.712639
| 2020-09-29T19:39:49
| 2020-09-29T19:39:49
| 229,552,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
class Solution(object):
def __init__(self):
self.outarray=[]
def queensAttacktheKing(self, queens, king):
"""
:type queens: List[List[int]]
:type king: List[int]
:rtype: List[List[int]]
"""
self.checkleft(king,queens)
self.checkup(king,queens)
self.checkdown(king,queens)
self.checkright(king,queens)
self.checkdiagonal(king,queens)
return self.outarray
def checkleft(self,king,queens):
j=king[1]
for i in range(king[0],-1,-1):
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkright(self,king,queens):
i=king[0]
        for j in range(king[1],8):  # board squares run 0..7
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkup(self,king,queens):
j=king[1]
        for i in range(king[0],8):  # board squares run 0..7
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkdown(self,king,queens):
i=king[0]
for j in range(king[1],-1,-1):
if [i,j] in queens:
self.outarray.append([i,j])
break
def checkdiagonal(self,king,queens):
i=king[0]
j=king[1]
while(i>=0 and j>=0):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
i-=1
j-=1
i,j=king[0],king[1]
        while(i<=7 and j<=7):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
i+=1
j+=1
i,j=king[0],king[1]
        while(j>=0 and i<=7):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
i+=1
j-=1
i,j=king[0],king[1]
        while(i>=0 and j<=7):
if [i,j] in queens and [i,j] not in self.outarray:
self.outarray.append([i,j])
break
j+=1
i-=1
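# Hypothetical check against LeetCode 1222's first example (the order of the
# output squares may differ from the reference answer):
#   Solution().queensAttacktheKing(
#       [[0,1],[1,0],[4,0],[0,4],[3,3],[2,4]], [0,0])   # -> [[0,1],[1,0],[3,3]]
# Converting `queens` to a set of tuples would avoid the O(n) list scans per square.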
|
[
"utsav@pdx.edu"
] |
utsav@pdx.edu
|
2686e9a70975ed195e375cd6f2821ad404bd6b2b
|
c6db96a3ee9035862e7951bb944f7448335bc313
|
/smc/elements/profiles.py
|
741537250e60591eb88cb41b2a7fb0b3f3b451c4
|
[
"Apache-2.0"
] |
permissive
|
azgaviperr/fp-NGFW-SMC-python
|
6e8c1b4e7013eadbfacff490eff6dda78db5f014
|
f8eb3cf725730c7bc7be89c05034ac1eb74163ac
|
refs/heads/master
| 2023-08-18T13:51:20.350868
| 2021-10-14T17:41:09
| 2021-10-14T17:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,206
|
py
|
"""
Profiles are templates used in other parts of the system to provide default
functionality for specific feature sets. For example, to enable DNS Relay on
an engine you must specify a DNSRelayProfile to use which defines the common
settings (or sub-settings) for that feature.
A DNS Relay Profile allows multiple DNS related mappings that can be configured.
Example usage::
>>> from smc.elements.profiles import DNSRelayProfile
>>> profile = DNSRelayProfile('mynewprofile')
.. note:: If the DNSRelayProfile does not exist, it will automatically be
created when a DNS relay rule is added to the DNSRelayProfile instance.
Add a fixed domain answer rule::
>>> profile.fixed_domain_answer.add([('microsoft3.com', 'foo.com'), ('microsoft4.com',)])
>>> profile.fixed_domain_answer.all()
[{u'domain_name': u'microsoft3.com', u'translated_domain_name': u'foo.com'},
{u'domain_name': u'microsoft4.com'}]
Translate hostnames (not fqdn) to a specific IP address::
>>> profile.hostname_mapping.add([('hostname1,hostname2', '1.1.1.12')])
>>> profile.hostname_mapping.all()
[{u'hostnames': u'hostname1,hostname2', u'ipaddress': u'1.1.1.12'}]
Translate an IP address to another::
>>> profile.dns_answer_translation.add([('12.12.12.12', '172.18.1.20')])
>>> profile.dns_answer_translation.all()
[{u'translated_ipaddress': u'172.18.1.20', u'original_ipaddress': u'12.12.12.12'}]
Specify a DNS server to handle specific domains::
>>> profile.domain_specific_dns_server.add([('myfoo.com', '172.18.1.20')])
>>> profile.domain_specific_dns_server.all()
[{u'dns_server_addresses': u'172.18.1.20', u'domain_name': u'myfoo.com'}]
"""
from smc.base.model import Element, ElementCreator
from smc.api.exceptions import ElementNotFound
from smc.base.util import element_resolver
class DNSRule(object):
"""
DNSRule is the parent class for all DNS relay rules.
"""
__slots__ = "profile"
def __init__(self, profile):
self.profile = profile
    def add(self, answers):
        # `_attr` is supplied by each DNSRule subclass as (json_key, left_field, right_field)
        key, left, right = self._attr
        json = [dict(zip([left, right], d)) for d in answers]
        try:
            self.profile.data[key].extend(json)
            self.profile.update()
        except ElementNotFound:
            j = {"name": self.profile.name, key: json}
            return ElementCreator(self.profile.__class__, j)
def all(self):
"""
Return all entries
:rtype: list(dict)
"""
attribute = self._attr[0]
return self.profile.data.get(attribute, [])
class FixedDomainAnswer(DNSRule):
"""
Direct requests for specific domains to IPv4 addresses, IPv6
addresses, fully qualified domain names (FQDNs), or empty DNS replies
"""
_attr = ("fixed_domain_answer", "domain_name", "translated_domain_name")
def add(self, answers):
"""
Add a fixed domain answer. This should be a list of
two-tuples, the first entry is the domain name, and
the second is the translated domain value::
profile = DNSRelayProfile('dnsrules')
profile.fixed_domain_answer.add([
('microsoft.com', 'foo.com'), ('microsoft2.com',)])
:param answers: (domain_name, translated_domain_name)
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
.. note:: translated_domain_name can be none, which will cause
the NGFW to return NXDomain for the specified domain.
"""
        super(FixedDomainAnswer, self).add(answers)
class HostnameMapping(DNSRule):
"""
Statically map host names, aliases for host names, and unqualified
names (a host name without the domain suffix) to IPv4 or IPv6
addresses
"""
_attr = ("hostname_mapping", "hostnames", "ipaddress")
def add(self, answers):
"""
Map specific hostname to specified IP address. Provide a list
of two-tuples. The first entry is the hostname/s to translate
(you can provide multiple comma separated values). The second
entry should be the IP address to map the hostnames to::
profile = DNSRelayProfile('dnsrules')
profile.hostname_mapping.add([('hostname1,hostname2', '1.1.1.1')])
:param answers: (hostnames, ipaddress), hostnames can be a
comma separated list.
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
        super(HostnameMapping, self).add(answers)
class DomainSpecificDNSServer(DNSRule):
"""
Forward DNS requests to different DNS servers based on
the requested domain.
"""
_attr = ("domain_specific_dns_server", "domain_name", "dns_server_addresses")
def add(self, answers):
"""
Relay specific domains to a specified DNS server. Provide
a list of two-tuple with first entry the domain name to relay
for. The second entry is the DNS server that should handle the
query::
profile = DNSRelayProfile('dnsrules')
profile.domain_specific_dns_server.add([('myfoo.com', '172.18.1.20')])
:param answers: (domain_name, dns_server_addresses), dns server
addresses can be a comma separated string
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
        super(DomainSpecificDNSServer, self).add(answers)
class DNSAnswerTranslation(DNSRule):
"""
Map IPv4 addresses resolved by external DNS servers to IPv4
addresses in the internal network.
"""
_attr = ("dns_answer_translation", "original_ipaddress", "translated_ipaddress")
def add(self, answers):
"""
Takes an IPv4 address and translates to a specified IPv4 value.
Provide a list of two-tuple with the first entry providing the
original address and second entry specifying the translated address::
profile = DNSRelayProfile('dnsrules')
profile.dns_answer_translation.add([('12.12.12.12', '172.18.1.20')])
:param answers: (original_ipaddress, translated_ipaddress)
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
        super(DNSAnswerTranslation, self).add(answers)
class DNSRelayProfile(Element):
"""
DNS Relay Settings specify a profile to handle how the engine will
interpret DNS queries. The engine can act as a DNS relay, rewrite
DNS queries or redirect domains to the specified DNS servers.
"""
typeof = "dns_relay_profile"
@property
def fixed_domain_answer(self):
"""
Add a fixed domain answer entry.
:rtype: FixedDomainAnswer
"""
return FixedDomainAnswer(self)
@property
def hostname_mapping(self):
"""
Add a hostname to IP mapping
:rtype: HostnameMapping
"""
return HostnameMapping(self)
@property
def domain_specific_dns_server(self):
"""
Add domain to DNS server mapping
:rtype: DomainSpecificDNSServer
"""
return DomainSpecificDNSServer(self)
@property
def dns_answer_translation(self):
"""
Add a DNS answer translation
:rtype: DNSAnswerTranslation
"""
return DNSAnswerTranslation(self)
class SNMPAgent(Element):
"""
Minimal implementation of SNMPAgent
"""
typeof = "snmp_agent"
@classmethod
def create(
cls,
name,
snmp_users=[],
trap_destinations=[],
snmp_monitoring_contact=None,
snmp_monitoring_listening_port=161,
snmp_version="v3",
monitoring_user_names=[],
trap_user_names=[],
comment=None,
):
json = {
"boot": False,
"go_offline": False,
"go_online": False,
"hardware_alerts": False,
"name": name,
"policy_applied": False,
"shutdown": False,
"snmp_monitoring_contact": snmp_monitoring_contact,
"snmp_monitoring_listening_port": snmp_monitoring_listening_port,
"snmp_monitoring_user_name": monitoring_user_names,
"snmp_trap_destination": trap_destinations,
"snmp_user_name": snmp_users,
"snmp_version": snmp_version,
"user_login": False,
}
return ElementCreator(cls, json)
class SandboxService(Element):
typeof = "sandbox_service"
@classmethod
def create(cls, name, sandbox_data_center, portal_username=None, comment=None):
"""
Create a Sandbox Service element
"""
json = {
"name": name,
"sandbox_data_center": element_resolver(sandbox_data_center),
"portal_username": portal_username if portal_username else "",
"comment": comment,
}
return ElementCreator(cls, json)
class SandboxDataCenter(Element):
typeof = "sandbox_data_center"
|
[
"lmartinson@forcepoint.com"
] |
lmartinson@forcepoint.com
|
81e244eb03a8e7a0240cc64534523a67fb6e8b2b
|
ca3d8da633cb2b71ff876338c3f2f170298282fd
|
/compositionality_over_time/coha_preprocessing/feature_extracter_dense_embeddings.py
|
7830cb5f0ab2b3dc1f205aa8c5f81d12a877eda6
|
[
"Apache-2.0"
] |
permissive
|
prajitdhar/Compounding
|
92e4c34d95a7ff8a3ed2daea044f868d64731f22
|
7b54d2cc84d0d68cc4b847f714dce3e1a7a42dd2
|
refs/heads/master
| 2023-08-07T17:32:01.048741
| 2023-07-30T22:08:30
| 2023-07-30T22:08:30
| 139,637,197
| 2
| 3
|
Apache-2.0
| 2023-09-06T09:45:26
| 2018-07-03T21:08:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 15,323
|
py
|
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA,TruncatedSVD
from sklearn.preprocessing import Normalizer
import argparse
import time
import pickle
import re
from functools import reduce
import pickle as pkl
def year_binner(year,val=10):
if val==0:
return 0
else:
return year - year%val
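# e.g. year_binner(1997, 10) -> 1990 and year_binner(1997, 50) -> 1950;
# val=0 collapses all years into a single bin (0), i.e. no temporal information.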
def dim_reduction(df):
dtype = pd.SparseDtype(np.float64, fill_value=0)
df=df.astype(dtype)
df_sparse, rows, cols = df.sparse.to_coo(row_levels=['common','time'],column_levels=['context'],sort_labels=False)
print(len(cols))
    rcomp = re.compile(r".+\s.+")  # raw string so "\s" is not treated as an (invalid) string escape
compound_rows=[]
compound_time=[]
constituent_rows=[]
constituent_time=[]
for r in rows:
if re.match(rcomp, r[0]):
compound_rows.append(r[0])
compound_time.append(r[1])
else:
constituent_rows.append(r[0])
constituent_time.append(r[1])
assert (len(compound_rows)+len(constituent_rows))==df_sparse.shape[0]
train_df=df_sparse.tocsr()[0:len(compound_rows),:]
test_df=df_sparse.tocsr()[len(compound_rows):,:]
assert (train_df.shape[0]+test_df.shape[0])==df_sparse.shape[0]
    svd = TruncatedSVD(n_components=300, algorithm='arpack', random_state=args.seed)
    compound_reduced = svd.fit_transform(train_df)  # a single fit serves both the variance report and the transform
    print(f'Explained variance ratio {svd.explained_variance_ratio_.sum():2.3f}')
compound_reduced = Normalizer(copy=False).fit_transform(compound_reduced)
compound_reduced=pd.DataFrame(compound_reduced,index=list(zip(compound_rows,compound_time)))
compound_reduced.index = pd.MultiIndex.from_tuples(compound_reduced.index, names=['compound', 'time'])
compound_reduced.reset_index(inplace=True)
compound_reduced[['modifier','head']]=compound_reduced['compound'].str.split(' ',expand=True)
compound_reduced.drop(['compound'],axis=1,inplace=True)
compound_reduced.set_index(['modifier','head','time'],inplace=True)
#compound_reduced.reset_index(inplace=True)
constituents_reduced=svd.transform(test_df)
constituents_reduced = Normalizer(copy=False).fit_transform(constituents_reduced)
constituents_reduced=pd.DataFrame(constituents_reduced,index=list(zip(constituent_rows,constituent_time)))
constituents_reduced.index = pd.MultiIndex.from_tuples(constituents_reduced.index, names=['constituent', 'time'])
constituents_reduced.reset_index(inplace=True)
return compound_reduced,constituents_reduced
def productivity_features(df):
print("Productivity")
all_comps=df.reset_index()[['modifier','head','time']]
mod_prod=df.groupby(['modifier','time']).size().to_frame()
mod_prod.columns=['mod_prod']
head_prod=df.groupby(['head','time']).size().to_frame()
head_prod.columns=['head_prod']
prod1=pd.merge(all_comps,mod_prod.reset_index(),how='left',on=['modifier','time'])
productivity=pd.merge(prod1,head_prod.reset_index(),how='left',on=['head','time'])
productivity.set_index(['modifier','head','time'],inplace=True)
return productivity
def freq_features(df):
print("Frequency features")
compound_decade_counts=df.groupby('time').sum().sum(axis=1).to_frame()
compound_decade_counts.columns=['N']
XY=df.groupby(['modifier','head','time']).sum().sum(axis=1).to_frame()
X_star=df.groupby(['modifier','time']).sum().sum(axis=1).to_frame()
Y_star=df.groupby(['head','time']).sum().sum(axis=1).to_frame()
XY.columns=['a']
X_star.columns=['x_star']
Y_star.columns=['star_y']
merge1=pd.merge(XY.reset_index(),X_star.reset_index(),on=['modifier','time'])
frequency_feat=pd.merge(merge1,Y_star.reset_index(),on=['head','time'])
frequency_feat=frequency_feat.rename(columns = {'a':'comp_freq','x_star':'mod_freq','star_y':'head_freq'})
frequency_feat.set_index(['modifier','head','time'],inplace=True)
return frequency_feat
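# In it_features below, (a, b, c, d) form the usual 2x2 contingency table per time
# bin: a = count(modifier, head), b = count(modifier, other heads) = x_star - a,
# c = count(other modifiers, head) = star_y - a, and d = N - (a + b + c);
# ppmi and the log-likelihood ratio are computed from these cells with +1 smoothing.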
def it_features(df):
print("Information Theory features")
compound_decade_counts=df.groupby('time').sum().sum(axis=1).to_frame()
compound_decade_counts.columns=['N']
XY=df.groupby(['modifier','head','time']).sum().sum(axis=1).to_frame()
X_star=df.groupby(['modifier','time']).sum().sum(axis=1).to_frame()
Y_star=df.groupby(['head','time']).sum().sum(axis=1).to_frame()
XY.columns=['a']
X_star.columns=['x_star']
Y_star.columns=['star_y']
merge1=pd.merge(XY.reset_index(),X_star.reset_index(),on=['modifier','time'])
information_feat=pd.merge(merge1,Y_star.reset_index(),on=['head','time'])
information_feat['b']=information_feat['x_star']-information_feat['a']
information_feat['c']=information_feat['star_y']-information_feat['a']
information_feat=pd.merge(information_feat,compound_decade_counts.reset_index(),on=['time'])
information_feat['d']=information_feat['N']-(information_feat['a']+information_feat['b']+information_feat['c'])
information_feat['x_bar_star']=information_feat['N']-information_feat['x_star']
information_feat['star_y_bar']=information_feat['N']-information_feat['star_y']
information_feat.set_index(['modifier','head','time'],inplace=True)
information_feat['ppmi']=np.log2((information_feat['a']*information_feat['N']+1)/(information_feat['x_star']*information_feat['star_y']+1))
information_feat['local_mi']=information_feat['a']*information_feat['ppmi']
information_feat['log_ratio']=2*(information_feat['local_mi']+\
information_feat['b']*np.log2((information_feat['b']*information_feat['N']+1)/(information_feat['x_star']*information_feat['star_y_bar']+1))+\
information_feat['c']*np.log2((information_feat['c']*information_feat['N']+1)/(information_feat['x_bar_star']*information_feat['star_y']+1))+\
information_feat['d']*np.log2((information_feat['d']*information_feat['N']+1)/(information_feat['x_bar_star']*information_feat['star_y_bar']+1)))
information_feat.loc[information_feat.ppmi<=0, 'ppmi']=0
information_feat.drop(['a','x_star','star_y','b','c','d','N','x_bar_star','star_y_bar'],axis=1,inplace=True)
return information_feat
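# The information-theoretic features are built from the usual 2x2 contingency
# table per (modifier, head, time) bin:
#                 head      not-head
#   modifier       a            b        (row sum = x_star)
#   not-modifier   c            d        (row sum = x_bar_star)
# with column sums star_y / star_y_bar and grand total N, so that, e.g.,
# ppmi = max(0, log2((a*N + 1) / (x_star*star_y + 1))) exactly as coded above.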
def cosine_features(compound_df,modifier_df,head_df):
print("Cosine Similarity features")
compound_modifier_sim=(compound_df*modifier_df).dropna().sum(axis=1).to_frame()
compound_modifier_sim.columns=['sim_with_modifier']
compound_modifier_sim=compound_modifier_sim.swaplevel('time','head')
compound_head_sim=(compound_df*head_df).dropna().sum(axis=1).to_frame()
compound_head_sim.columns=['sim_with_head']
compound_head_sim=compound_head_sim.swaplevel('time','modifier')
compound_head_sim=compound_head_sim.swaplevel('head','modifier')
constituent_sim=compound_df.reset_index()[['modifier','head','time']].merge(modifier_df.reset_index(),how='left',on=['modifier','time'])
constituent_sim.set_index(['modifier','head','time'],inplace=True)
constituent_sim=(constituent_sim*head_df).dropna().sum(axis=1).to_frame()
constituent_sim.columns=['sim_bw_constituents']
constituent_sim=constituent_sim.swaplevel('time','modifier')
constituent_sim=constituent_sim.swaplevel('head','modifier')
return compound_modifier_sim,compound_head_sim,constituent_sim
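# Because every vector was L2-normalised after the SVD, the row-wise dot
# products inside cosine_features are exactly cosine similarities:
# compound~modifier, compound~head, and modifier~head per time bin.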
parser = argparse.ArgumentParser(description='Compute features from sparse dataset via SVD')
parser.add_argument('--temporal', type=int,default=0,
help='Value to bin the temporal information: 0 (remove temporal information), 1 (no binning), 10 (binning to decades), 20 (binning each 20 years) or 50 (binning each 50 years)')
parser.add_argument('--cutoff', type=int, default=50,
help='Cut-off frequency for each compound per time period : none (0), 20, 50 and 100')
parser.add_argument('--seed', type=int, default=1991,
help='random seed')
parser.add_argument('--contextual', action='store_true',
help='Is the model contextual')
parser.add_argument('--inputdir',type=str,
help='Provide directory where features are located')
parser.add_argument('--outputdir',type=str,
help='Where should the output be stored?')
args = parser.parse_args()
print(f'Cutoff: {args.cutoff}')
print(f'Time span: {args.temporal}')
temp_cutoff_str=str(args.temporal)+'_'+str(args.cutoff)
context_list = pickle.load( open( f'{args.inputdir}context.pkl', "rb" ) )
if args.contextual:
context='CompoundAware'
else:
context='CompoundAgnostic'
save_path=context+'_Dense_'+temp_cutoff_str
if args.contextual:
print("CompoundCentric Model")
print('Reading compounds')
compounds=pd.read_pickle(args.inputdir+"/compounds.pkl")
print(compounds.shape[0])
compounds.context=compounds.context.str.replace(r'.+_NUM','NUM',regex=True)
compounds=compounds.loc[compounds.context.isin(context_list)]
print(compounds.shape[0])
compounds.modifier=compounds.modifier.str.replace(r'_.+','',regex=True)
compounds['head']=compounds['head'].str.replace(r'_.+','',regex=True)
if args.temporal==0:
print('No temporal information is stored')
else:
print(f'Temporal information is stored with intervals {args.temporal}')
#compounds=compounds.loc[~compounds.modifier.str.contains('^(?:of|the|-)_.+')]
#compounds=compounds.loc[~compounds['head'].str.contains('^(?:of|the|-)_.+')]
compounds.year=compounds.year.astype("int32")
#compounds.query('1800 <= year <= 2010',inplace=True)
compounds['time']=year_binner(compounds['year'].values,args.temporal)
compounds=compounds.loc[compounds.groupby(['modifier','head','time'])['count'].transform('sum').gt(args.cutoff)]
print(compounds.shape[0])
compounds=compounds.groupby(['modifier','head','time','context'])['count'].sum().to_frame().reset_index()
print(compounds.shape[0])
modifier_lst=compounds.modifier.unique().tolist()
print(f'Number of unique modifiers {len(modifier_lst)}')
head_lst=compounds['head'].unique().tolist()
print(f'Number of unique heads {len(head_lst)}')
compounds['common']=compounds['modifier']+" "+compounds['head']
compounds=compounds.groupby(['common','time','context'])['count'].sum()
print('Done reading compounds')
print('Reading modifiers')
modifiers=pd.read_pickle(args.inputdir+"/modifiers.pkl")
print(modifiers.shape[0])
modifiers.context=modifiers.context.str.replace(r'.+_NUM','NUM',regex=True)
modifiers=modifiers.loc[modifiers.context.isin(context_list)]
print(modifiers.shape[0])
modifiers.modifier=modifiers.modifier.str.replace(r'_.+','',regex=True)
modifiers.year=modifiers.year.astype("int32")
#modifiers.query('1800 <= year <= 2010',inplace=True)
modifiers['time']=year_binner(modifiers['year'].values,args.temporal)
modifiers=modifiers.groupby(['modifier','time','context'])['count'].sum().to_frame().reset_index()
modifiers.columns=['common','time','context','count']
print(modifiers.shape[0])
modifiers=modifiers.loc[modifiers.common.isin(modifier_lst)]
print(modifiers.shape[0])
modifiers.common=modifiers.common+"_m"
modifiers=modifiers.groupby(['common','time','context'])['count'].sum()
print('Done reading modifiers')
print('Reading heads')
heads=pd.read_pickle(args.inputdir+"/heads.pkl")
print(heads.shape[0])
heads.context=heads.context.str.replace(r'.+_NUM','NUM',regex=True)
heads=heads.loc[heads.context.isin(context_list)]
print(heads.shape[0])
heads['head']=heads['head'].str.replace(r'_.+','',regex=True)
heads.year=heads.year.astype("int32")
#heads.query('1800 <= year <= 2010',inplace=True)
heads['time']=year_binner(heads['year'].values,args.temporal)
heads=heads.groupby(['head','time','context'])['count'].sum().to_frame().reset_index()
heads.columns=['common','time','context','count']
print(heads.shape[0])
heads=heads.loc[heads.common.isin(head_lst)]
print(heads.shape[0])
heads.common=heads.common+"_h"
heads=heads.groupby(['common','time','context'])['count'].sum()
print('Done reading heads')
print('Concatenating all the datasets together')
df=pd.concat([compounds,heads,modifiers], sort=False)
else:
print("CompoundAgnostic Model")
print('Reading phrases')
compounds=pd.read_pickle(args.inputdir+"/phrases.pkl")
print(compounds.shape[0])
compounds.context=compounds.context.str.replace(r'.+_NUM','NUM',regex=True)
compounds=compounds.loc[compounds.context.isin(context_list)]
print(compounds.shape[0])
compounds.modifier=compounds.modifier.str.replace(r'_.+','',regex=True)
compounds['head']=compounds['head'].str.replace(r'_.+','',regex=True)
if args.temporal==0:
print('No temporal information is stored')
else:
print(f'Temporal information is stored with intervals {args.temporal}')
#compounds=compounds.loc[~compounds.modifier.str.contains('^(?:of|the|-)_.+')]
#compounds=compounds.loc[~compounds['head'].str.contains('^(?:of|the|-)_.+')]
compounds.year=compounds.year.astype("int32")
#compounds.query('1800 <= year <= 2010',inplace=True)
compounds['time']=year_binner(compounds['year'].values,args.temporal)
compounds=compounds.loc[compounds.groupby(['modifier','head','time'])['count'].transform('sum').gt(args.cutoff)]
print(compounds.shape[0])
compounds=compounds.groupby(['modifier','head','time','context'])['count'].sum().to_frame().reset_index()
constituents_lst=list(set(compounds.modifier.unique().tolist()+compounds['head'].unique().tolist()))
compounds['common']=compounds['modifier']+" "+compounds['head']
compounds=compounds.groupby(['common','time','context'])['count'].sum()
print('Done reading compounds')
print(f'Number of unique constituents {len(constituents_lst)}')
print('Reading constituents')
constituents=pd.read_pickle(args.inputdir+"/words.pkl")
print(constituents.shape[0])
constituents.context=constituents.context.str.replace(r'.+_NUM','NUM',regex=True)
constituents=constituents.loc[constituents.context.isin(context_list)]
print(constituents.shape[0])
constituents.word=constituents.word.str.replace(r'_.+','',regex=True)
constituents=constituents.loc[constituents.word.isin(constituents_lst)]
print(constituents.shape[0])
constituents.year=constituents.year.astype("int32")
#constituents.query('1800 <= year <= 2010',inplace=True)
constituents['time']=year_binner(constituents['year'].values,args.temporal)
constituents=constituents.groupby(['word','time','context'])['count'].sum().to_frame().reset_index()
constituents.columns=['common','time','context','count']
constituents=constituents.groupby(['common','time','context'])['count'].sum()
print(constituents.shape[0])
print('Done reading constituents')
print('Concatenating all the datasets together')
df=pd.concat([compounds,constituents], sort=False)
time_lst=compounds.index.unique(level='time').to_list()
|
[
"janis.pagel@ims.uni-stuttgart.de"
] |
janis.pagel@ims.uni-stuttgart.de
|
227b6a056fb6d25eccae7d16b9b3363a3999867e
|
929f43b02defa8b0f37ee734ad97fad8a68712e5
|
/mundo1/ex029.py
|
8b579974216c18a04aca1204d1bebace02cd6632
|
[
"MIT"
] |
permissive
|
Igor3550/Exercicios-de-python
|
c9395c3d75a20daf83d67716ffda94c624ebf3de
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
refs/heads/master
| 2022-11-10T07:52:51.334570
| 2020-06-25T21:41:44
| 2020-06-25T21:41:44
| 275,020,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
# Write a program that reads a car's speed.
# If it exceeds 80 km/h, show a message saying the driver was fined.
# The fine costs R$7.00 for every km/h over the limit.
veloc = int(input('Enter the speed in km/h: '))
if veloc <= 80:
    print('You are within the speed limit\nThe limit is 80km/h and your speed was {}km/h'.format(veloc))
else:
    multa = (veloc - 80) * 7
    print('You were fined for exceeding the 80km/h speed limit\nYour speed: {}km/h\nFine amount: R${:.2f}'.format(veloc, multa))
|
[
"igormeloigormelo@gmail.com"
] |
igormeloigormelo@gmail.com
|
53da9f21a7f87160ef47e40054bd952e2946138f
|
cf33f981a53f899d8e43b0580a22a8a5b708e304
|
/src/case.py
|
bb90bdfa7b019a96673dbb52b3aca6d161161038
|
[] |
no_license
|
YKherzaneColbert59/Sudoku
|
9e359fc2ea88af49ec77ae27710fcaa988e41848
|
290691943fc6aede2c790944f4e45974c11f3cb1
|
refs/heads/main
| 2023-08-30T14:54:52.235387
| 2021-11-20T08:47:24
| 2021-11-20T08:47:24
| 428,565,654
| 0
| 2
| null | 2021-11-16T10:14:14
| 2021-11-16T07:55:09
| null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
class Case:
def __init__(self, pos, value = None):
"""
Default constructor
Arguments:
- pos : position of the cell (0--80)
- value : value of the cell (1--9)
Tests:
>>> Case(0).position, Case(80).position, Case(25).position
(0, 80, 25)
>>> Case(13).value, Case(13, 3).value
(None, 3)
>>> Case(0).row, Case(80).row, Case(25).row
(0, 8, 7)
>>> Case(0).line, Case(80).line, Case(25).line
(0, 8, 2)
>>> Case(0).region, Case(80).region, Case(25).region
(1, 9, 3)
"""
self.position = pos
self.row = pos%9 # Column
self.line = pos//9 # Line
self.value = value
self.region = (self.line//3)*3+self.row//3+1
self.valid = True # Valid by default
def setValue(self, value):
"""
Setter for the value attribute
Tests:
>>> c = Case(13, 2)
>>> c.setValue(8)
>>> c.value == 2
False
>>> c.value == 8
True
"""
self.value = value
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"kherzaneyani@gmail.com"
] |
kherzaneyani@gmail.com
|
1cea74f4cade6ca228fd43ae326070c0be4b24c4
|
f85df4b1eedbabeef82f9ec8d54e10a7a9544e6e
|
/gameplay/field.py
|
09f0f7a813526b3aa0d116da972af16c5c10ee5e
|
[
"MIT"
] |
permissive
|
hristy93/FallingRocks
|
ce387930ea7eb8173a7244999141556b6736cf4b
|
42e3dd282e43717cec001578a17ffd3e00a8b410
|
refs/heads/master
| 2020-04-04T17:47:36.220197
| 2016-02-17T16:45:28
| 2016-02-17T16:45:28
| 35,620,170
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
from player import Player
from rock import Rock
from powerup import Powerup, PowerupType
from bullet import Bullet
class Field:
def __init__(self, width, height):
self.width, self.height = width, height
self.rocks = []
self.powerups = []
self.bullets = []
self.__player = Player()
self.__rock = Rock()
self.__powerup = Powerup(PowerupType.no_powerup)
self.__bullet = Bullet()
def set_rock_speed(self, new_speed):
"""Sets the rock's speed to the value of new_speed."""
self.__rock.set_speed(new_speed)
@property
def rock_speed(self):
"""Gets the rock's speed."""
return self.__rock.rock_speed
@property
def player_speed(self):
"""Gets the player's speed."""
return self.__player.player_speed
@property
def bullet_speed(self):
"""Gets the bullet's speed."""
return self.__bullet.bullet_speed
@property
def player(self):
"""Gets the player's object."""
return self.__player
@property
def rock(self):
"""Gets the rock's object."""
return self.__rock
@property
def powerup(self):
"""Gets the powerup's object."""
return self.__powerup
@property
def bullet(self):
"""Gets the bullet's speed."""
return self.__bullet
|
[
"hristi@gbg.bg"
] |
hristi@gbg.bg
|
de90c6a29c7f48707d371e882267c257f4f629e0
|
90e6a3fc2d98be7cc33edac04985c31cce1bac92
|
/app/config.py
|
f10dfba9264a887006d6aea96e50a4362f516dc3
|
[] |
no_license
|
skarj/devops-examples
|
f2db47aef175048b437cf5bf98bb8a6410a2c65f
|
94d88defee4d2372930210c5598bc7a873b6be96
|
refs/heads/master
| 2023-05-24T23:23:09.136297
| 2022-05-07T15:18:47
| 2022-05-07T15:18:47
| 157,450,150
| 2
| 0
| null | 2023-05-01T21:13:57
| 2018-11-13T21:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 669
|
py
|
import os
class Config(object):
DEBUG = False
PORT = 5000
HOST = '0.0.0.0'
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')
AWS_REGION = os.environ.get('AWS_REGION')
S3_BUCKET = os.environ.get('S3_BUCKET')
S3_ENDPOINT = 'http://{}.s3.amazonaws.com/'.format(S3_BUCKET)
DYNAMODB_ENDPOINT = os.environ.get('DYNAMODB_ENDPOINT')
class Dev(Config):
S3_ENDPOINT = 'http://localhost:8008'
DYNAMODB_ENDPOINT = 'http://localhost:8000'
DEBUG = True
class Test(Config):
S3_ENDPOINT = os.environ.get('S3_ENDPOINT')
DEBUG = True
class Prod(Config):
HOST = '127.0.0.1'
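# Hedged usage sketch (standard Flask API; the module path is an assumption,
# not taken from this repo):
#   app.config.from_object('config.Dev')   # or 'config.Test' / 'config.Prod'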
|
[
"skaarj.sergey@gmail.com"
] |
skaarj.sergey@gmail.com
|
521c07cd2ff7dc5d7204594a7e6315b8882b463b
|
4bd969bb18178e15b750fcaa8b791cc100260e62
|
/mongoCRUD.py
|
90c486765a76e7922f411e76bb922f4f2a2bc641
|
[
"MIT"
] |
permissive
|
codefusser/UserManagerDocker
|
587f73ee29f23aa34b3df8306d6b432134c0392d
|
8ba4e3aaaff0419ccdf3ab885fe1fcc0332c32be
|
refs/heads/master
| 2021-08-30T04:22:54.084203
| 2017-12-16T10:23:39
| 2017-12-16T10:23:39
| 114,374,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
# Using Python to interact with the MongoDB server with pymongo
from pymongo import MongoClient
# pprint module will help to improve readability
from pprint import pprint
DB_connectionString = "mongodb://127.0.0.1:27017"
client = MongoClient(DB_connectionString)
# use userManager
dbase = client.userManager
# creating a collection implicitly by inserting a document
info1 = {
    "_id": "59071791b0lkscm2325794",
    "name": "John Doe",
    "email": "john.doe@gmail.com",
    "password": "johndoe",
    "__v": "0"
}
docInsert = dbase.usersinfo.insert_one(info1)
# Performing other CRUD operations
# Retrieving a document
docObj = dbase.usersinfo.find_one({"_id": "59071791b0lkscm2325794"})
print("The data retrieved is: ")
pprint(docObj)
# Update a document with a new field
docUpdate = dbase.usersinfo.update_one({"_id": "59071791b0lkscm2325794"},
    {"$set": {"website": "https://github.com", "__v": "0"}})
# Updating a document by changing an existing field's value
docReplace = dbase.usersinfo.update_one({"password": "johndoe"},
    {"$set": {"password": "doejohn"}})
# Delete a document
docDelete = dbase.usersinfo.delete_one({"_id": "59071791b0lkscm2325794"})
|
[
"softorque@outlook.com"
] |
softorque@outlook.com
|
00da0d7ae1145d2e6bc178fca1111e2d7c21d7ad
|
01ee722200235e47d5d08d83a44ab72bec421aaf
|
/LocalServer/Inventory/views/home.py
|
255a74b77c13a8ba0caba119a252c51584804a5d
|
[] |
no_license
|
CG3002-Design-Project/CG3002
|
47d1466dbb0e2e3d53147468406d41ac73fb6247
|
70b92161689d9ae6515bd0a992682183f5aafde6
|
refs/heads/master
| 2018-12-28T17:35:15.774807
| 2013-12-08T11:51:45
| 2013-12-08T11:51:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from django.template import Context, loader, RequestContext
from Inventory.models import Inventory, RequestDetails
from Inventory.models import Product
from Inventory.models import Transaction
from datetime import date
from decimal import *
from django.views.decorators.csrf import csrf_exempt
import requests
import json
import os
import time
import serial
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
@login_required
def home_page(request):
return render(request,'home.html');
|
[
"poo.dav@gmail.com"
] |
poo.dav@gmail.com
|
498488d0e02adf53cce7096cd9c7afa81a6a5814
|
64267b1f7ca193b0fab949089b86bc7a60e5b859
|
/slehome/account/migrations/0046_auto_20150130_0600.py
|
4d7e8d0246f4262cdd73c9abdd7338982e3d2674
|
[] |
no_license
|
hongdangodori/slehome
|
6a9f2b4526c2783932627b982df0540762570bff
|
3e558c78c3943dadf0ec485738a0cc98dea64353
|
refs/heads/master
| 2021-01-17T12:00:34.221088
| 2015-02-06T13:44:00
| 2015-02-06T13:44:00
| 28,847,585
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0045_auto_20150130_0558'),
]
operations = [
migrations.AlterField(
model_name='basicmemberinformation',
name='auth_key',
field=models.CharField(default='43f9a685bc7146b4ecc63bdf9bc3e5136b7543f436a42e4a2f2ae749ffb0c6db', max_length=64),
preserve_default=True,
),
]
|
[
"chungdangogo@gmail.com"
] |
chungdangogo@gmail.com
|
54f1cbd6a9ca5855b001c659cc3cfe4b7a0aae71
|
8452e5d4864ffd69cf528ac1a6504b59c44dfb96
|
/deam.py
|
64a9dcfea7f3871a7f7d67462f145f5a0eb505c9
|
[] |
no_license
|
zhang-python/ssh_lianxi
|
707c7d112eae3c3e42237e24d9fe5c556ad79f53
|
ee6822020b8d1513c3bc6a386faa1e89a9db1eb0
|
refs/heads/master
| 2020-09-08T02:07:00.622670
| 2019-11-11T12:24:34
| 2019-11-11T12:24:34
| 220,980,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
print('one')
print('two')
print('three')
print('four')
print('five')
print('six')
print('seven')
print('eight')
print('nine')
|
[
"784913775@qq.com"
] |
784913775@qq.com
|
abdba80e2f5adc1b736189b208ad548770d4a1a7
|
e4d51669c68bd36448f97b044aa835576db7c708
|
/instagram/converters.py
|
ec72631919e3a6997041bfdb49299e4a9acfd4ed
|
[] |
no_license
|
BigbrotherShin/django_practice-instadjango
|
92f156db5e2db4b963288628b46ab4acec9c8bd8
|
c3b6f5f5db88156f9092ac12cdf9159f484eb865
|
refs/heads/master
| 2022-11-13T21:44:29.691936
| 2020-07-06T09:00:04
| 2020-07-06T09:00:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
class YearConverter:
regex = r"20\d{2}"
def to_python(self, value):
return int(value)
def to_url(self, value):
return str(value)
class MonthConverter(YearConverter):
regex = r"\d{1,2}"
class DayConverter(YearConverter):
regex = r"[0123]\d"
|
[
"shinjhhp5@gmail.com"
] |
shinjhhp5@gmail.com
|
279eca7baaea7d7e78b5f31eab14828579fa7ef7
|
af79d5ad4c47c1d9db2cf6cdcc7c7c52658723c9
|
/pressTv/apps.py
|
a059e7ce5a0aa1da21d9203cdc591d921c15aff3
|
[
"MIT"
] |
permissive
|
jafarzadeh-1998/Coronavirus-News-Crawler
|
8104b4a730bf08e59903ea0c9c8f035ec4e09fb0
|
aae34075b0f39b4490b6b562a18a195addc8b554
|
refs/heads/main
| 2023-01-07T21:41:02.673658
| 2020-11-03T14:24:50
| 2020-11-03T14:24:50
| 308,889,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.apps import AppConfig
class PresstvConfig(AppConfig):
name = 'pressTv'
|
[
"a.jafarzadeh1998@gmail.com"
] |
a.jafarzadeh1998@gmail.com
|
b3e740a0b9efebccd943477359ab43b75987d7c2
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/7/sw5.py
|
6ff3c55a6f3707c4e80f76687713728c3404fcd7
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys

def printFunction(lineRemaining):
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()

def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'sw5':
                printFunction(data[1:])
            else:
                print('ERROR')
                return

if __name__ == '__main__':
    main(sys.argv[1])
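# Hedged example of an input line this mini-interpreter accepts, inferred
# from the parsing above (the quote characters must be whitespace-separated
# tokens):
#   sw5 " hello world "
# which prints the tokens between the quote tokens joined by spaces.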
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
d318bf8277ce10614e8bd8d84441f8df63363604
|
22448c6ca28029b0782409a2e4503317efd1f4df
|
/Programmes tests/Accès soap/clientTest.py
|
9666b128301f1819fffac942f826af7024e35a83
|
[] |
no_license
|
Assia-Megueni/bankNTR
|
396aef84dc542a943a6cbce97f984f3103dee2b5
|
c46b3c291573b14eea1d7d8a91c97b494fde9890
|
refs/heads/main
| 2023-04-19T02:04:06.598433
| 2021-04-30T12:10:29
| 2021-04-30T12:10:29
| 362,913,305
| 0
| 0
| null | 2021-04-29T18:37:35
| 2021-04-29T18:37:34
| null |
UTF-8
|
Python
| false
| false
| 3,412
|
py
|
from suds.client import Client
nomprojet = "servicesoap"
port = "8090"
urlCommandes = "http://localhost:"+port+"/"+nomprojet+"/services/Commandes?wsdl"
urlArticles = "http://localhost:"+port+"/"+nomprojet+"/services/Articles?wsdl"
urlFinances = "http://localhost:"+port+"/"+nomprojet+"/services/Finances?wsdl"
urlComptes = "http://localhost:"+port+"/"+nomprojet+"/services/Comptes?wsdl"
def r():
client = Client(urlCommandes)
print(client)
client = Client(urlArticles)
print(client)
client = Client(urlFinances)
print(client)
client = Client(urlComptes)
print(client)
def ajouterCompteClient(nom,prenom,mdp):
client = Client(urlComptes)
print(client.service.creerCompte(nom,prenom,mdp,True))
def ajouterCompteVendeur(nom,prenom,mdp):
client = Client(urlComptes)
print(client.service.creerCompte(nom,prenom,mdp,False))
def getComptesVendeur():
client = Client(urlComptes)
print(client.service.listeComptesVendeur())
def getComptesClient():
client = Client(urlComptes)
print(client.service.listeComptesClient())
def isPasswordOK(nom,prenom,mdp,isClient):
client = Client(urlComptes)
print(client.service.isCorrect(nom,prenom,mdp,isClient))
def getId(nom,prenom,isClient):
client = Client(urlComptes)
print(client.service.idPersonne(nom,prenom,isClient))
def creerProduit(nom,categorie,prix,idVendeur):
client = Client(urlArticles)
print(client.service.ajoutArticle(nom,categorie,prix,idVendeur))
def listerProduits():
client = Client(urlArticles)
print(client.service.getAllArticles())
def getAProduit(idd):
client = Client(urlArticles)
print(client.service.getArticle(idd))
def changerCategorieProd(idArticle,cate):
client = Client(urlArticles)
print(client.service.changerCatégorie(idArticle,cate))
listerProduits()
def changerPrixProd(idArticle,prix):
client = Client(urlArticles)
print(client.service.changerPrix(idArticle,prix))
listerProduits()
def ajoutStock(idArticle,nb):
client = Client(urlArticles)
print(client.service.ajoutStock(idArticle,nb))
listerProduits()
def rmStock(idArticle,nb):
client = Client(urlArticles)
print(client.service.retirerStock(idArticle,nb))
listerProduits()
# Orders
def getCommandes():
client = Client(urlCommandes)
print(client.service.listeCommandes())
def getCommandesN(nom,prenom):
client = Client(urlCommandes)
print(client.service.commandesClient(nom,prenom))
def creerCommande(idClient):
client = Client(urlCommandes)
print(client.service.creerCommande(idClient))
getCommandes()
def ajoutArticle(idCommande,idArticle,qte):
client = Client(urlCommandes)
print(client.service.ajoutArticle(idCommande,idArticle,qte))
getCommandes()
def rmArticle(idCommande,idArticle,qte):
client = Client(urlCommandes)
print(client.service.retraitArticle(idCommande,idArticle,qte))
getCommandes()
# Payments
def payer(idcommande):
client = Client(urlFinances)
print(client.service.payerCommande(idcommande))
getCommandes()
def rembourser(idcommande):
client = Client(urlFinances)
print(client.service.rembourserCommande(idcommande))
getCommandes()
|
[
"55113456+bverhul@users.noreply.github.com"
] |
55113456+bverhul@users.noreply.github.com
|
100b195881073cab936847756bee9bbca6c61300
|
c9f26cefc6325503e94e14d7bcc06c26f59ae701
|
/Shell/Kechocat.py
|
88eaf85b3538cce5a5c4b93dae5354382bfe70e5
|
[] |
no_license
|
nivbhaskhar/Tools
|
92743788fee80f2f339efd1fc0b5e01996f0e774
|
d6ec4ed338578830b3da3fa8af692319127aebf0
|
refs/heads/master
| 2022-12-06T11:34:30.590121
| 2020-09-04T04:52:00
| 2020-09-04T04:52:00
| 268,422,303
| 0
| 0
| null | 2020-06-02T14:08:49
| 2020-06-01T04:08:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 103
|
py
|
import sys
print(' '.join(sys.argv[1:]))
try:
while True:
print(input())
except EOFError:
pass
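# Hedged usage sketch: echoes its arguments, then behaves like `cat` on
# stdin until EOF, e.g.
#   python Kechocat.py hello world < somefile.txt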
|
[
"nivbhaskhar@gmail.com"
] |
nivbhaskhar@gmail.com
|
4a0714091ddd90df0ea8c7a0b01751aad0843151
|
398089ec2210e1b6a12aecf8ed91cdeced6b36fc
|
/employer/views.py
|
37965cec7645b608559570279fbd8da925ea939d
|
[
"Apache-2.0"
] |
permissive
|
WilliamQLiu/job-waffle
|
7ca8cb6357884e99a9c054bbd25d10222816dde7
|
59e4bc550dc1c2131fa427f188fbc2bb287aa938
|
refs/heads/master
| 2022-05-04T12:18:53.018609
| 2015-04-10T03:18:34
| 2015-04-10T03:18:34
| 27,843,538
| 1
| 1
|
Apache-2.0
| 2021-06-10T17:29:08
| 2014-12-10T22:48:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,702
|
py
|
"""
A view takes a web request and returns a web response
The response can be a web page, a redirect, a 404 error, etc
GET is used for requests that do not affect the state of the system
POST is used for making changes in the database
Under the hood, Django just converts HTTP POST and GET data into a
'QueryDict', a Django dictionary subclass of the standard Python dict
"""
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import render, render_to_response, RequestContext, Http404
from django.utils.decorators import method_decorator # Allow LoggedInMixin
from django.views.generic import TemplateView, View, ListView, UpdateView, DeleteView, CreateView
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
import django_filters
# For debugging
from django.http.request import QueryDict
from django.utils.datastructures import MultiValueDict
import logging
from .models import Job
from .forms import JobForm, JobSearchForm
from .serializers import JobSerializer
from rest_framework import viewsets, authentication, permissions, filters
from haystack.query import SearchQuerySet
from haystack.inputs import AutoQuery, Exact, Clean, Raw
# Debugging: Log levels (DEBUG, INFO, WARNING, ERROR, CRITICAL)
logger = logging.getLogger(__name__) # get instance of a logger
class LoggedInMixin(object):
""" Mixin to ensure user is logged in """
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoggedInMixin, self).dispatch(*args, **kwargs)
def find_job(request):
""" 'Find Job' Page """
query_what = None
query_where = None
form = JobSearchForm(request.GET) # <class 'employer.forms.JobSearchForm'>
form_search = form.search() # Get Search Results from the form
# GET data from the form; make sure fields aren't non-empty values
# filter Haystack's SearchQuerySet, for details see:
# http://django-haystack.readthedocs.org/en/v2.3.1/searchqueryset_api.html
if ('query_what' in request.GET and request.GET['query_what']) or \
('query_where' in request.GET and request.GET['query_where']):
query_what = request.GET['query_what'] # query for what field
query_where = request.GET['query_where'] # query for where field
myquery = query_what + " " + query_where # combine search queries
search_results = form_search.filter(content__contains=myquery) # AND
else:
query_what = 'You submitted an empty form'
query_where = 'You submitted an empty form'
search_results = form_search
# If you want to filter by Model instead of by Haystack's SearchQuerySet
#my_data = Job.objects.filter(active=True).order_by('timestamp_created')
context = {'search_results': search_results}
return render(request, 'find_job.html', context)
def post_job(request):
""" 'Post Job' Page """
if request.method == 'POST':
form = JobForm(data=request.POST) # create form, populate data from request
if form.is_valid():
#Return authenticated user, if any
#username = None
#if request.user.is_authenticated():
# username = request.user.username
company = form.cleaned_data['company']
location = form.cleaned_data['location']
title = form.cleaned_data['title']
description = form.cleaned_data['description']
status = form.cleaned_data['status']
salary_min = form.cleaned_data['salary_min']
salary_max = form.cleaned_data['salary_max']
my_data = Job(created_by=request.user, company=company,
location=location, timestamp_created=timezone.now(),
title=title, description=description, status=status,
salary_min=salary_min, salary_max=salary_max)
my_data.save()
messages.success(request, 'Thanks!')
return HttpResponseRedirect('/')
else: # Request is a 'GET' instead of 'POST'
form = JobForm() # get a blank form
#logger.info("Not a POST")
return render(request, 'post_job.html', {'form': form})
def manage_job_posts(request):
""" 'Manage Job Posts' Page """
my_data = Job.objects.filter(active=True).order_by('timestamp_created')
context = {'my_data': my_data}
return render(request, 'manage_job_posts.html', context)
class JobCreateView(LoggedInMixin, CreateView):
""" Allow Users to Create Jobs """
model = Job
template_name = "job_create.html"
def get_success_url(self):
""" After posting job, go to job management """
return reverse('job-post')
def get_context_data(self, **kwargs):
context = super(JobCreateView, self).get_context_data(**kwargs)
context['action'] = reverse('job-create')
return context
def form_valid(self, form):
form.instance.user = self.request.user
return super(JobCreateView, self).form_valid(form)
class JobUpdateView(LoggedInMixin, UpdateView):
""" Allow Users to Update Job """
model = Job
template_name = 'job_update.html'
def get_success_url(self):
""" After updating a job, takes you back to job profile """
return reverse('manage_job_posts')
def get_queryset(self):
specific_id = self.kwargs['pk'] # Pass variable 'pk' from urls.py
return Job.objects.filter(id=specific_id)
class JobListView(LoggedInMixin, ListView):
""" View a specific job """
model = Job
template_name = "job_view.html"
def get_success_url(self):
return reverse('job-list')
def get_queryset(self):
specific_id = self.kwargs['pk'] # Pass variable 'pk' from urls.py
return Job.objects.filter(id=specific_id)
class JobDeleteView(LoggedInMixin, DeleteView):
""" Delete a specific job """
model = Job
template_name = "job_delete.html"
def get_success_url(self):
""" After deleting a job, takes you back to profile """
return reverse('manage_job_posts')
def get_queryset(self):
specific_id = self.kwargs['pk'] # Pass variable 'pk' from urls.py
return Job.objects.filter(id=specific_id)
# FOR DJANGO REST FRAMEWORK (DRF)
class DefaultsMixin(object):
"""
Default settings for view authentication, permissions,
filtering and pagination
"""
authentication_classes = (
authentication.BasicAuthentication,
authentication.TokenAuthentication,
)
permission_classes = (
permissions.IsAuthenticated, # Access to GET, POST, HEAD, OPTIONS
#IsReadOnlyRequest,
#permissions.IsAuthenticatedOrReadOnly
)
filter_backends = (
filters.DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter,
)
paginate_by = 50
paginate_by_param = 'page_size'
max_paginate_by = 500
# DRF FILTERS
class JobFilter(django_filters.FilterSet):
company = django_filters.CharFilter(name='company')
class Meta:
model = Job
fields = ('timestamp_updated', 'company', 'title')
# DRF VIEWSETS
class JobViewSet(DefaultsMixin, viewsets.ModelViewSet):
queryset = Job.objects.all()
serializer_class = JobSerializer
filter_class = JobFilter
search_fields = ('name',)
ordering_fields = ('timestamp_updated',)
|
[
"William.Q.Liu@gmail.com"
] |
William.Q.Liu@gmail.com
|
7e198ce9b23e20291e65927d4cb4929ce449664b
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/create_reassignment_task_response.py
|
9d3d17bcac4d36154a7f4e3ba477519e68ca9043
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,225
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateReassignmentTaskResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'job_id': 'str'
}
attribute_map = {
'job_id': 'job_id'
}
def __init__(self, job_id=None):
"""CreateReassignmentTaskResponse
The model defined in huaweicloud sdk
:param job_id: Task ID.
:type job_id: str
"""
super(CreateReassignmentTaskResponse, self).__init__()
self._job_id = None
self.discriminator = None
if job_id is not None:
self.job_id = job_id
@property
def job_id(self):
"""Gets the job_id of this CreateReassignmentTaskResponse.
Task ID.
:return: The job_id of this CreateReassignmentTaskResponse.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this CreateReassignmentTaskResponse.
Task ID.
:param job_id: The job_id of this CreateReassignmentTaskResponse.
:type job_id: str
"""
self._job_id = job_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateReassignmentTaskResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
6432cf6c0bb2012d7369a431a646f38b43800201
|
29a4e8ffa77a09c418712bb243e1b4d24336e0c1
|
/nbgrader/formgrader/base.py
|
326ee4c2852b3763977915e7b9e277acf09f721b
|
[
"BSD-3-Clause"
] |
permissive
|
silky/nbgrader
|
f52634438d79df80de077569e94562f08f123f0b
|
30f461ee06a03a1e2ed1789016bb49e9f59e61eb
|
refs/heads/master
| 2021-01-18T00:23:18.300627
| 2016-01-08T22:06:45
| 2016-01-08T22:06:45
| 50,624,512
| 1
| 0
| null | 2016-01-29T00:21:36
| 2016-01-29T00:21:36
| null |
UTF-8
|
Python
| false
| false
| 2,554
|
py
|
import json
import functools
from tornado import web
def authenticated(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
result = self.auth.authenticate(self.request)
if result is True:
return f(self, *args, **kwargs) # Success
elif result is False:
raise web.HTTPError(403) # Forbidden
else:
self.redirect(result, permanent=False) # Redirect
return wrapper
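# Hedged usage sketch: the decorator wraps Tornado handler methods, e.g.
#   class SomeHandler(BaseHandler):   # handler name is an assumption
#       @authenticated
#       def get(self):
#           ...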
class BaseHandler(web.RequestHandler):
@property
def gradebook(self):
return self.settings['gradebook']
@property
def auth(self):
return self.settings['auth']
@property
def mathjax_url(self):
return self.settings['mathjax_url']
@property
def notebook_dir(self):
return self.settings['notebook_dir']
@property
def notebook_dir_format(self):
return self.settings['notebook_dir_format']
@property
def nbgrader_step(self):
return self.settings['nbgrader_step']
@property
def exporter(self):
return self.settings['exporter']
@property
def log(self):
return self.settings['log']
def render(self, name, **ns):
template = self.settings['jinja2_env'].get_template(name)
return template.render(**ns)
def write_error(self, status_code, **kwargs):
if status_code == 500:
html = self.render(
'gradebook_500.tpl',
base_url=self.auth.base_url,
error_code=500)
elif status_code == 502:
html = self.render(
'gradebook_500.tpl',
base_url=self.auth.base_url,
error_code=502)
elif status_code == 403:
html = self.render(
'gradebook_403.tpl',
base_url=self.auth.base_url,
error_code=403)
else:
return super(BaseHandler, self).write_error(status_code, **kwargs)
self.write(html)
self.finish()
class BaseApiHandler(BaseHandler):
def get_json_body(self):
"""Return the body of the request as JSON data."""
if not self.request.body:
return None
body = self.request.body.strip().decode('utf-8')
try:
model = json.loads(body)
except Exception:
self.log.debug("Bad JSON: %r", body)
self.log.error("Couldn't parse JSON", exc_info=True)
raise web.HTTPError(400, 'Invalid JSON in body of request')
return model
|
[
"jhamrick@berkeley.edu"
] |
jhamrick@berkeley.edu
|
69ed92de644fca515a276845a1ab3c88f930d96c
|
ecf6fe6aa87b2c3f041acc30fab11b0cafe3dd46
|
/architecture_py/archi_v3_4.py
|
c44736bffc6e0190265c5c5a8ec71479998ec8b7
|
[] |
no_license
|
antgratia/Memoire_code
|
73c7806c4576c2e73e00d9a84b1063a2c8f6b559
|
2cdc1339ea24896a6628238f6467edff80f98166
|
refs/heads/main
| 2023-06-20T16:19:07.041464
| 2021-07-13T11:53:48
| 2021-07-13T11:53:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,719
|
py
|
import numpy as np
import os
from keras import backend as K
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Concatenate, Dropout
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import plot_model
import tensorflow as tf
import sys
import traceback
import csv
from time import time
type_archi = 'ALL'
epsilon = 0.0
dropout_rate = 0.4
axis = 3
compress_factor = 0.5
# load dataset
(train_x, train_y), (test_x, test_y) = keras.datasets.cifar10.load_data()
# normalize to range 0-1
train_x = train_x / 255.0
test_x = test_x / 255.0
val_x = train_x[:5000]
val_y = train_y[:5000]
# init training time
training_time = 0
# init result test/train
test_result_loss = ""
test_result_acc = ""
train_result_loss = ""
train_result_acc = ""
nb_layers = "not built"
history = None
def id_block(X, f, filters, activation):
X_shortcut = X
X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Add()([X, X_shortcut])# SKIP Connection
X = Activation(activation)(X)
return X
def conv_block(X, f, filters, activation, s=2):
X_shortcut = X
X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X_shortcut = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
if epsilon != 0:
X_shortcut = BatchNormalization(epsilon = epsilon, axis=axis)(X_shortcut)
X = Add()([X, X_shortcut])
X = Activation(activation)(X)
return X
def denseBlock(X, f, nb_filter, nb_layer, padding, activation):
x_input = X
for _ in range(0,nb_layer):
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=nb_filter, kernel_size=(f, f), strides=(1, 1), padding=padding)(X)
if dropout_rate != 0:
X = Dropout(dropout_rate)(X)
X = Concatenate()([X, x_input])
return X
def transition_block(X, f, nb_filter, padding, activation, op, stride):
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=nb_filter, kernel_size=(f, f), strides=(1, 1), padding=padding)(X)
if dropout_rate != 0:
X = Dropout(dropout_rate)(X)
if (op == 'avg'):
X = AveragePooling2D(pool_size = f, strides=stride, padding=padding)(X)
else :
X = MaxPooling2D(pool_size=f, strides=stride, padding=padding)(X)
return X
try:
def getModel():
X_input = X = Input([32, 32, 3])
X = Conv2D(18, kernel_size=5, strides=5, activation='relu', padding='valid')(X)
X = conv_block(X, 2, 36, 'selu', 1)
X = Conv2D(72, kernel_size=7, strides=2, activation='relu', padding='same')(X)
X = conv_block(X, 7, 144, 'tanh', 7)
X = GlobalMaxPooling2D()(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=X_input, outputs=X)
return model
model = getModel()
#plot_model(model, show_shapes=True, to_file="../architecture_img/archi_v3_4.png")
model.compile(optimizer='adam', loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
start = time()
es = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, restore_best_weights=True, patience=1)
list_cb = [es]
history = model.fit(train_x, train_y, epochs=50, batch_size=64, validation_split=0.3, callbacks=list_cb)
training_time = time()-start
print(model.evaluate(test_x, test_y))
log_file = open("../architecture_log/archi_v3_4.log" , "w")
# save test result
log_file.write('test result : ' + str(model.evaluate(test_x, test_y)))
test_result_loss = model.evaluate(test_x, test_y)[0]
test_result_acc = model.evaluate(test_x, test_y)[1]
# save train result
log_file.write('train result : ' + str(model.evaluate(train_x, train_y)))
log_file.write('History train result : ' + str(history.history))
train_result_loss = model.evaluate(train_x, train_y)[0]
train_result_acc = model.evaluate(train_x, train_y)[1]
print('OK: file ../architecture_log/archi_v3_4.log has been created')
nb_layers = len(model.layers)
log_file.close()
except:
print('error: file ../architecture_log/archi_v3_4_error.log has been created')
error_file = open("../architecture_log/archi_v3_4_error.log" , "w")
traceback.print_exc(file=error_file)
test_result_loss = "Error"
test_result_acc = "Error"
train_result_loss = "Error"
train_result_acc = "Error"
error_file.close()
finally:
file = open('../architecture_results_v3.csv', 'a', newline ='')
with file:
# identifying header
header = ['file_name', 'training_time(s)', 'test_result_loss', 'test_result_acc', 'train_result_acc', 'train_result_loss', 'nb_layers', 'epochs', 'type_archi']
writer = csv.DictWriter(file, fieldnames = header)
# writing data row-wise into the csv file
# writer.writeheader()
writer.writerow({'file_name' : 'archi_v3_4',
'training_time(s)': training_time,
'test_result_loss': test_result_loss,
'test_result_acc': test_result_acc,
'train_result_acc': train_result_acc,
'train_result_loss': train_result_loss,
'nb_layers': nb_layers,
'epochs' : len(history.history['loss']) if history else "Error",
'type_archi': type_archi})
print('add line into architecture_results_v3.csv')
file.close()
|
[
"antoine.gratia@student.unamur.be"
] |
antoine.gratia@student.unamur.be
|
717318d0c198f61b25d44184c633b78a922e0488
|
ef390f231b4d8e11e87f3cbe6066ebe2337ce099
|
/MyRMS/urls.py
|
99f166469233f42d271bb3a76adb21f720466711
|
[] |
no_license
|
ShantanuBadmore/MyRMS
|
b46943047705e1fd132cae4bb29acb2ba59fc2b4
|
f47011fc2c04f6534c9a61e36608697ecb4acde9
|
refs/heads/main
| 2023-02-02T05:41:35.670591
| 2020-12-16T13:11:09
| 2020-12-16T13:11:09
| 317,527,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
"""MyRMS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from MyRMS import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("process.urls")),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"shantanubad21@gmail.com"
] |
shantanubad21@gmail.com
|
1a182f8146133ca9bc2464aae199c57b3fe6bab9
|
fc6e807c122e77a02bf9fa34fc57e90ebf939b12
|
/src/read_mat.py
|
cd8a54936b4e58a27459d9a549dcd6180844193e
|
[] |
no_license
|
kittenish/Frame-Transformer-Network
|
e924a522fc833df54446a82414df827f83e93348
|
2dda0209ad7dca8f014b9f94d6175f297a825646
|
refs/heads/master
| 2021-01-20T03:34:50.296022
| 2017-06-17T02:48:13
| 2017-06-17T02:48:13
| 89,558,047
| 17
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import scipy.io as sio
import h5py
def read_mat(url):
data = sio.loadmat(url)
return data
def read_mat_v(url,name):
with h5py.File(url, 'r') as f:
data = f[name][()]
return data
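# Hedged usage note: scipy.io.loadmat reads MAT-files up to v7.2, while
# v7.3 files are HDF5-based and need h5py, hence the two readers, e.g.
#   data = read_mat('old.mat')                    # file names are assumptions
#   frames = read_mat_v('new_v73.mat', 'frames')  # dataset name is assumed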
|
[
"jrgao0614@gmail.com"
] |
jrgao0614@gmail.com
|
fcc7c1431834b67b33c402bba7abe6602a5fdf0b
|
972e97bbf213ccd29592618638caac1985fc8ffc
|
/models/VGG.py
|
dccb86e182d38b33b92ecca64e609a6a1971f467
|
[] |
no_license
|
LouisChenki/CNNs-Pytorch
|
72222297a91b02af73e2ee9edcba0f3094a7ff97
|
8dfe86c339a1e6ce0d4d4678caa555df7bffbd82
|
refs/heads/master
| 2020-08-06T13:24:41.359640
| 2019-10-08T02:07:22
| 2019-10-08T02:07:22
| 212,990,894
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,365
|
py
|
import torch.nn as nn
def Conv3x3(in_f, out_f):
return nn.Conv2d(in_channels=in_f, out_channels=out_f, kernel_size=3, stride=1, padding=1)
def Conv1x1(in_f, out_f):
return nn.Conv2d(in_channels=in_f, out_channels=out_f, kernel_size=1, stride=1)
def MaxPool():
return nn.MaxPool2d(kernel_size=2, stride=2)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, num, lrn=False, smallconv=False):
super(BasicBlock, self).__init__()
self.block = self.make_layer(inplanes, planes, num, lrn, smallconv)
def make_layer(self, inplanes, planes, num, lrn=False, smallconv=False):
block = list()
block.append(Conv3x3(in_f=inplanes, out_f=planes))
block.append(nn.ReLU(inplace=True))
for i in range(1, num-1):
block.append(Conv3x3(in_f=planes, out_f=planes))
block.append(nn.ReLU(inplace=True))
if lrn is True:
block.append(nn.LocalResponseNorm(size=5))
elif smallconv is True:
block.append(Conv1x1(in_f=planes, out_f=planes))
block.append(nn.ReLU(inplace=True))
elif num > 1:
block.append(Conv3x3(in_f=planes, out_f=planes))
block.append(nn.ReLU(inplace=True))
return nn.Sequential(*block)
def forward(self, x):
x = self.block(x)
return x
class Classifer(nn.Module):
def __init__(self, num_class=1000):
super(Classifer, self).__init__()
self.fc1 = nn.Linear(7*7*512, 4096)
self.fc2 = nn.Linear(4096, 4096)
self.fc3 = nn.Linear(4096, num_class)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.dropout(self.relu(self.fc1(x)))
x = self.dropout(self.relu(self.fc2(x)))
x = self.relu(self.fc3(x))
return x
class VGG(nn.Module):
def __init__(self, num, lrn=False, smallconv=False, num_class=1000):
super(VGG, self).__init__()
self.block1 = BasicBlock(inplanes=3, planes=64, num=num[0], lrn=lrn, smallconv=False)
self.block2 = BasicBlock(inplanes=64, planes=128, num=num[1])
self.block3 = BasicBlock(inplanes=128, planes=256, num=num[2], smallconv=smallconv)
self.block4 = BasicBlock(inplanes=256, planes=512, num=num[3], smallconv=smallconv)
self.block5 = BasicBlock(inplanes=512, planes=512, num=num[4], smallconv=smallconv)
self.pool = MaxPool()
self.classifer = Classifer(num_class=num_class)
def forward(self, x):
x = self.block1(x)
x = self.pool(x)
x = self.block2(x)
x = self.pool(x)
x = self.block3(x)
x = self.pool(x)
x = self.block4(x)
x = self.pool(x)
x = self.block5(x)
x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.classifer(x)
return x
def initialization(self):
for per in self.modules():
if isinstance(per, nn.Conv2d):
nn.init.kaiming_normal_(per.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(per, nn.Linear):
nn.init.normal_(per.weight, 0, 0.01)
nn.init.constant_(per.bias, 0)
def vgg_11(num_class, initialize=False):
net = VGG(num=[1, 1, 2, 2, 2], num_class=num_class)
if initialize:
net.initialization()
return net
def vgg_11_lrn(num_class, initialize=False):
net = VGG(num=[1, 1, 2, 2, 2], lrn=True, num_class=num_class)
if initialize:
net.initialization()
return net
def vgg_13(num_class, initialize=False):
net = VGG(num=[2, 2, 2, 2, 2], num_class=num_class)
if initialize:
net.initialization()
return net
def vgg_16_c(num_class, initialize=False):
net = VGG(num=[2, 2, 3, 3, 3], smallconv=True, num_class=num_class)
if initialize:
net.initialization()
return net
def vgg_16_d(num_class, initialize=False):
net = VGG(num=[2, 2, 3, 3, 3], num_class=num_class)
if initialize:
net.initialization()
return net
def vgg_19(num_class, initialize=False):
net = VGG(num=[2, 2, 4, 4, 4], num_class=num_class)
if initialize:
net.initialization()
return net
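# Hedged note: the `num` lists follow the VGG paper's configurations
# A (vgg_11), A-LRN (vgg_11_lrn), B (vgg_13), C (vgg_16_c, with 1x1 convs),
# D (vgg_16_d) and E (vgg_19); e.g. vgg_16_d stacks 2+2+3+3+3 = 13 conv
# layers plus the 3 fully connected layers in Classifer, giving 16 weight
# layers.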
|
[
"noreply@github.com"
] |
noreply@github.com
|
73b652f3a24c54f91a9bbff23c0ecf91a474ae00
|
76e59b924b91cea92f86bb2563a16d77ed9d4121
|
/blog/migrations/0001_initial.py
|
4c7bb15def04552395c315e3566f4a31405d8c3a
|
[] |
no_license
|
rwalt04/my-first-blog
|
8325964605ee6fefe5cdc2de9939e9c88e576f62
|
ee406c259c417c58859278e8e0cb4f31ac636cdd
|
refs/heads/master
| 2020-06-16T23:28:16.477740
| 2017-01-10T21:20:50
| 2017-01-10T21:20:50
| 75,058,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-09 04:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"rwalt04@gmail.com"
] |
rwalt04@gmail.com
|
c315f03c95140d141e56d7df055e3cacaea8396e
|
b89ce8c23c4b8a62e98e5ee26fe00563dccd9a57
|
/ex23.py
|
5f4b193c45fc74d4368e166349c96dab01bffecf
|
[] |
no_license
|
LemonGuai/python_study
|
5cf1fd526059902f82f367dab2ec27ce1b08c584
|
00b37575610d5a53f6391c6736ffde6ae59691d6
|
refs/heads/master
| 2021-06-14T03:37:14.412638
| 2021-05-21T14:07:19
| 2021-05-21T14:07:19
| 181,149,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
import sys
script, encoding, error = sys.argv
def main(language_file, encoding, errors):
line = language_file.readline()
if line:
print_line(line, encoding, errors)
return main(language_file, encoding, errors)
def print_line(line, encoding, errors):
next_lang = line.strip()
raw_bytes = next_lang.encode(encoding, errors=errors)
cooked_string = raw_bytes.decode(encoding, errors=errors)
print(raw_bytes, "<===>", cooked_string)
languages = open("languages.txt", encoding="utf-8")
main(languages, encoding, error)
|
[
"csf2412297817@163.com"
] |
csf2412297817@163.com
|
dfb43f4fff46cd2c3b565232d90d955ef89a1bb6
|
bf6cf5e55349e414ccca0fc674f721f589e06e8f
|
/src/main/migrations/0016_auto_20170625_1315.py
|
272f2a8814502db51d4b651ce79756fab5b0ccb4
|
[
"MIT"
] |
permissive
|
shashankmohabia/gymkhana-master
|
cc7e3db4847edc2f91fc752da2fc35ad055f237d
|
a0d399d781797a2f63fb81a1ae287714213d068b
|
refs/heads/master
| 2021-05-03T10:29:29.596959
| 2018-02-06T23:27:16
| 2018-02-06T23:27:16
| 120,535,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-25 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('photologue', '0011_auto_20170625_1315'),
('main', '0015_auto_20170625_1239'),
]
operations = [
migrations.AddField(
model_name='club',
name='gallery',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='photologue.Gallery'),
),
migrations.AlterField(
model_name='club',
name='skin',
field=models.CharField(blank=True, choices=[('white-skin', 'White'), ('black-skin', 'Black'), ('cyan-skin', 'Cyan'), ('mdb-skin', 'MDB'), ('deep-purple-skin', 'Deep Purple'), ('navy-blue-skin', 'Navy Blue'), ('pink-skin', 'Pink'), ('indigo-skin', 'Indigo'), ('light-blue-skin', 'Light Blue'), ('grey-skin', 'Grey')], help_text='Choose a skin while displaying club page.', max_length=32, null=True),
),
]
|
[
"shashankmohabia27@gmail.com"
] |
shashankmohabia27@gmail.com
|
932ed77b62071e2341a521257a94cd2988c67347
|
1680de86a673db3370327cdedc891b6c4f6a698c
|
/examples/flask/app/models.py
|
f5f2962cf79f66bdebdd2087e8067ebaa52c9d12
|
[
"Apache-2.0"
] |
permissive
|
broadsheet/facebook-sdk
|
cf477f84de339d6ca9a362fb1a0a283b801f9d79
|
5d4d4723904ca41a944d2c537a4073a43d0066cf
|
refs/heads/master
| 2020-05-29T20:39:02.938799
| 2019-07-18T01:56:27
| 2019-07-18T01:56:27
| 189,357,850
| 0
| 0
|
Apache-2.0
| 2019-05-30T06:24:39
| 2019-05-30T06:24:38
| null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
from datetime import datetime
from app import db
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.String, nullable=False, primary_key=True)
created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
updated = db.Column(db.DateTime, default=datetime.utcnow, nullable=False,
onupdate=datetime.utcnow)
name = db.Column(db.String, nullable=False)
profile_url = db.Column(db.String, nullable=False)
access_token = db.Column(db.String, nullable=False)
|
[
"personal.mitchellstewart@gmail.com"
] |
personal.mitchellstewart@gmail.com
|
e62a14fe12df472397d2a04fcbe08d76194ed0ed
|
c117487299300174881a1fcc33d8214b40a70a05
|
/_LaunchpadDJ/launchpadchannelstripcomponent.py
|
7d409c7c8aed4f1dc07dd997188821366fa82923
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
alandrees/RemoteScripts
|
2e3f2bf72e57249f7bc6b3c47931bde9d02dfeea
|
468a98c6652e38dc7887bd7e99338ee4f0cfc8b3
|
refs/heads/master
| 2020-12-24T16:15:04.714352
| 2013-06-11T16:27:29
| 2013-06-11T16:27:29
| 10,479,183
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,706
|
py
|
import Live
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.ButtonElement import *
class LaunchpadChannelStripComponent(ChannelStripComponent):
def __init__(self):
ChannelStripComponent.__init__(self)
self._stopall_button = None
def get_track(self):
if(self._track != None):
return self._track
else:
return None
def sends_count(self):
return len(self._track.mixer_device.sends)
def set_stopall_button(self, button):
if (button != self._stopall_button):
if (self._stopall_button != None):
self._stopall_button.remove_value_listener(self._stopall_value)
self._track.remove_fired_slot_index_listener(self._on_stopall_changed)
self._track.remove_playing_slot_index_listener(self._on_stopall_changed)
self._stopall_button.reset()
self._stopall_pressed = False #added
self._stopall_button = button
if (self._stopall_button != None):
self._stopall_button.add_value_listener(self._stopall_value)
self._track.add_fired_slot_index_listener(self._on_stopall_changed)
self._track.add_playing_slot_index_listener(self._on_stopall_changed)
self.update()
def _stopall_value(self,value):
if value != 0:
self._track.stop_all_clips()
self.update()
def remove_stopall_button(self):
if (self._stopall_button != None):
self._stopall_button.remove_value_listener(self._stopall_value)
self._track.remove_fired_slot_index_listener(self._on_stopall_changed)
self._track.remove_playing_slot_index_listener(self._on_stopall_changed)
self._stopall_pressed = False #added
def _on_stopall_changed(self):
        if ((self._track != None) and hasattr(self._track, 'fired_slot_index') and hasattr(self._track, 'playing_slot_index')):
if self._stopall_button != None:
if self._track.fired_slot_index == -2:
self._stopall_button.send_value(59)
return None
if self._track.playing_slot_index >= 0:
self._stopall_button.send_value(29,True)
else:
self._stopall_button.send_value(13,True)
def set_track(self, track):
#assert isinstance(track, type(None), Live.Track.Track)
assert ((track == None) or isinstance(track, Live.Track.Track))
if (self._track != None):
if (self._track != self.song().master_track):
if self._track.mixer_device.sends_has_listener(self._on_sends_changed):
self._track.mixer_device.remove_sends_listener(self._on_sends_changed)
if self._track.mute_has_listener(self._on_mute_changed):
self._track.remove_mute_listener(self._on_mute_changed)
if self._track.name_has_listener(self._on_track_name_changed):
self._track.remove_name_listener(self._on_track_name_changed)
if self._track.solo_has_listener(self._on_solo_changed):
self._track.remove_solo_listener(self._on_solo_changed)
if self._track.mixer_device.crossfade_assign_has_listener(self._on_cf_assign_changed):
self._track.mixer_device.remove_crossfade_assign_listener(self._on_cf_assign_changed)
if (self._track not in self.song().return_tracks):
if (self._track.can_be_armed and self._track.arm_has_listener(self._on_arm_changed)):
self._track.remove_arm_listener(self._on_arm_changed)
if self._track.current_input_routing_has_listener(self._on_input_routing_changed):
self._track.remove_current_input_routing_listener(self._on_input_routing_changed)
if (self._pan_control != None):
self._pan_control.release_parameter()
if (self._volume_control != None):
self._volume_control.release_parameter()
if (self._send_controls != None):
for send_control in self._send_controls:
if (send_control != None):
send_control.release_parameter()
self._track = track
if (self._track != None):
assert isinstance(self._track, Live.Track.Track)
assert (self._track in ((self.song().tracks + self.song().return_tracks) + (self.song().master_track,)))
if (self._track != self.song().master_track):
self._track.add_solo_listener(self._on_solo_changed)
self._track.mixer_device.add_sends_listener(self._on_sends_changed)
self._track.add_mute_listener(self._on_mute_changed)
self._track.add_name_listener(self._on_track_name_changed)
self._track.mixer_device.add_crossfade_assign_listener(self._on_cf_assign_changed)
if (self._track not in self.song().return_tracks):
if self._track.can_be_armed:
self._track.add_arm_listener(self._on_arm_changed)
self._track.add_current_input_routing_listener(self._on_input_routing_changed)
if (self._track_name_data_source != None):
self._track_name_data_source.set_display_string(self._track.name)
else:
if (self._track_name_data_source != None):
self._track_name_data_source.set_display_string(' - ')
for button in [self._select_button, self._mute_button, self._solo_button, self._arm_button, self._crossfade_toggle]: #added
if button != None: #added
button.turn_off() #added
self.update()
def update(self):
if self._allow_updates:
if self.is_enabled():
if (self._track != None):
if (self._pan_control != None):
self._pan_control.connect_to(self._track.mixer_device.panning)
if (self._volume_control != None):
self._volume_control.connect_to(self._track.mixer_device.volume)
if (self._send_controls != None):
index = 0
for send_control in self._send_controls:
if (send_control != None):
if (index < len(self._track.mixer_device.sends)):
send_control.connect_to(self._track.mixer_device.sends[index])
else:
send_control.release_parameter()
index += 1
#self._request_rebuild_callback()
self.on_selected_track_changed()
self._on_mute_changed()
self._on_solo_changed()
self._on_arm_changed()
self._on_cf_assign_changed()
self._on_stopall_changed()
else:
if (self._track != None):
if (self._pan_control != None):
self._pan_control.release_parameter()
if (self._volume_control != None):
self._volume_control.release_parameter()
if (self._send_controls != None):
for send_control in self._send_controls:
if (send_control != None):
send_control.release_parameter()
#ControlSurfaceComponent._request_rebuild_callback(self)
else:
self._update_requests += 1
def remove_send_controls(self):
for send_control in self._send_controls:
send_control.release_parameter()
self._send_controls = None
def remove_volume_control(self):
self._volume_control.release_parameter()
self._volume_control = None
def external_solo_trigger(self,value):
if ((self._track != None) and (self._track != self.song().master_track)):
if value != 0:
self._track.solo = True
else:
self._track.solo = False
#def _solo_value(self, value):
# assert (value in range(128))
# if self.is_enabled():
# if ((self._track != None) and (self._track != self.song().master_track)):
# expected_solos_pressed = 0 #added
# if self._solo_pressed: #added
# expected_solos_pressed = 1 #added
# solo_exclusive = (self.song().exclusive_solo != self._shift_pressed) or (ChannelStripComponent.number_of_solos_pressed() == expected_solos_pressed)) #added
# new_value = not self._track.solo #added
# respect_multi_selection = self._track.is_part_of_selection #added
# for track in (self.song().tracks + self.song().return_tracks):
# if (track == self._track) or (respect_multi_selection and track.is_part_of_selection):
# track.solo = new_value
# elif solo_exclusive and track.solo:
# track.solo = False
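The component above depends on Live's symmetric add_*/remove_* listener pairs: every listener registered in set_stopall_button and set_track is unregistered before the observed object is swapped. The self-contained Python sketch below mirrors that discipline outside of Live; Observable is a stand-in, not part of the Live API.

class Observable(object):
    def __init__(self):
        self._listeners = []
    def add_value_listener(self, fn):
        self._listeners.append(fn)
    def remove_value_listener(self, fn):
        self._listeners.remove(fn)
    def fire(self, value):
        for fn in list(self._listeners):
            fn(value)

button = Observable()
def on_press(value):
    print('pressed with value', value)
button.add_value_listener(on_press)
button.fire(127)                        # -> pressed with value 127
button.remove_value_listener(on_press)  # symmetric teardown, as in set_stopall_button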
|
[
"alandrees@theselves.com"
] |
alandrees@theselves.com
|
68c6872a92d545946338db2bc054259b20769654
|
d584b46ae0b5d6ac340ac3730e87d0ec1050ba00
|
/tools/adafruit_mlx90640.py
|
c1cd5a4bf197d9b0df57abe4234bf38764244584
|
[] |
no_license
|
aaron-c-zhao/PeopleCounter
|
74d0dfb301273fb8c97fd0df5a8e4de60ccc58d0
|
b4c15fc5dc082c9198d01005e49a56691c2c0440
|
refs/heads/master
| 2022-11-26T17:32:59.183497
| 2020-08-09T08:36:23
| 2020-08-09T08:36:23
| 279,967,633
| 3
| 0
| null | 2020-07-22T18:41:29
| 2020-07-15T20:11:34
|
C
|
UTF-8
|
Python
| false
| false
| 26,928
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2019 ladyada for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_mlx90640`
================================================================================
Driver for the MLX90640 thermal camera
* Author(s): ladyada
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
* Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
"""
import struct
import math
import time
from adafruit_bus_device.i2c_device import I2CDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MLX90640.git"
# We match the melexis library naming, and don't want to change
# pylint: disable=invalid-name
eeData = [0] * 832
I2C_READ_LEN = 2048
SCALEALPHA = 0.000001
MLX90640_DEVICEID1 = 0x2407
OPENAIR_TA_SHIFT = 8
class RefreshRate: # pylint: disable=too-few-public-methods
""" Enum-like class for MLX90640's refresh rate """
REFRESH_0_5_HZ = 0b000 # 0.5Hz
REFRESH_1_HZ = 0b001 # 1Hz
REFRESH_2_HZ = 0b010 # 2Hz
REFRESH_4_HZ = 0b011 # 4Hz
REFRESH_8_HZ = 0b100 # 8Hz
REFRESH_16_HZ = 0b101 # 16Hz
REFRESH_32_HZ = 0b110 # 32Hz
REFRESH_64_HZ = 0b111 # 64Hz
class MLX90640: # pylint: disable=too-many-instance-attributes
"""Interface to the MLX90640 temperature sensor."""
kVdd = 0
vdd25 = 0
KvPTAT = 0
KtPTAT = 0
vPTAT25 = 0
alphaPTAT = 0
gainEE = 0
tgc = 0
KsTa = 0
resolutionEE = 0
calibrationModeEE = 0
ksTo = [0] * 5
ct = [0] * 5
alpha = [0] * 768
alphaScale = 0
offset = [0] * 768
kta = [0] * 768
ktaScale = 0
kv = [0] * 768
kvScale = 0
cpAlpha = [0] * 2
cpOffset = [0] * 2
ilChessC = [0] * 3
brokenPixels = [0xFFFF] * 5
outlierPixels = [0xFFFF] * 5
cpKta = 0
cpKv = 0
def __init__(self, i2c_bus, address=0x33):
self.i2c_device = I2CDevice(i2c_bus, address)
self._I2CReadWords(0x2400, eeData)
# print(eeData)
self._ExtractParameters()
@property
def serial_number(self):
""" 3-item tuple of hex values that are unique to each MLX90640 """
serialWords = [0, 0, 0]
self._I2CReadWords(MLX90640_DEVICEID1, serialWords)
return serialWords
@property
def refresh_rate(self):
""" How fast the MLX90640 will spit out data. Start at lowest speed in
RefreshRate and then slowly increase I2C clock rate and rate until you
max out. The sensor does not like it if the I2C host cannot 'keep up'!"""
controlRegister = [0]
self._I2CReadWords(0x800D, controlRegister)
return (controlRegister[0] >> 7) & 0x07
@refresh_rate.setter
def refresh_rate(self, rate):
controlRegister = [0]
value = (rate & 0x7) << 7
self._I2CReadWords(0x800D, controlRegister)
value |= controlRegister[0] & 0xFC7F
self._I2CWriteWord(0x800D, value)
def getFrame(self, framebuf):
""" Request both 'halves' of a frame from the sensor, merge them
and calculate the temperature in C for each of 32x24 pixels. Placed
into the 768-element array passed in! """
emissivity = 0.95
tr = 23.15
mlx90640Frame = [0] * 834
mFrames = [[0 for x in range(834)] for y in range(2)]
for i in range(2):
status = self._GetFrameData(mlx90640Frame)
mFrames[i] = mlx90640Frame.copy()
if status < 0:
raise RuntimeError("Frame data error")
# For a MLX90640 in the open air the shift is -8 degC.
tr = self._GetTa(mlx90640Frame) - OPENAIR_TA_SHIFT
self._CalculateTo(mlx90640Frame, emissivity, tr, framebuf)
return mFrames
def getEeData(self):
return eeData
def _GetFrameData(self, frameData):
dataReady = 0
cnt = 0
statusRegister = [0]
controlRegister = [0]
while dataReady == 0:
self._I2CReadWords(0x8000, statusRegister)
dataReady = statusRegister[0] & 0x0008
# print("ready status: 0x%x" % dataReady)
while (dataReady != 0) and (cnt < 5):
self._I2CWriteWord(0x8000, 0x0030)
# print("Read frame", cnt)
self._I2CReadWords(0x0400, frameData, end=832)
self._I2CReadWords(0x8000, statusRegister)
dataReady = statusRegister[0] & 0x0008
# print("frame ready: 0x%x" % dataReady)
cnt += 1
if cnt > 4:
raise RuntimeError("Too many retries")
self._I2CReadWords(0x800D, controlRegister)
frameData[832] = controlRegister[0]
frameData[833] = statusRegister[0] & 0x0001
return frameData[833]
def _GetTa(self, frameData):
vdd = self._GetVdd(frameData)
ptat = frameData[800]
if ptat > 32767:
ptat -= 65536
ptatArt = frameData[768]
if ptatArt > 32767:
ptatArt -= 65536
ptatArt = (ptat / (ptat * self.alphaPTAT + ptatArt)) * math.pow(2, 18)
ta = ptatArt / (1 + self.KvPTAT * (vdd - 3.3)) - self.vPTAT25
ta = ta / self.KtPTAT + 25
return ta
def _GetVdd(self, frameData):
vdd = frameData[810]
if vdd > 32767:
vdd -= 65536
resolutionRAM = (frameData[832] & 0x0C00) >> 10
resolutionCorrection = math.pow(2, self.resolutionEE) / math.pow(
2, resolutionRAM
)
vdd = (resolutionCorrection * vdd - self.vdd25) / self.kVdd + 3.3
return vdd
def _CalculateTo(self, frameData, emissivity, tr, result):
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
subPage = frameData[833]
alphaCorrR = [0] * 4
irDataCP = [0, 0]
vdd = self._GetVdd(frameData)
ta = self._GetTa(frameData)
ta4 = ta + 273.15
ta4 = ta4 * ta4
ta4 = ta4 * ta4
tr4 = tr + 273.15
tr4 = tr4 * tr4
tr4 = tr4 * tr4
taTr = tr4 - (tr4 - ta4) / emissivity
ktaScale = math.pow(2, self.ktaScale)
kvScale = math.pow(2, self.kvScale)
alphaScale = math.pow(2, self.alphaScale)
alphaCorrR[0] = 1 / (1 + self.ksTo[0] * 40)
alphaCorrR[1] = 1
alphaCorrR[2] = 1 + self.ksTo[1] * self.ct[2]
alphaCorrR[3] = alphaCorrR[2] * (1 + self.ksTo[2] * (self.ct[3] - self.ct[2]))
# --------- Gain calculation -----------------------------------
gain = frameData[778]
if gain > 32767:
gain -= 65536
gain = self.gainEE / gain
# --------- To calculation -------------------------------------
mode = (frameData[832] & 0x1000) >> 5
irDataCP[0] = frameData[776]
irDataCP[1] = frameData[808]
for i in range(2):
if irDataCP[i] > 32767:
irDataCP[i] -= 65536
irDataCP[i] *= gain
irDataCP[0] -= (
self.cpOffset[0]
* (1 + self.cpKta * (ta - 25))
* (1 + self.cpKv * (vdd - 3.3))
)
if mode == self.calibrationModeEE:
irDataCP[1] -= (
self.cpOffset[1]
* (1 + self.cpKta * (ta - 25))
* (1 + self.cpKv * (vdd - 3.3))
)
else:
irDataCP[1] -= (
(self.cpOffset[1] + self.ilChessC[0])
* (1 + self.cpKta * (ta - 25))
* (1 + self.cpKv * (vdd - 3.3))
)
for pixelNumber in range(768):
ilPattern = pixelNumber // 32 - (pixelNumber // 64) * 2
chessPattern = ilPattern ^ (pixelNumber - (pixelNumber // 2) * 2)
conversionPattern = (
(pixelNumber + 2) // 4
- (pixelNumber + 3) // 4
+ (pixelNumber + 1) // 4
- pixelNumber // 4
) * (1 - 2 * ilPattern)
if mode == 0:
pattern = ilPattern
else:
pattern = chessPattern
if pattern == frameData[833]:
irData = frameData[pixelNumber]
if irData > 32767:
irData -= 65536
irData *= gain
kta = self.kta[pixelNumber] / ktaScale
kv = self.kv[pixelNumber] / kvScale
irData -= (
self.offset[pixelNumber]
* (1 + kta * (ta - 25))
* (1 + kv * (vdd - 3.3))
)
if mode != self.calibrationModeEE:
irData += (
self.ilChessC[2] * (2 * ilPattern - 1)
- self.ilChessC[1] * conversionPattern
)
irData = irData - self.tgc * irDataCP[subPage]
irData /= emissivity
alphaCompensated = SCALEALPHA * alphaScale / self.alpha[pixelNumber]
alphaCompensated *= 1 + self.KsTa * (ta - 25)
Sx = (
alphaCompensated
* alphaCompensated
* alphaCompensated
* (irData + alphaCompensated * taTr)
)
Sx = math.sqrt(math.sqrt(Sx)) * self.ksTo[1]
To = (
math.sqrt(
math.sqrt(
irData
/ (alphaCompensated * (1 - self.ksTo[1] * 273.15) + Sx)
+ taTr
)
)
- 273.15
)
if To < self.ct[1]:
torange = 0
elif To < self.ct[2]:
torange = 1
elif To < self.ct[3]:
torange = 2
else:
torange = 3
To = (
math.sqrt(
math.sqrt(
irData
/ (
alphaCompensated
* alphaCorrR[torange]
* (1 + self.ksTo[torange] * (To - self.ct[torange]))
)
+ taTr
)
)
- 273.15
)
result[pixelNumber] = To
# pylint: enable=too-many-locals, too-many-branches, too-many-statements
def _ExtractParameters(self):
self._ExtractVDDParameters()
self._ExtractPTATParameters()
self._ExtractGainParameters()
self._ExtractTgcParameters()
self._ExtractResolutionParameters()
self._ExtractKsTaParameters()
self._ExtractKsToParameters()
self._ExtractCPParameters()
self._ExtractAlphaParameters()
self._ExtractOffsetParameters()
self._ExtractKtaPixelParameters()
self._ExtractKvPixelParameters()
self._ExtractCILCParameters()
self._ExtractDeviatingPixels()
# debug output
# print('-'*40)
# print("kVdd = %d, vdd25 = %d" % (self.kVdd, self.vdd25))
# print("KvPTAT = %f, KtPTAT = %f, vPTAT25 = %d, alphaPTAT = %f" %
# (self.KvPTAT, self.KtPTAT, self.vPTAT25, self.alphaPTAT))
# print("Gain = %d, Tgc = %f, Resolution = %d" % (self.gainEE, self.tgc, self.resolutionEE))
# print("KsTa = %f, ksTo = %s, ct = %s" % (self.KsTa, self.ksTo, self.ct))
# print("cpAlpha:", self.cpAlpha, "cpOffset:", self.cpOffset)
# print("alpha: ", self.alpha)
# print("alphascale: ", self.alphaScale)
# print("offset: ", self.offset)
# print("kta:", self.kta)
# print("ktaScale:", self.ktaScale)
# print("kv:", self.kv)
# print("kvScale:", self.kvScale)
# print("calibrationModeEE:", self.calibrationModeEE)
# print("ilChessC:", self.ilChessC)
# print('-'*40)
def _ExtractVDDParameters(self):
# extract VDD
self.kVdd = (eeData[51] & 0xFF00) >> 8
if self.kVdd > 127:
self.kVdd -= 256 # convert to signed
self.kVdd *= 32
self.vdd25 = eeData[51] & 0x00FF
self.vdd25 = ((self.vdd25 - 256) << 5) - 8192
def _ExtractPTATParameters(self):
# extract PTAT
self.KvPTAT = (eeData[50] & 0xFC00) >> 10
if self.KvPTAT > 31:
self.KvPTAT -= 64
self.KvPTAT /= 4096
self.KtPTAT = eeData[50] & 0x03FF
if self.KtPTAT > 511:
self.KtPTAT -= 1024
self.KtPTAT /= 8
self.vPTAT25 = eeData[49]
self.alphaPTAT = (eeData[16] & 0xF000) / math.pow(2, 14) + 8
def _ExtractGainParameters(self):
# extract Gain
self.gainEE = eeData[48]
if self.gainEE > 32767:
self.gainEE -= 65536
def _ExtractTgcParameters(self):
# extract Tgc
self.tgc = eeData[60] & 0x00FF
if self.tgc > 127:
self.tgc -= 256
self.tgc /= 32
def _ExtractResolutionParameters(self):
# extract resolution
self.resolutionEE = (eeData[56] & 0x3000) >> 12
def _ExtractKsTaParameters(self):
# extract KsTa
self.KsTa = (eeData[60] & 0xFF00) >> 8
if self.KsTa > 127:
self.KsTa -= 256
self.KsTa /= 8192
def _ExtractKsToParameters(self):
# extract ksTo
step = ((eeData[63] & 0x3000) >> 12) * 10
self.ct[0] = -40
self.ct[1] = 0
self.ct[2] = (eeData[63] & 0x00F0) >> 4
self.ct[3] = (eeData[63] & 0x0F00) >> 8
self.ct[2] *= step
self.ct[3] = self.ct[2] + self.ct[3] * step
KsToScale = (eeData[63] & 0x000F) + 8
KsToScale = 1 << KsToScale
self.ksTo[0] = eeData[61] & 0x00FF
self.ksTo[1] = (eeData[61] & 0xFF00) >> 8
self.ksTo[2] = eeData[62] & 0x00FF
self.ksTo[3] = (eeData[62] & 0xFF00) >> 8
for i in range(4):
if self.ksTo[i] > 127:
self.ksTo[i] -= 256
self.ksTo[i] /= KsToScale
self.ksTo[4] = -0.0002
def _ExtractCPParameters(self):
# extract CP
offsetSP = [0] * 2
alphaSP = [0] * 2
alphaScale = ((eeData[32] & 0xF000) >> 12) + 27
offsetSP[0] = eeData[58] & 0x03FF
if offsetSP[0] > 511:
offsetSP[0] -= 1024
offsetSP[1] = (eeData[58] & 0xFC00) >> 10
if offsetSP[1] > 31:
offsetSP[1] -= 64
offsetSP[1] += offsetSP[0]
alphaSP[0] = eeData[57] & 0x03FF
if alphaSP[0] > 511:
alphaSP[0] -= 1024
alphaSP[0] /= math.pow(2, alphaScale)
alphaSP[1] = (eeData[57] & 0xFC00) >> 10
if alphaSP[1] > 31:
alphaSP[1] -= 64
alphaSP[1] = (1 + alphaSP[1] / 128) * alphaSP[0]
cpKta = eeData[59] & 0x00FF
if cpKta > 127:
cpKta -= 256
ktaScale1 = ((eeData[56] & 0x00F0) >> 4) + 8
self.cpKta = cpKta / math.pow(2, ktaScale1)
cpKv = (eeData[59] & 0xFF00) >> 8
if cpKv > 127:
cpKv -= 256
kvScale = (eeData[56] & 0x0F00) >> 8
self.cpKv = cpKv / math.pow(2, kvScale)
self.cpAlpha[0] = alphaSP[0]
self.cpAlpha[1] = alphaSP[1]
self.cpOffset[0] = offsetSP[0]
self.cpOffset[1] = offsetSP[1]
def _ExtractAlphaParameters(self):
# extract alpha
accRemScale = eeData[32] & 0x000F
accColumnScale = (eeData[32] & 0x00F0) >> 4
accRowScale = (eeData[32] & 0x0F00) >> 8
alphaScale = ((eeData[32] & 0xF000) >> 12) + 30
alphaRef = eeData[33]
accRow = [0] * 24
accColumn = [0] * 32
alphaTemp = [0] * 768
for i in range(6):
p = i * 4
accRow[p + 0] = eeData[34 + i] & 0x000F
accRow[p + 1] = (eeData[34 + i] & 0x00F0) >> 4
accRow[p + 2] = (eeData[34 + i] & 0x0F00) >> 8
accRow[p + 3] = (eeData[34 + i] & 0xF000) >> 12
for i in range(24):
if accRow[i] > 7:
accRow[i] -= 16
for i in range(8):
p = i * 4
accColumn[p + 0] = eeData[40 + i] & 0x000F
accColumn[p + 1] = (eeData[40 + i] & 0x00F0) >> 4
accColumn[p + 2] = (eeData[40 + i] & 0x0F00) >> 8
accColumn[p + 3] = (eeData[40 + i] & 0xF000) >> 12
for i in range(32):
if accColumn[i] > 7:
accColumn[i] -= 16
for i in range(24):
for j in range(32):
p = 32 * i + j
alphaTemp[p] = (eeData[64 + p] & 0x03F0) >> 4
if alphaTemp[p] > 31:
alphaTemp[p] -= 64
alphaTemp[p] *= 1 << accRemScale
alphaTemp[p] += (
alphaRef
+ (accRow[i] << accRowScale)
+ (accColumn[j] << accColumnScale)
)
alphaTemp[p] /= math.pow(2, alphaScale)
alphaTemp[p] -= self.tgc * (self.cpAlpha[0] + self.cpAlpha[1]) / 2
alphaTemp[p] = SCALEALPHA / alphaTemp[p]
# print("alphaTemp: ", alphaTemp)
temp = max(alphaTemp)
# print("temp", temp)
alphaScale = 0
while temp < 32768:
temp *= 2
alphaScale += 1
for i in range(768):
temp = alphaTemp[i] * math.pow(2, alphaScale)
self.alpha[i] = int(temp + 0.5)
self.alphaScale = alphaScale
def _ExtractOffsetParameters(self):
# extract offset
occRow = [0] * 24
occColumn = [0] * 32
occRemScale = eeData[16] & 0x000F
occColumnScale = (eeData[16] & 0x00F0) >> 4
occRowScale = (eeData[16] & 0x0F00) >> 8
offsetRef = eeData[17]
if offsetRef > 32767:
offsetRef -= 65536
for i in range(6):
p = i * 4
occRow[p + 0] = eeData[18 + i] & 0x000F
occRow[p + 1] = (eeData[18 + i] & 0x00F0) >> 4
occRow[p + 2] = (eeData[18 + i] & 0x0F00) >> 8
occRow[p + 3] = (eeData[18 + i] & 0xF000) >> 12
for i in range(24):
if occRow[i] > 7:
occRow[i] -= 16
for i in range(8):
p = i * 4
occColumn[p + 0] = eeData[24 + i] & 0x000F
occColumn[p + 1] = (eeData[24 + i] & 0x00F0) >> 4
occColumn[p + 2] = (eeData[24 + i] & 0x0F00) >> 8
occColumn[p + 3] = (eeData[24 + i] & 0xF000) >> 12
for i in range(32):
if occColumn[i] > 7:
occColumn[i] -= 16
for i in range(24):
for j in range(32):
p = 32 * i + j
self.offset[p] = (eeData[64 + p] & 0xFC00) >> 10
if self.offset[p] > 31:
self.offset[p] -= 64
self.offset[p] *= 1 << occRemScale
self.offset[p] += (
offsetRef
+ (occRow[i] << occRowScale)
+ (occColumn[j] << occColumnScale)
)
def _ExtractKtaPixelParameters(self): # pylint: disable=too-many-locals
# extract KtaPixel
KtaRC = [0] * 4
ktaTemp = [0] * 768
KtaRoCo = (eeData[54] & 0xFF00) >> 8
if KtaRoCo > 127:
KtaRoCo -= 256
KtaRC[0] = KtaRoCo
KtaReCo = eeData[54] & 0x00FF
if KtaReCo > 127:
KtaReCo -= 256
KtaRC[2] = KtaReCo
KtaRoCe = (eeData[55] & 0xFF00) >> 8
if KtaRoCe > 127:
KtaRoCe -= 256
KtaRC[1] = KtaRoCe
KtaReCe = eeData[55] & 0x00FF
if KtaReCe > 127:
KtaReCe -= 256
KtaRC[3] = KtaReCe
ktaScale1 = ((eeData[56] & 0x00F0) >> 4) + 8
ktaScale2 = eeData[56] & 0x000F
for i in range(24):
for j in range(32):
p = 32 * i + j
split = 2 * (p // 32 - (p // 64) * 2) + p % 2
ktaTemp[p] = (eeData[64 + p] & 0x000E) >> 1
if ktaTemp[p] > 3:
ktaTemp[p] -= 8
ktaTemp[p] *= 1 << ktaScale2
ktaTemp[p] += KtaRC[split]
ktaTemp[p] /= math.pow(2, ktaScale1)
# ktaTemp[p] = ktaTemp[p] * mlx90640->offset[p];
temp = abs(ktaTemp[0])
for kta in ktaTemp:
temp = max(temp, abs(kta))
ktaScale1 = 0
while temp < 64:
temp *= 2
ktaScale1 += 1
for i in range(768):
temp = ktaTemp[i] * math.pow(2, ktaScale1)
if temp < 0:
self.kta[i] = int(temp - 0.5)
else:
self.kta[i] = int(temp + 0.5)
self.ktaScale = ktaScale1
def _ExtractKvPixelParameters(self):
KvT = [0] * 4
kvTemp = [0] * 768
KvRoCo = (eeData[52] & 0xF000) >> 12
if KvRoCo > 7:
KvRoCo -= 16
KvT[0] = KvRoCo
KvReCo = (eeData[52] & 0x0F00) >> 8
if KvReCo > 7:
KvReCo -= 16
KvT[2] = KvReCo
KvRoCe = (eeData[52] & 0x00F0) >> 4
if KvRoCe > 7:
KvRoCe -= 16
KvT[1] = KvRoCe
KvReCe = eeData[52] & 0x000F
if KvReCe > 7:
KvReCe -= 16
KvT[3] = KvReCe
kvScale = (eeData[56] & 0x0F00) >> 8
for i in range(24):
for j in range(32):
p = 32 * i + j
split = 2 * (p // 32 - (p // 64) * 2) + p % 2
kvTemp[p] = KvT[split]
kvTemp[p] /= math.pow(2, kvScale)
# kvTemp[p] = kvTemp[p] * mlx90640->offset[p];
temp = abs(kvTemp[0])
for kv in kvTemp:
temp = max(temp, abs(kv))
kvScale = 0
while temp < 64:
temp *= 2
kvScale += 1
for i in range(768):
temp = kvTemp[i] * math.pow(2, kvScale)
if temp < 0:
self.kv[i] = int(temp - 0.5)
else:
self.kv[i] = int(temp + 0.5)
self.kvScale = kvScale
def _ExtractCILCParameters(self):
ilChessC = [0] * 3
self.calibrationModeEE = (eeData[10] & 0x0800) >> 4
self.calibrationModeEE = self.calibrationModeEE ^ 0x80
ilChessC[0] = eeData[53] & 0x003F
if ilChessC[0] > 31:
ilChessC[0] -= 64
ilChessC[0] /= 16.0
ilChessC[1] = (eeData[53] & 0x07C0) >> 6
if ilChessC[1] > 15:
ilChessC[1] -= 32
ilChessC[1] /= 2.0
ilChessC[2] = (eeData[53] & 0xF800) >> 11
if ilChessC[2] > 15:
ilChessC[2] -= 32
ilChessC[2] /= 8.0
self.ilChessC = ilChessC
def _ExtractDeviatingPixels(self):
self.brokenPixels = [0xFFFF] * 5
self.outlierPixels = [0xFFFF] * 5
pixCnt = 0
brokenPixCnt = 0
outlierPixCnt = 0
while (pixCnt < 768) and (brokenPixCnt < 5) and (outlierPixCnt < 5):
if eeData[pixCnt + 64] == 0:
self.brokenPixels[brokenPixCnt] = pixCnt
brokenPixCnt += 1
elif (eeData[pixCnt + 64] & 0x0001) != 0:
self.outlierPixels[outlierPixCnt] = pixCnt
outlierPixCnt += 1
pixCnt += 1
if brokenPixCnt > 4:
raise RuntimeError("More than 4 broken pixels")
if outlierPixCnt > 4:
raise RuntimeError("More than 4 outlier pixels")
if (brokenPixCnt + outlierPixCnt) > 4:
raise RuntimeError("More than 4 faulty pixels")
# print("Found %d broken pixels, %d outliers" % (brokenPixCnt, outlierPixCnt))
# TODO INCOMPLETE
def _I2CWriteWord(self, writeAddress, data):
cmd = bytearray(4)
cmd[0] = writeAddress >> 8
cmd[1] = writeAddress & 0x00FF
cmd[2] = data >> 8
cmd[3] = data & 0x00FF
dataCheck = [0]
with self.i2c_device as i2c:
i2c.write(cmd)
# print("Wrote:", [hex(i) for i in cmd])
time.sleep(0.001)
self._I2CReadWords(writeAddress, dataCheck)
# print("dataCheck: 0x%x" % dataCheck[0])
# if (dataCheck != data):
# return -2
def _I2CReadWords(self, addr, buffer, *, end=None):
# stamp = time.monotonic()
if end is None:
remainingWords = len(buffer)
else:
remainingWords = end
offset = 0
addrbuf = bytearray(2)
inbuf = bytearray(2 * I2C_READ_LEN)
with self.i2c_device as i2c:
while remainingWords:
addrbuf[0] = addr >> 8 # MSB
addrbuf[1] = addr & 0xFF # LSB
read_words = min(remainingWords, I2C_READ_LEN)
i2c.write_then_readinto(
addrbuf, inbuf, in_end=read_words * 2
) # in bytes
# print("-> ", [hex(i) for i in addrbuf])
outwords = struct.unpack(
">" + "H" * read_words, inbuf[0 : read_words * 2]
)
# print("<- (", read_words, ")", [hex(i) for i in outwords])
for i, w in enumerate(outwords):
buffer[offset + i] = w
offset += read_words
remainingWords -= read_words
addr += read_words
# print("i2c read", read_words, "words in", time.monotonic()-stamp)
# print("Read: ", [hex(i) for i in buffer[0:10]])
|
[
"aaron.zhaocr@gmail.com"
] |
aaron.zhaocr@gmail.com
|
722d79cb29211d56a0e6316bfa66ae6081ac3f71
|
f928722af30cf9b029ec3713d0f430d725281a1f
|
/Dataset/JigsawImageLoader.py
|
d34689c542d41f77eaf6bce8cf0f82681e0a78a5
|
[] |
no_license
|
pneha2612/JigsawPuzzlePytorch
|
3cbf0efd24698654396b3c574256af8f5cd6ba7c
|
44ff69656101abd5fe5671fb03d70df4e018f8b8
|
refs/heads/master
| 2021-05-04T04:37:43.229224
| 2018-02-01T15:29:59
| 2018-02-01T15:29:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,773
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 11:58:07 2017
@author: Biagio Brattoli
"""
import os, numpy as np
from time import time
import torch.utils.data as data
import torchvision.transforms as transforms
import torch
from PIL import Image
class DataLoader(data.Dataset):
def __init__(self,data_path,txt_list,classes=1000):
self.data_path = data_path
self.names, _ = self.__dataset_info(txt_list)
self.N = len(self.names)
self.permutations = self.__retrive_permutations(classes)
self.__image_transformer = transforms.Compose([
transforms.Resize(256,Image.BILINEAR),
transforms.CenterCrop(225)])
self.__augment_tile = transforms.Compose([
transforms.RandomCrop(64),
transforms.Resize((75,75)),
transforms.Lambda(rgb_jittering),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std =[0.229, 0.224, 0.225])])
def __getitem__(self, index):
framename = self.data_path+'/'+self.names[index]
t_load = time()
img = Image.open(framename).convert('RGB')
#print 'Load image in %.5f'%(time()-t_load)
t_proc = time()
img = self.__image_transformer(img)
        a = 75 // 2  # integer division keeps the 37-pixel half-tile size under Python 3 as well
        tiles = [None] * 9
        for n in range(9):
            i = n // 3
            j = n % 3
c = [a*i*2+a,a*j*2+a]
tile = img.crop((c[1]-a,c[0]-a,c[1]+a+1,c[0]+a+1))
tile = self.__augment_tile(tile)
t = time()
            # Normalize the patches independently to avoid a low-level feature shortcut
m = tile.mean()
s = tile.std()
norm = transforms.Normalize(mean=[m, m, m],
std =[s, s, s])
tile = norm(tile)
tiles[n] = tile
order = np.random.randint(len(self.permutations))
data = [tiles[self.permutations[order][t]] for t in range(9)]
data = torch.stack(data,0)
#print 'Process image in %.5f'%(time()-t_proc)
return data,int(order), np.array(img)
def __len__(self):
return len(self.names)
def __dataset_info(self,txt_labels):
with open(txt_labels,'r') as f:
images_list = f.readlines()
file_names = []
labels = []
for row in images_list:
row = row.split(' ')
file_names.append(row[0])
labels.append(int(row[1]))
return file_names, labels
#def __dataset_info(self,data_path='./data/'):
#file_names = []
#folders = os.listdir(data_path)
#for f in folders:
#if self.is_train:
#names = os.listdir(data_path+'/'+f)
#for ff in names:
#if '.JPEG' in ff:
#file_names.append(f+'/'+ff)
#else:
#if '.JPEG' in f:
#file_names.append(f)
#return file_names
def __retrive_permutations(self,classes):
all_perm = np.load('permutations_%d.npy'%(classes))
# from range [1,9] to [0,8]
if all_perm.min()==1:
all_perm = all_perm-1
return all_perm
def rgb_jittering(im):
im = np.array(im,np.float32)#convert to numpy array
for ch in range(3):
thisRand = np.random.uniform(0.8, 1.2)
im[:,:,ch] *= thisRand
shiftVal = np.random.randint(0,6)
if np.random.randint(2) == 1:
shiftVal = -shiftVal
        im += shiftVal
im = im.astype(np.uint8)
im = im.astype(np.float32)
return im
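A minimal usage sketch for the loader above. It assumes images.txt lists one '<relative_path> <label>' pair per line and that permutations_1000.npy sits in the working directory, as __retrive_permutations expects; both paths are placeholders.

import torch.utils.data

dataset = DataLoader('/path/to/images', 'images.txt', classes=1000)
loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
for tiles, order, original in loader:
    # tiles: [16, 9, 3, 75, 75] shuffled patches; order: permutation indices
    print(tiles.shape, order.shape)
    break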
|
[
"biagio.brattoli@iwr.uni-heidelberg.de"
] |
biagio.brattoli@iwr.uni-heidelberg.de
|
adc1969c730cf5e6bf2379a42761dc2318e0ada4
|
da738cb496c189c880e09c812874db9a48574df1
|
/back/app/bets/admin.py
|
ef2428743df97730bd3831acbc33ccb7759237d0
|
[] |
no_license
|
SNVC1/mipt-fullstack2019
|
2972ed20e146394005fa41a29a25fc85674b2271
|
5dd6d5e61a8312917d68d863f6dca5876fad352a
|
refs/heads/master
| 2023-01-08T10:40:58.535068
| 2020-11-10T17:59:19
| 2020-11-10T17:59:19
| 208,422,125
| 0
| 0
| null | 2022-12-12T18:12:35
| 2019-09-14T10:06:21
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 84
|
py
|
from django.contrib import admin
from . models import Bet
admin.site.register(Bet)
|
[
"iggy99@mail.ru"
] |
iggy99@mail.ru
|
35b45b2be379d6f6d530bbdbd64ba03d01e366ed
|
3967da624f40f5d865dfdd0a788cbadd3e4a5361
|
/python/searching/__init__.py
|
96fff532442fb73a147ab67521c186297489aff8
|
[] |
no_license
|
hero24/Algorithms
|
42f459a5c8b98074afb9e453e0e5774bf25964d1
|
8bdaed13581825bdafa0db835fa5faf00ecc3e10
|
refs/heads/master
| 2023-04-16T23:50:02.839615
| 2023-03-27T16:54:32
| 2023-03-27T16:54:32
| 76,741,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from .sequential_search import sequential_search
"""
Tell me and I forget. Teach me and I remember. Involve me and I learn.
-Benjamin Franklin
"""
__all__ = [
"sequential_search"
]
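The implementation itself lives in sequential_search.py, which is not part of this record; the sketch below is a plausible minimal linear search, offered for reference only and not taken from the repository.

def sequential_search(items, target):
    """Return the index of target in items, or -1 if absent. Runs in O(n)."""
    for index, item in enumerate(items):
        if item == target:
            return index
    return -1

assert sequential_search([4, 2, 7], 7) == 2
assert sequential_search([4, 2, 7], 5) == -1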
|
[
"hero24@interia.pl"
] |
hero24@interia.pl
|
a2766e32c26c9292bc462dff5135525659e4db57
|
4b126dcb6009fd1e26c1290fa35c931ba5727aea
|
/blog/views.py
|
fe837e5f0de0c59e80a16e671f86abf5b172d2ce
|
[] |
no_license
|
LetitiaWood/django3-personal-portfolio
|
4438855759a8203d0d841d66e2b090d456d94565
|
a8ebf2427c8e307702961a3ea8df698a71fcb80d
|
refs/heads/main
| 2023-04-11T06:40:43.490232
| 2021-04-24T02:50:38
| 2021-04-24T02:50:38
| 361,054,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Blog
# Create your views here.
def all_blogs(request):
blog_count = Blog.objects.count()
blogs = Blog.objects.order_by('-date')[:3] #the most current three blogs will pop up;
return render(request, 'blog/all_blogs.html', {'blogs':blogs, 'count':blog_count})
def detail(request, blog_id):
blog = get_object_or_404(Blog,pk=blog_id)
return render(request, 'blog/detail.html', {'blog':blog})
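For these views to be reachable, the app needs a URLconf along the following lines; the route names and module layout are illustrative, not taken from this repository.

from django.urls import path
from . import views

urlpatterns = [
    path('', views.all_blogs, name='all_blogs'),
    path('<int:blog_id>/', views.detail, name='detail'),
]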
|
[
"Letitia_wood@icloud.com"
] |
Letitia_wood@icloud.com
|
1a90a7a11a31b6d2bd8d513513d6dff28f93aca6
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping23/StrippingRD/StrippingD23MuLines.py
|
f22c2b8963448ca27883ac74c5c22c1eb3680061
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,651
|
py
|
"""
Module for construction of D+ -> mu l+ l- lines
Performance
Full.dst:
#########
StrippingReport INFO Event 500000, Good event 500000
| *Decision name*|*Rate,%*|*Accepted*| *Mult*|*ms/evt*|
|!StrippingD23MuD23MuLine | 0.0014| 7| 1.000| 0.112|
|!StrippingD23MuD2MueeLine | 0.0030| 15| 1.000| 0.131|
|!StrippingD23MuD23PiLine | 0.0130| 65| 1.354| 0.033|
MC: D+ -> 3pi (21163012)
########################
StrippingReport INFO Event 100000, Good event 100000
| *Decision name*|*Rate,%*|*Accepted*| *Mult*|*ms/evt*|
|!StrippingD23MuD23PiLine | 0.6500| 650| 1.008| 0.569|
MC: D+ -> K 2pi (21163020)
##########################
StrippingReport INFO Event 100000, Good event 100000
| *Decision name*|*Rate,%*|*Accepted*| *Mult*|*ms/evt*|
|!StrippingD23MuD23PiLine | 0.0130| 13| 1.077| 0.266|
Exported symbols (use python help!):
-
"""
__author__ = ["Oliver Gruenberg"]
__date__ = "19.05.2015"
__version__ = "$Revision: 1.0 $"
#############################################################################
__all__ = ("D23MuLinesConf",
"config_default", )
#############################################################################
from Gaudi.Configuration import *
from Configurables import FilterDesktop, CombineParticles, DaVinci__N3BodyDecays
from PhysSelPython.Wrappers import Selection, DataOnDemand
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
#from StrippingSelections.Utils import checkConfig
from GaudiKernel.PhysicalConstants import c_light
#############################################################################
default_config = {
"NAME" : "D23Mu",
"WGs" : [ "RD" ],
"STREAMS" : [ "Leptonic" ],
"BUILDERTYPE" : "D23MuLinesConf",
"CONFIG" : {
# TrackCuts
"MinTrIPChi2" : 25.0,
"MaxTrChi2Dof" : 3.0,
"MaxTrGhp" : 0.3,
# CombiCuts
"MaxDoca" : 0.3, # (mm)
"mDiffDLoose" : 150, # (MeV)
"mDiffDTight" : 150, # (MeV)
# MotherCuts
"MaxIPChi2" : 25,
"MinVDChi2" : 225,
"MaxVtxChi2Dof" : 9,
"MinDira" : 0.0,
"MinTau" : 0.1, # (ps)
# scalings
"Postscale" : 1,
"D23MuPrescale" : 1,
"D2MueePrescale" : 1,
"D23PiPrescale" : 0.01,
"CommonRelInfoTools" : [ { "Type": "RelInfoVertexIsolation", "Location":"VtxIsoInfo" },
{ "Type": "RelInfoVertexIsolationBDT", "Location":"VtxIsoInfoBDT" },
{ "Type" : "RelInfoBs2MuMuBIsolations",
"RecursionLevel" : 0,
"Variables" : [],
"Location" : "BsMuMuBIsolation",
"tracktype" : 3,
"makeTrackCuts" : False, },
] # closes CommonRelInfoTools
} # closes CONFIG
} # closes default_config
class D23MuLinesConf(LineBuilder) :
"""
Builder
"""
__configuration_keys__ = ( # TrackCuts
"MinTrIPChi2",
"MaxTrChi2Dof",
"MaxTrGhp",
# CombiCuts
"MaxDoca",
"mDiffDLoose",
"mDiffDTight",
# MotherCuts
"MaxIPChi2",
"MinVDChi2",
"MaxVtxChi2Dof",
"MinDira",
"MinTau",
# scalings
"Postscale",
"D23MuPrescale",
"D2MueePrescale",
"D23PiPrescale",
"CommonRelInfoTools", )
def __init__(self, name = "D23Mu", config = default_config) :
LineBuilder.__init__(self, name, config)
#############################################################################
self.TrackCuts = """
(MIPCHI2DV(PRIMARY) > %(MinTrIPChi2)s)
& (TRCHI2DOF < %(MaxTrChi2Dof)s)
& (TRGHP < %(MaxTrGhp)s)
""" %config
self.Combination12Cuts = "(ADOCA(1,2) < %(MaxDoca)s*mm)" %config
self.CombinationCutsLoose = """
(ADAMASS(1920*MeV) < %(mDiffDLoose)s*MeV)
& (ADOCA(1,3) < %(MaxDoca)s*mm)
& (ADOCA(2,3) < %(MaxDoca)s*mm)
""" %config
self.CombinationCutsTight = """
(ADAMASS(1920*MeV) < %(mDiffDTight)s*MeV)
& (ADOCA(1,3) < %(MaxDoca)s*mm)
& (ADOCA(2,3) < %(MaxDoca)s*mm)
""" %config
self.MotherCuts = """
(BPVIPCHI2() < %(MaxIPChi2)s )
& (BPVVDCHI2 > %(MinVDChi2)s )
& (VFASPF(VCHI2/VDOF) < %(MaxVtxChi2Dof)s )
& (BPVDIRA > %(MinDira)s )
& (BPVLTIME() > %(MinTau)s*ps )
""" %config
#############################################################################
D23Mu_name = name+"D23Mu"
D2Muee_name = name+"D2Muee"
D23Pi_name = name+"D23Pi"
self.selD23Mu = self.makeD23Mu(D23Mu_name)
self.selD2Muee = self.makeD2Muee(D2Muee_name)
self.selD23Pi = self.makeD23Pi(D23Pi_name)
#############################################################################
self.D23Mu_Line = StrippingLine(D23Mu_name+"Line",
prescale = config["D23MuPrescale"],
postscale = config["Postscale"],
MDSTFlag = True,
selection = self.selD23Mu,
RelatedInfoTools = [
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 0.5,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD23Mu : "ConeIso05Dp",
"Phys/StdAllLooseMuons" :
["ConeIso05mu1", "ConeIso05mu2", "ConeIso05mu3"], }, },
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 1.0,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD23Mu : "ConeIso10Dp",
"Phys/StdAllLooseMuons" :
["ConeIso10mu1", "ConeIso10mu2", "ConeIso10mu3"], }, },
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 1.5,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD23Mu : "ConeIso15Dp",
"Phys/StdAllLooseMuons" :
["ConeIso15mu1", "ConeIso15mu2", "ConeIso15mu3"], }, },
{ "Type": "RelInfoTrackIsolationBDT",
"RecursionLevel" : 1,
"Variables" : 0,
"Locations": { "Phys/StdAllLooseMuons" :
["TrackIsoBDTmu1","TrackIsoBDTmu2","TrackIsoBDTmu3"], }, },
{ "Type" : "RelInfoBs2MuMuTrackIsolations",
"RecursionLevel" : 1,
"Variables" : [],
"IsoTwoBody" : True,
"Locations" : { "Phys/StdAllLooseMuons" :
["BsMuMuTrackIsomu1","BsMuMuTrackIsomu2","BsMuMuTrackIsomu3"] ,}, },
] + config["CommonRelInfoTools"] # end of RelatedInfoTools
)# closes Strippingline
self.D2Muee_Line = StrippingLine(D2Muee_name+"Line",
prescale = config["D2MueePrescale"],
postscale = config["Postscale"],
MDSTFlag = True,
selection = self.selD2Muee,
RelatedInfoTools = [
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 0.5,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD2Muee : "ConeIso05Dp",
"Phys/StdAllLooseMuons" : "ConeIso05mu",
"Phys/StdAllLooseElectrons" : ["ConeIso05e1", "ConeIso05e2"], }, },
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 1.0,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD2Muee : "ConeIso10Dp",
"Phys/StdAllLooseMuons" : "ConeIso10mu",
"Phys/StdAllLooseElectrons" : ["ConeIso10e1", "ConeIso10e2"], }, },
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 1.5,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD2Muee : "ConeIso15Dp",
"Phys/StdAllLooseMuons" : "ConeIso15mu",
"Phys/StdAllLooseElectrons" : ["ConeIso15e1", "ConeIso15e2"], }, },
{ "Type": "RelInfoTrackIsolationBDT",
"RecursionLevel" : 1,
"Variables" : 0,
"Locations": { "Phys/StdAllLooseMuons" : "TrackIsoBDTmu",
"Phys/StdAllLooseElectrons" : ["TrackIsoBDTe1","TrackIsoBDTe2"], }, },
{ "Type" : "RelInfoBs2MuMuTrackIsolations",
"RecursionLevel" : 1,
"Variables" : [],
"IsoTwoBody" : True,
"Locations" : { "Phys/StdAllLooseMuons" : "BsMuMuTrackIsomu",
"Phys/StdAllLooseElectrons" :
["BsMuMuTrackIsoe1","BsMuMuTrackIsoe2"] ,}, },
] + config["CommonRelInfoTools"] # end of RelatedInfoTools
) # closes Strippingline
self.D23Pi_Line = StrippingLine(D23Pi_name+"Line",
prescale = config["D23PiPrescale"],
postscale = config["Postscale"],
MDSTFlag = True,
selection = self.selD23Pi,
RelatedInfoTools = [
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 0.5,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD23Pi : "ConeIso05Dp",
"Phys/StdAllLoosePions" :
["ConeIso05pi1", "ConeIso05pi2", "ConeIso05pi3"], }, },
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 1.0,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD23Pi : "ConeIso10Dp",
"Phys/StdAllLoosePions" :
["ConeIso10pi1", "ConeIso10pi2", "ConeIso10pi3"], }, },
{ "Type" : "RelInfoConeVariables",
"ConeAngle" : 1.5,
"Variables" : [],
"RecursionLevel" : 1,
"Locations" : { self.selD23Pi : "ConeIso15Dp",
"Phys/StdAllLoosePions" :
["ConeIso15pi1", "ConeIso15pi2", "ConeIso15pi3"], }, },
{ "Type": "RelInfoTrackIsolationBDT",
"RecursionLevel" : 1,
"Variables" : 0,
"Locations": { "Phys/StdAllLoosePions" :
["TrackIsoBDTpi1","TrackIsoBDTpi2","TrackIsoBDTpi3"], }, },
{ "Type" : "RelInfoBs2MuMuTrackIsolations",
"RecursionLevel" : 1,
"Variables" : [],
"IsoTwoBody" : True,
"Locations" : { "Phys/StdAllLoosePions" :
["BsMuMuTrackIsopi1","BsMuMuTrackIsopi2","BsMuMuTrackIsopi3"] ,}, },
] + config["CommonRelInfoTools"] # end of RelatedInfoTools
) # closes Strippingline
#############################################################################
self.registerLine(self.D23Mu_Line)
self.registerLine(self.D2Muee_Line)
self.registerLine(self.D23Pi_Line)
#############################################################################
def makeD23Mu(self,name):
D23Mu = DaVinci__N3BodyDecays("Combine"+name)
D23Mu.DecayDescriptors = [ "[D+ -> mu+ mu+ mu-]cc","[D+ -> mu+ mu+ mu+]cc" ]
D23Mu.DaughtersCuts = { "mu+" : self.TrackCuts }
D23Mu.Combination12Cut = self.Combination12Cuts
D23Mu.CombinationCut = self.CombinationCutsLoose
D23Mu.MotherCut = self.MotherCuts
_myMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
return Selection (name, Algorithm = D23Mu, RequiredSelections = [ _myMuons ])
#############################################################################
def makeD2Muee(self,name):
D2Muee = DaVinci__N3BodyDecays("Combine"+name)
D2Muee.DecayDescriptors = [ "[D+ -> mu+ e+ e-]cc","[D+ -> mu- e+ e+]cc","[D+ -> mu+ e+ e+]cc" ]
D2Muee.DaughtersCuts = { "mu+" : self.TrackCuts, "e+" : self.TrackCuts }
D2Muee.Combination12Cut = self.Combination12Cuts
D2Muee.CombinationCut = self.CombinationCutsLoose
D2Muee.MotherCut = self.MotherCuts
_myMuons = DataOnDemand(Location = "Phys/StdLooseMuons/Particles")
_myElectrons = DataOnDemand(Location = "Phys/StdLooseElectrons/Particles")
return Selection (name, Algorithm = D2Muee, RequiredSelections = [ _myMuons, _myElectrons ])
#############################################################################
def makeD23Pi(self,name):
D23Pi = DaVinci__N3BodyDecays("Combine"+name)
D23Pi.DecayDescriptors = [ "[D+ -> pi+ pi+ pi-]cc" ]
D23Pi.DaughtersCuts = { "pi+" : self.TrackCuts }
D23Pi.Combination12Cut = self.Combination12Cuts
D23Pi.CombinationCut = self.CombinationCutsTight
D23Pi.MotherCut = self.MotherCuts
_myPions = DataOnDemand(Location = "Phys/StdLoosePions/Particles")
return Selection (name, Algorithm = D23Pi, RequiredSelections = [ _myPions ])
#############################################################################
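A minimal instantiation sketch for the builder above. It assumes a configured DaVinci/LHCb environment in which the imports at the top of this module resolve, and that LineBuilder exposes its registered lines via a lines() method (as in StrippingUtils; stated here as an assumption):

from StrippingArchive.Stripping23.StrippingRD.StrippingD23MuLines import D23MuLinesConf, default_config

builder = D23MuLinesConf("D23Mu", default_config["CONFIG"])
for line in builder.lines():
    print line.name()   # StrippingD23MuD23MuLine, ...D2MueeLine, ...D23PiLine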
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
11ef771bc27ddb7e52d28fc879db132d23f2b521
|
7ba87be3b62ebd3647762984d7190d4c0abc4aa1
|
/patternnum5.py
|
b350698aede947d327dc28bd0f2394ee37f3fe91
|
[] |
no_license
|
pradeepdevloper1/Learn_Python_CN
|
7ca0d70e2cb5050bff2e090ec08cf97e4c38da2e
|
cf2978f52c68511cb93c03922aecae570ae9b009
|
refs/heads/main
| 2023-02-03T23:39:17.738748
| 2020-12-22T18:29:58
| 2020-12-22T18:29:58
| 323,703,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
## Read input as specified in the question
## Print the required output in given format
n=int(input())
if 0<=n<=50:
i=1
while i<=n:
j=1
while j<=i:
print(j,end='')
j+=1
print()
i+=1
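For example, with input 4 the nested loops print one row per line, each row counting up from 1 to the row number:

1
12
123
1234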
|
[
"pradeep.kumar@worldfashionexchange.com"
] |
pradeep.kumar@worldfashionexchange.com
|
d81ccc034bd7fcfa57f92932c57ec419606bb49a
|
0f3138e1c353b1bdc9a6252b6f06c8266767681f
|
/sparvbot/utils.py
|
ba19a7bfef0ce71a4250a71acd7a228829d6c87c
|
[] |
no_license
|
Sparvnastet/sparvbot
|
46ade580082a49c185278ecfd644a2d9d6a24cdb
|
9b17ed7ab110d44f5ecf874017d5da063dc94aa8
|
refs/heads/master
| 2021-01-01T16:25:31.908508
| 2014-02-27T21:19:37
| 2014-02-27T21:19:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
import sys
if 'gevent' in sys.modules:
import zmq.green as zmq
else:
import zmq
zmq_ctx = zmq.Context()
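A usage sketch for the shared context above. The import path follows this file's location (sparvbot/utils.py) and the endpoint is a placeholder; a REQ socket will block on recv() until a matching REP server replies.

from sparvbot.utils import zmq_ctx, zmq

socket = zmq_ctx.socket(zmq.REQ)
socket.connect('tcp://127.0.0.1:5555')  # placeholder endpoint
socket.send(b'ping')
print(socket.recv())                    # blocks until the server answers
socket.close()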
|
[
"adis@blad.is"
] |
adis@blad.is
|
c271cd860b5edf80188b50c7092ffd39060d9bd0
|
43dd3690516cb86641899d86599c0a1f79a8c97e
|
/src/lex_learn.py
|
5138c9fd245cf46f1ae6021ca5edebdd637283d0
|
[] |
no_license
|
AlexTaran/automorphology
|
506e1a8b7791aec2b9f172be071bf568513daf4e
|
744943f41acdd8c1e869f3e8df1a43d712d13afe
|
refs/heads/master
| 2021-01-01T19:50:29.369560
| 2015-04-25T22:33:37
| 2015-04-25T22:33:37
| 34,588,669
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,230
|
py
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Learn on Russian and apply to Croatian VS. Learn on Croatian and apply to Croatian
# Also build automorphology
import argparse
import codecs
import random
import cPickle
import datetime
import build_features
from collections import defaultdict
import spanish
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedKFold
LEARN_FEATURES_IDS = [5, 6] + [7, 8] + [9, 10, 11, 12, 13, 14, 15, 16] + [17, 18, 19, 20, 21, 22] + [23] #+ [24, 25, 26]
learn_params = {'oob_score': True, 'n_estimators': 30}
def convert_features(features, answer_id=2, bin_features = [],
float_features=LEARN_FEATURES_IDS):
converted = []
for feat in features:
converted.append([feat[f_name] for f_name in build_features.OUTPUT_FEATURES])
return process_features(converted)
def load_features(filename):
with codecs.open(filename, encoding='utf-8') as f:
lines = [l.strip().split('\t') for l in f]
return process_features(lines)
def process_features(feat_data, answer_id=2, bin_features = [],
float_features=LEARN_FEATURES_IDS):
lines = feat_data
Y = [(1 if l[answer_id] == 'True' else 0) for l in lines]
binmap = {}
for fid in bin_features:
binmap[fid] = sorted(list(set([l[fid] for l in lines])))
X = []
for line in lines:
feats = []
for fid in float_features:
feats.append(float(line[fid]))
for fid in bin_features:
for x in binmap[fid]:
feats.append(1 if x==line[fid] else 0)
X.append(feats)
return np.array(X), np.array(Y), lines
def CV_estimate(X, Y, folds=10):
skf = StratifiedKFold(Y, n_folds=folds)
scores = []
for train_idx, test_idx in skf:
lr = LogisticRegression()
lr = RandomForestClassifier(**learn_params)
lr.fit(X[train_idx], Y[train_idx])
scores.append(lr.score(X[test_idx], Y[test_idx]))
return np.average(scores), np.std(scores)
def output_morphology(filename, morphology):
total_forms = sum([len(v) for k, v in morphology.iteritems()])
print 'Saving automorphology: %d items with total %d forms' % (len(morphology), total_forms)
with codecs.open(filename, 'w', encoding='utf-8') as f:
for k, words in sorted(morphology.items()):
print >> f, '\t'.join([k] + list(words))
def lex_learn(args, langs):
print "Loading nuts features..."
nX, nY, nL = load_features(args.nuts_input)
print "Loading croatian features..."
cX, cY, cL = load_features(args.croatian_learn_data)
print "Loading spanish features..."
sX, sY, sL = load_features(args.spanish_learn_data)
print "Learning..."
cls_weights = {1: 1.0, 0: 1.0}
model_ru = LogisticRegression()
model_ru = RandomForestClassifier(**learn_params)
model_ru.fit(nX, nY)
cnt = 0
for i, label in enumerate(model_ru.predict(cX)):
if label != cY[i]:
#print i, ' '.join(cL[i]), cX[i], cY[i], label
cnt += 1
model_hr = LogisticRegression()
model_hr = RandomForestClassifier(**learn_params)
model_hr.fit(cX, cY)
for i, label in enumerate(model_hr.predict(nX)):
if label != nY[i]:
#print i, ' '.join(nL[i]), nX[i], nY[i], label
pass
model_es = LogisticRegression()
model_es = RandomForestClassifier(**learn_params)
model_es.fit(sX, sY)
print "Total errors: %d" % cnt
print "Rus -> Cro score:", model_ru.score(cX, cY)
print "Cro -> Rus score:", model_hr.score(nX, nY)
    print 'OOB RU: %f' % model_ru.oob_score_  # oob_score_ is a float accuracy, so %d would truncate it to 0
    print 'OOB HR: %f' % model_hr.oob_score_
    print 'OOB ES: %f' % model_es.oob_score_
print model_es.get_params()
print '--- importances ---'
for imp, i in sorted(zip(model_es.feature_importances_, LEARN_FEATURES_IDS)):
print '%f %s %d' % (imp, build_features.OUTPUT_FEATURES[i], i)
print '-------------------'
#print "Rus -> Rus CV estimate: %f (std %f)" % CV_estimate(nX, nY)
#print "Cro -> Cro CV estimate: %f (std %f)" % CV_estimate(cX, cY)
# Generating croatian morphology:
if langs['HR']:
hr_from_hr_morphology = defaultdict(set)
hr_from_ru_morphology = defaultdict(set)
hr_from_es_morphology = defaultdict(set)
feat_group = []
model_morph = zip([model_hr, model_ru, model_es],
[hr_from_hr_morphology, hr_from_ru_morphology,
hr_from_es_morphology])
for feat_no, feat in enumerate(build_features.build_croatian(args, True)):
feat_group.append(feat)
if len(feat_group) >= 10000:
print 'Generating HR morphology... feat %d' % (feat_no)
convertedX, convertedY, convertedL = convert_features(feat_group)
for model, morph in model_morph:
for i, label in enumerate(model.predict(convertedX)):
if label:
w1 = feat_group[i]['w1'].lower()
w2 = feat_group[i]['w2'].lower()
#print 'Detected close forms: %s %s' % (w1, w2)
morph[w1].add(w2)
morph[w2].add(w1)
feat_group = []
output_morphology(args.hr_from_hr_automorphology, hr_from_hr_morphology)
output_morphology(args.hr_from_ru_automorphology, hr_from_ru_morphology)
output_morphology(args.hr_from_es_automorphology, hr_from_es_morphology)
# Generating russian morphology
if langs['RU']:
ru_from_ru_morphology = defaultdict(set)
ru_from_hr_morphology = defaultdict(set)
ru_from_es_morphology = defaultdict(set)
feat_group = []
model_morph = zip([model_hr, model_ru, model_es],
[ru_from_hr_morphology, ru_from_ru_morphology,
ru_from_es_morphology])
for feat_no, feat in enumerate(build_features.build_nuts(args, True)):
feat_group.append(feat)
if len(feat_group) >= 10000:
print 'Generating RU morphology... feat %d' % (feat_no)
convertedX, convertedY, convertedL = convert_features(feat_group)
for model, morph in model_morph:
for i, label in enumerate(model.predict(convertedX)):
if label:
w1 = feat_group[i]['w1'].lower()
w2 = feat_group[i]['w2'].lower()
#print 'Detected close forms: %s %s' % (w1, w2)
morph[w1].add(w2)
morph[w2].add(w1)
'''for i, label in enumerate(model_ru.predict(convertedX)):
if label == True:
w1 = feat_group[i]['w1'].lower()
w2 = feat_group[i]['w2'].lower()
#print 'Detected close forms: %s %s' % (w1, w2)
ru_from_ru_morphology[w1].add(w2)
ru_from_ru_morphology[w2].add(w1)
for i, label in enumerate(model_hr.predict(convertedX)):
if label:
w1 = feat_group[i]['w1'].lower()
w2 = feat_group[i]['w2'].lower()
#print 'Detected close forms: %s %s' % (w1, w2)
ru_from_hr_morphology[w1].add(w2)
ru_from_hr_morphology[w2].add(w1)'''
feat_group = []
output_morphology(args.ru_from_hr_automorphology, ru_from_hr_morphology)
output_morphology(args.ru_from_ru_automorphology, ru_from_ru_morphology)
output_morphology(args.ru_from_es_automorphology, ru_from_es_morphology)
# Generating spanish morphology
if langs['ES']:
es_from_es_morphology = defaultdict(set)
es_from_ru_morphology = defaultdict(set)
es_from_hr_morphology = defaultdict(set)
feat_group = []
model_morph = zip([model_hr, model_ru, model_es],
[es_from_hr_morphology, es_from_ru_morphology,
es_from_es_morphology])
for feat_no, feat in enumerate(spanish.build_spanish(args, True)):
feat_group.append(feat)
if len(feat_group) >= 10000:
print 'Generating ES morphology... feat %d' % (feat_no)
convertedX, convertedY, convertedL = convert_features(feat_group)
for model, morph in model_morph:
for i, label in enumerate(model.predict(convertedX)):
if label:
w1 = feat_group[i]['w1'].lower()
w2 = feat_group[i]['w2'].lower()
#print 'Detected close forms: %s %s' % (w1, w2)
morph[w1].add(w2)
morph[w2].add(w1)
feat_group = []
output_morphology(args.es_from_es_automorphology, es_from_es_morphology)
output_morphology(args.es_from_ru_automorphology, es_from_ru_morphology)
output_morphology(args.es_from_hr_automorphology, es_from_hr_morphology)
def main():
start = datetime.datetime.now()
random.seed()
parser = argparse.ArgumentParser(description='Croatian-Russian learner')
parser.add_argument('--nuts-input', help='Learning data for russian',
default='../data/nuts/learn_nuts.txt')
parser.add_argument('--croatian-learn-data', help='Learning data for croatian',
default='../data/slavic/learn_croatian.txt')
parser.add_argument('--spanish-learn-data', help='Learning data for spanish',
default='../data/es/learn_es.txt')
parser.add_argument('--hr-from-hr-automorphology', help='Croatian automorphology save file',
default='../data/slavic/hr_from_hr_automorphology.txt')
parser.add_argument('--hr-from-ru-automorphology', help='Croatian automorphology save file',
default='../data/slavic/hr_from_ru_automorphology.txt')
parser.add_argument('--hr-from-es-automorphology', help='Croatian automorphology save file',
default='../data/slavic/hr_from_es_automorphology.txt')
parser.add_argument('--ru-from-ru-automorphology', help='Russian automorphology save file',
default='../data/nuts/ru_from_ru_automorphology.txt')
parser.add_argument('--ru-from-hr-automorphology', help='Russian automorphology save file',
default='../data/nuts/ru_from_hr_automorphology.txt')
parser.add_argument('--ru-from-es-automorphology', help='Russian automorphology save file',
default='../data/nuts/ru_from_es_automorphology.txt')
parser.add_argument('--es-from-es-automorphology', help='Spanish automorphology save file',
default='../data/es/es_from_es_automorphology.txt')
parser.add_argument('--es-from-ru-automorphology', help='Spanish automorphology save file',
default='../data/es/es_from_ru_automorphology.txt')
parser.add_argument('--es-from-hr-automorphology', help='Spanish automorphology save file',
default='../data/es/es_from_hr_automorphology.txt')
# Args only for build_features
parser.add_argument('--croatian-input', help='Croatian CONLLX corpus', default='../data/slavic/croatian.conllx')
parser.add_argument('--croatian-text-vectors', help='Word2vec Croatian vectors in text format', default='../data/slavic/croatian_vectors.txt')
parser.add_argument('--croatian-wikidata', help='Croatian wikipedia data corpus', default='../data/temp/hr_data.txt')
parser.add_argument('--russian-wikidata', help='Russian wikipedia data corpus', default='../data/temp/ru_data.txt')
parser.add_argument('--nuts-corpus', help='Russian nuts-corpus', default='../data/nuts/corpus.cpickle')
parser.add_argument('--nuts-text-vectors', help='Word2vec Russian vectors in text format', default='../data/nuts/word2vec_vectors.txt')
parser.add_argument('--es-corpus', help='Spanish tagged corpus', default='../data/es/tagged_utf')
parser.add_argument('--es-text-vectors', help='Word2vec Spanish vecs built by wiki (text format)', default='../data/es/spanish_wiki_vectors.txt')
parser.add_argument('--spanish-wikidata', help='Spanish wikipedia data corpus', default='../data/es/es_data.txt')
parser.add_argument('--pos-tags', help='Pos-tags to take for analysis', default='ANVR')
parser.add_argument('--gen-pos', help='Positive examples to generate', type=int, default=80000)
parser.add_argument('--gen-neg', help='Negative examples to generate', type=int, default=80000)
args = parser.parse_args()
print 'Running with args:', args
lex_learn(args, {
'RU': False,
'HR': True,
'ES': False
})
finish = datetime.datetime.now()
print 'Time to run:', finish-start
if __name__=="__main__":
main()
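All inputs default to paths under ../data, so a typical run needs no flags, though any file can be overridden. Note the script is written for Python 2 (print statements, cPickle), so invoke it with a Python 2 interpreter:

python2 src/lex_learn.py --nuts-input ../data/nuts/learn_nuts.txt \
    --croatian-learn-data ../data/slavic/learn_croatian.txt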
|
[
"alex.y.taran@gmail.com"
] |
alex.y.taran@gmail.com
|
2641f8cc229f66d4bca9f369bd59614106063569
|
e8d5eb846842c5145316409ec64a4f393eeeb07d
|
/dp_baekjoon_9184.py
|
6110d41fe4277caee520519b777baaa659f268ce
|
[] |
no_license
|
SheepEatLion/Algorithms
|
157ac6eb5338d5c6176b66878970ffceaa1f940e
|
992168b05c524369a4dbd037a04e4c94a8d2c140
|
refs/heads/main
| 2023-06-30T11:43:21.917953
| 2021-08-05T01:37:44
| 2021-08-05T01:37:44
| 365,987,864
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
import sys
memo = dict()
while True:
A, B, C = map(int, sys.stdin.readline().split())
if A == -1 and B == -1 and C == -1:
break
print('w({}, {}, {}) = '.format(A, B, C), end='')
def w(a, b, c):
if (a, b, c) in memo:
return memo[(a, b, c)]
if a<=0 or b<=0 or c<=0:
memo[(a, b, c)] = 1
return memo[(a, b, c)]
if a > 20 or b > 20 or c > 20:
return w(20, 20, 20)
if a < b < c:
memo[(a, b, c)] = w(a, b, c - 1) + w(a, b - 1, c - 1) - w(a, b - 1, c)
return memo[(a, b, c)]
else:
memo[(a, b, c)] = w(a - 1, b, c) + w(a - 1, b - 1, c) + w(a - 1, b, c - 1) - w(a - 1, b - 1, c - 1)
return memo[(a, b, c)]
print(w(A, B, C))
'''
This problem asks you to turn a given recursive function into DP.

For the input 1, 1, 1 the recursion unfolds as:
w(0, 1, 1) + w(0, 0, 1) + w(0, 1, 0) - w(0, 0, 0)
w(0, 1, 1) returns 1
w(0, 0, 1) returns 1
Every term returns 1, so the result is 3 - 1 = 2.

When I first saw this problem I could not find a direction: I assumed the
whole recursion had to be rewritten as bottom-up DP, and it was unclear how
to convert every recursive call, so I studied other people's solutions.

It turns out the answer is half recursion, half DP: keep the recursion but
borrow memoization. Every recursive call first checks whether its arguments
have already been memoized; if so, it returns the stored answer instead of
recursing further. As more inputs are processed the memo table grows, so
the number of recursive calls keeps shrinking.

I had the fixed idea that a "DP problem" must be implemented entirely with
DP tables. The real goal here was to keep the recursion and use DP's
memoization to minimize the running time.
'''
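# Illustrative sketch (not part of the original solution): the same
# recursion-plus-memoization pattern, expressed with functools.lru_cache,
# which maintains the memo table automatically.
from functools import lru_cache

@lru_cache(maxsize=None)
def w_cached(a, b, c):
    if a <= 0 or b <= 0 or c <= 0:
        return 1
    if a > 20 or b > 20 or c > 20:
        return w_cached(20, 20, 20)
    if a < b < c:
        return w_cached(a, b, c - 1) + w_cached(a, b - 1, c - 1) - w_cached(a, b - 1, c)
    return (w_cached(a - 1, b, c) + w_cached(a - 1, b - 1, c)
            + w_cached(a - 1, b, c - 1) - w_cached(a - 1, b - 1, c - 1))

# e.g. w_cached(1, 1, 1) == 2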
|
[
"gowjr207@gmail.com"
] |
gowjr207@gmail.com
|
16a022ce2a690a35a8a10124359ea68c2b3f76b7
|
327e84ccc0d6cb4974186bf18cd513dfcb8b06da
|
/veiculos/migrations/0002_veiculo_valor.py
|
884522474b77fdcc2095e9f60a267eebd3eea2ab
|
[] |
no_license
|
JoaoLucasMB/programacao_comercial
|
cc8b57a286dd03018c124ce2ab2e6a4baf206dd4
|
d7f2f50365ac085a4029b29fb1ddf5c39064bb12
|
refs/heads/main
| 2023-01-31T21:51:22.466716
| 2020-12-14T23:33:54
| 2020-12-14T23:33:54
| 313,310,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 3.1.2 on 2020-11-23 23:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('veiculos', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='veiculo',
name='valor',
field=models.IntegerField(default=15000),
),
]
|
[
"jaum998@gmail.com"
] |
jaum998@gmail.com
|
e27635633e6f0adc9349ef2572eaf2bf445bbf9f
|
035853d5a569123412058061e4edea9bafe67021
|
/R-Python-scripts/Jumpcount.py
|
4d46b2be7e455193f75f50fa66cfcf2b2a2f6d2c
|
[] |
no_license
|
XuetingQiu/FluDiffusionUS
|
0d7c48814815ac704b16eff93edb57e8e5ba5566
|
2cce52707152bdb91feeb32ff245ae2ca172cd92
|
refs/heads/master
| 2020-04-19T20:08:56.136810
| 2019-11-01T18:26:28
| 2019-11-01T18:26:28
| 168,407,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# JumpExtractor.py: extracts Markov jump counts from a full jump-history log file.
# For other uses, update the list of discrete trait source states, the input log
# file name, and the number of sampled steps in the log file (6880 here).
def main():
source = ['AF','EAP','ECA','LAC','MENA','NA','SAS','USA']
# for clock in range(7):
infile = open("H1N1-pdasub-trait_geo_compjumpHistory-comb.log", "r")
count = {}
for line in infile:
jumps = line.split("},{")
for i in jumps:
packet = i.split(",")
if packet[2]+" "+packet[3] in count:
count[packet[2]+" "+packet[3]] += 1/6880
else:
count[packet[2]+" "+ packet[3]] = 1/6880
for x in source:
for y in source:
if x != y:
if x+" "+y not in count:
count[x+" "+y] = 0
report = list(count.items())
report.sort()
for item in report:
locations, average = item
print(locations,average)
infile.close()
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4713ab6f5aae7a77619093867729676a2e15c423
|
5aefcc44dd516815cd98ccef27a2de4acefba4d9
|
/otp.py
|
c80885e00825b8367b8d36f31d0498dffd7b5cc8
|
[] |
no_license
|
hanshanglin/polyu-comp3334-project
|
f9bc962316b7179a82ff65d99f77ce71e6a09868
|
a70103079ba05b16b8a0afdc0bb238f855cac645
|
refs/heads/master
| 2022-07-03T18:56:32.632449
| 2020-05-19T04:00:17
| 2020-05-19T04:00:17
| 261,059,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,534
|
py
|
import time
import random
import math
import os
import modapk_win
__PLATFORM__="WINDOWS"
class OTP:
TickIntervalSeconds=30
PasscodeLength=6
_rng=random.SystemRandom()
def __init__(self, tick_interval, passcode_len):
self.TickIntervalSeconds=int(abs(tick_interval))
self.PasscodeLength=int(abs(passcode_len))
def generate_seed(self):
seed=0
        for n in range(1,self.PasscodeLength+1):
            seed=seed*10+self._rng.randint(0,9)  # append one random decimal digit
return seed
def _get_current_interval(self):
sec=int(math.floor(time.time()))
return sec//self.TickIntervalSeconds
def _get_current_passcode(self, seed):
tick=self._get_current_interval()
p=tick*tick*seed % (10**self.PasscodeLength)
return p
    def check(self, seed, passcode):
        if isinstance(passcode, str):
            # Parse only the last PasscodeLength characters as digits
            _passcode=0
            a=1
            for i in range(self.PasscodeLength):
                _passcode=_passcode+a*int(passcode[-1-i])
                a*=10
        else:
            _passcode=int(passcode)
        return self._get_current_passcode(seed)==_passcode
def get_OTP_client_android(self, seed, designated_name=None):
if designated_name==None:
designated_name="%08x"%random.getrandbits(32)+".apk"
if __PLATFORM__=="WINDOWS":
modapk_win.genapk(designated_name,seed)
return designated_name
os.system("./modapk/modapk.sh OTP_PoC.apk 1.apk -w assets/OTP_seed.txt "+str(seed))
os.replace("/modapk/1.apk", "designated_name")
return designated_name
def get_OTP_client_win(self, seed, designated_name=None):
if designated_name==None:
designated_name="%08x"%random.getrandbits(32)+".py"
f=open(designated_name, "w")
f.write("TickIntervalSeconds="+str(self.TickIntervalSeconds))
f.write("\nPasscodeLength="+str(self.PasscodeLength))
f.write("\nseed="+str(seed))
f.write("\n")
template=open("otp_win_template.py", "r")
f.write(template.read())
return designated_name
OTPPreset=OTP(30,6)
if __name__=="__main__":
_otpobject=OTP(30,6)
_sd=_otpobject.generate_seed()
print("time: ", time.time())
print(_otpobject._get_current_interval())
print(_otpobject._get_current_passcode(_sd))
print(_otpobject.get_OTP_client_win(_sd))
print(_otpobject.check(_sd,str(_otpobject._get_current_passcode(_sd)).zfill(6)))
|
[
"noreply@github.com"
] |
noreply@github.com
|
0567fe4ebe88254f30e3acdf850eb08984dcd1e3
|
8a1f1d72d2672bedc81bc452090110ac3b442852
|
/dashboard/venv/lib/python3.6/_collections_abc.py
|
e54681d9f004aad0fe6cfcae94799ca27f04d4cb
|
[] |
no_license
|
lilyli333/HH-Intervention-Dashboard
|
2386b97d6ebb705da0d6f6b7c4b4538db27339d7
|
e55f3751e0780c700fc6b486100d87f0141e891f
|
refs/heads/master
| 2020-03-21T04:29:43.159523
| 2018-06-21T02:47:21
| 2018-06-21T02:47:21
| 138,111,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
/Users/lilili/miniconda3/lib/python3.6/_collections_abc.py
|
[
"lily.jiayu@gmail.com"
] |
lily.jiayu@gmail.com
|
f61b69e59a5f3df86af281eb1cb0ccc016d7d18e
|
63d28241de5d5f8f6ea998865124106761eba317
|
/beaker/__init__.py
|
2d669b3124144289cba77e9e8b1cf96bb56f896c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] |
permissive
|
isabella232/beaker-1
|
e2aba9947d176c4683921b862a6168af68d7e7f6
|
3d5b74a61eaadd28d6917ab039fb9292cbc533ef
|
refs/heads/master
| 2022-01-05T20:44:55.956425
| 2019-06-17T17:07:58
| 2019-06-17T17:07:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
__version__ = '1.9.0+dd.26'
|
[
"noreply@github.com"
] |
noreply@github.com
|
4e73ef6743cfa15be491eaee70edc8f3aa23a9d4
|
1f6e3f64cdb087b2c294b3e4c0406c3674b43e7a
|
/notification_request_queue.py
|
dd75d7a503f7c9a78f291bbf8534c0c15e5062d1
|
[] |
no_license
|
rajithakalapatapu/chatclient-offlinemessage-storagedelivery
|
cfd6c1102144b3d1fcd8675593f1c6006a6c8c34
|
ab06c8b0e06d39c4f8c5a6eb7824125fc7f213d6
|
refs/heads/master
| 2021-05-17T15:42:35.798607
| 2019-04-25T03:01:51
| 2019-04-25T03:01:51
| 250,850,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
"""
Name: Venkata Adilakshmi Rajitha Kalapatapu
Student ID: 1001682465
Login ID: vxk2465
"""
import queue
class NotificationRequestQueue:
def __init__(self):
# Start a queue
self.request_queue = queue.Queue()
def add_request(self, student, course, approval_decision):
try:
# store incoming data into the queue
# store it asynchronously - queue takes care of locks
self.request_queue.put((student, course, approval_decision), False)
return True
except queue.Full as e:
print(e)
return False
def get_all_cleared_requests(self):
pending_requests = []
try:
# as long as queue has items, get data
while not self.request_queue.empty():
                # get data asynchronously - queue takes care of locks;
                # a non-blocking get raises queue.Empty immediately if the
                # queue was drained between the empty() check and here
                pending_request = self.request_queue.get(False)
# store it in a list to be returned
pending_requests.append(pending_request)
# remove item from task
self.request_queue.task_done()
except queue.Empty as e:
print("Nothing to return at the moment")
return pending_requests
except ValueError as e:
print("We had more tasks done than tasks in the queue")
return pending_requests
return pending_requests
if __name__ == "__main__":
q = NotificationRequestQueue()
q.add_request("student1", "course1", True)
q.add_request("student1", "course2", False)
q.add_request("student2", "course2", True)
print(q.get_all_cleared_requests())
print("Queue empty at the end? {}".format([] == q.get_all_cleared_requests()))
|
[
"rajithakalapatapu9@gmail.com"
] |
rajithakalapatapu9@gmail.com
|
cbaecb462463a2a78f6aa6396f7b0b1f959eabfb
|
57cbbe35b1b5a7aa40b732aa6a9be3f7bfd5291f
|
/RunAlgorithm.py
|
70756cbd03ea95b39dccef0b3aad1e7377797017
|
[] |
no_license
|
JoeyChaps/regularization
|
18d1fb1233231ebd08496ff9a4b8db57d84e2055
|
235f6c58b28025ef9086e76aa5f7565c98753e97
|
refs/heads/master
| 2020-04-15T15:17:33.461376
| 2014-12-31T05:31:44
| 2014-12-31T05:31:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,633
|
py
|
#!/usr/bin/python
#######
# File: RunAlgorithm.py
# Project: Regularization
# Author: Joe Chapman
# Date: 9-8-14
# Url: https://github.com/JoeyChaps/regularization
#######
import Regularizer
import DataRandomizer
from time import gmtime, strftime
import os
import math
import shutil
import sys
import getopt
def isnumber(s):
# Returns true if s is numeric
bResult = True
try:
i = float(s)
    except (ValueError, TypeError):
bResult = False
return bResult
def loadData(dataFile):
# Reads the csv dataFile and returns a 2d list of pattern data
readFile = open(dataFile, 'r')
count = 0
prev = 0
cols = 0
a_pats = []
bDeleteColumn = False
a_deleteCols = []
for line in readFile:
prev = cols
a_rowp = []
a_rowp = line.split(",")
a_rown = []
cols = len(a_rowp)
for c in range(0, cols):
if isnumber(a_rowp[c]):
a_rown.append(a_rowp[c])
else:
if not c in a_deleteCols:
a_deleteCols.append(c)
cols = len(a_rown)
if (cols != prev):
if (count != 0):
print("unequal line counts: " + str(cols) + " and " + str(prev) \
+ " at line " + str(count) + "\n")
count += 1
a_pats.append(a_rown)
for c in a_deleteCols:
print("in load data, deleted column " + str(c))
readFile.close()
return list(a_pats)
def convertClasses(a_pattern, index, targetVal):
# Replaces class values not equal to the targetVal with -1
a_pat = list(a_pattern)
if (a_pat[index] != targetVal):
a_pat[index] = -1
return list(a_pat)
def prepPats(a_pats, clsIndx, cols, bConvert):
# Makes sure pattern values are floats and, if bConvert is true,
# converts class values to 1 and -1
a_tmp = []
for a_pat in a_pats:
for c in range(0, cols + 1):
a_pat[c] = float(a_pat[c])
if (bConvert):
a_pat = convertClasses(a_pat, clsIndx, 1)
a_tmp.append(a_pat)
return list(a_tmp)
def prepLambdas(sLambdas):
# Makes sure lambda values are floats
a_tmp = []
a_lams = sLambdas.split(",")
for lam in a_lams:
a_tmp.append(float(lam))
return list(a_tmp)
def main(argv):
patsLim = -1 # This number determines how many patterns will be used in
# this run of the program. The number is the total number
# of train and test patterns combined. Or set patsLim to -1
# to use all the available patterns in the original data
# set (the file referenced by the fileName variable below),
# however many there may be.
bRefreshData = True # When bRefreshData is true, the program generates
# new data files for the training and test sets,
# randomly selecting and ordering the data for the
# new program run. When bRefreshData is false, the
# program uses previously generated data files
# stored in the saved_data directory.
a_lambdas = "0"
projectName = "regularize"
fileName = ""
savedDataDir = "saved_data"
savedTrainingFile = "\\regularize_train.csv"
rTrainFileName = ""
dataSource = ""
a_trainPats = []
nTrainPats = 0
bConvertClass = False
ncols = -1
classIndex = 1
xIn = 0
yIn = 1
transNum = 3
dataFunc = lambda x: 1 + 9 * x**2
opt = []
arg = []
try:
opts, args = getopt.getopt(argv, "l:p:d", ["lambdas=", "pats=", "data"])
except getopt.GetoptError as err:
print("blarf")
print(str(err))
sys.exit(2)
for opt, arg in opts:
if opt in ("-p", "--pats"):
patsLim = int(arg)
elif opt in ("-d", "--data"):
bRefreshData = False
elif opt in ("-l", "--lambdas"):
a_lambdas = arg
transNum = 3
nowTime = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
newOutputDir = ""
otherFileName = ""
if patsLim < 2:
patsLim = 5
a_lambdas = prepLambdas(a_lambdas)
if bRefreshData:
data_randomizer = DataRandomizer.DataRandomizer(
fileName, projectName, patsLim)
data_randomizer.generateData(patsLim, dataFunc)
rTrainFileName = data_randomizer.getTrainFile()
dataSource = data_randomizer.getDataDirectory()
else:
rTrainFileName = savedDataDir + savedTrainingFile
a_trainPats = loadData(rTrainFileName)
ncols = len(a_trainPats[0]) - 1
a_trainPats = prepPats(a_trainPats, classIndex, ncols, bConvertClass)
nTrainPats = len(a_trainPats)
newOutputDir = "output\\out_" + nowTime + "_" + str(nTrainPats)
if not os.path.exists(newOutputDir):
os.makedirs(newOutputDir)
if bRefreshData:
shutil.move(dataSource, newOutputDir)
else:
dataDir = newOutputDir + "\\data"
if not os.path.exists(dataDir):
os.makedirs(dataDir)
shutil.copy(rTrainFileName, dataDir)
if otherFileName:
shutil.copy(otherFileName, dataDir)
reg = Regularizer.Regularizer(newOutputDir)
reg.runAlgorithm(classIndex, xIn, yIn, ncols, transNum, nTrainPats,
a_lambdas, a_trainPats, dataFunc)
print("\nDone!")
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"jchapmantx@hotmail.com"
] |
jchapmantx@hotmail.com
|
13bd0e18fa8d78f69cd492c552c543a620fc6260
|
72a77764946ed570d96ae673103f39c35ebf0e00
|
/test_unittest_1.py
|
5fb24715d824b6ce1ba8b39cd7fe386de52eab37
|
[] |
no_license
|
WasAlexHere/AutomationTest
|
029094b41c467540fe92a70400300bdacd57f99a
|
b8d3f37e1d53815dbb2f1005907559c9bbb86376
|
refs/heads/master
| 2023-05-13T17:28:52.550554
| 2020-08-02T14:41:17
| 2020-08-02T14:41:17
| 280,935,249
| 0
| 0
| null | 2021-06-02T02:43:47
| 2020-07-19T19:25:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,814
|
py
|
import unittest
from selenium import webdriver
class TestLogin(unittest.TestCase):
browser = webdriver.Chrome()
def test_login1(self):
link = "http://suninjuly.github.io/registration1.html"
self.browser.get(link)
        # Fill in the required fields
input1 = self.browser.find_element_by_css_selector(".first:required")
input1.send_keys("Ivan")
input2 = self.browser.find_element_by_css_selector(".second:required")
input2.send_keys("Petrov")
input3 = self.browser.find_element_by_css_selector(".third:required")
input3.send_keys("Petrov")
        # Submit the completed form
button = self.browser.find_element_by_tag_name("button")
button.click()
        # Find the element containing the greeting text
welcome_text_elt = self.browser.find_element_by_tag_name("h1")
        # Store the text from welcome_text_elt in welcome_text
welcome_text = welcome_text_elt.text
        # Assert that the expected text matches the text on the page
self.assertEqual("Congratulations! You have successfully registered!", welcome_text, "Text is incorrect!")
#self.browser.quit()
def test_login2(self):
link1 = "http://suninjuly.github.io/registration2.html"
self.browser.get(link1)
        # Fill in the required fields
input1 = self.browser.find_element_by_css_selector(".first:required")
input1.send_keys("Ivan")
input2 = self.browser.find_element_by_css_selector(".second:required")
input2.send_keys("Petrov")
input3 = self.browser.find_element_by_css_selector(".third:required")
input3.send_keys("Petrov")
        # Submit the completed form
button = self.browser.find_element_by_tag_name("button")
button.click()
        # Find the element containing the greeting text
welcome_text_elt = self.browser.find_element_by_tag_name("h1")
        # Store the text from welcome_text_elt in welcome_text
welcome_text = welcome_text_elt.text
        # Assert that the expected text matches the text on the page
self.assertEqual("Congratulations! You have successfully registered!", welcome_text, "Text is incorrect!")
self.browser.quit()
if __name__ == "__main__":
unittest.main()
|
[
"sethtorston@mail.ru"
] |
sethtorston@mail.ru
|
6685eda1a70bab345ddc6f996c018feac6a6c741
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03371/s173081591.py
|
e631cebeb539f9cb5923fd6e498f3a402e717958
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
import sys
read = sys.stdin.read
readline = sys.stdin.buffer.readline
sys.setrecursionlimit(10 ** 8)
INF = float('inf')
MOD = 10 ** 9 + 7
def main():
    A, B, C, X, Y = map(int, readline().split())
    ans = A*X+B*Y  # buy X units of A and Y units of B directly
    # Two units at price C substitute for one A plus one B, so either pair
    # up to the smaller demand, or over-buy pairs to cover everything.
    if X>=Y:
        ans = min(ans, C*Y*2+A*(X-Y), C*X*2)
    else:
        ans = min(ans, C*X*2+B*(Y-X), C*Y*2)
    print(ans)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1b48913b917116ee78220345bf0711faabb87c05
|
4d595d16ff3479b2acea959f43ce7e5be46d74ce
|
/kNN.py
|
7bcc3f7eea3daa643073fb7024b134970d37fc9e
|
[] |
no_license
|
WnFg/Machine-Learning
|
0f120fae457bed3c33f092d97bf40051a0005ba3
|
17090779998988c98c5d36e2c59fe28038b72630
|
refs/heads/master
| 2020-05-17T21:58:20.860201
| 2015-08-13T01:53:12
| 2015-08-13T01:53:12
| 40,249,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
from numpy import *
import operator
def createDataSet():
group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels = ['A','A','B','B']
return group, labels
def classify0(inX, dataSet, labels, k):
    """Classify inX by majority vote among its k nearest neighbours in dataSet."""
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every training point
    diffMat = tile(inX, (dataSetSize,1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    sortedDistIndicies = distances.argsort()
    # Tally the labels of the k closest points
    classCount={}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    # Return the most common label among the k neighbours
    sortedClassCount = sorted(classCount.iteritems(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
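# Usage sketch (illustrative, not in the original file): classify a point
# near cluster A with k = 3.
if __name__ == '__main__':
    group, labels = createDataSet()
    print(classify0([0.9, 0.9], group, labels, 3))  # -> 'A'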
|
[
"496100630@qq.com"
] |
496100630@qq.com
|
21d259e2bd0230f61d9018f3536a28303133178b
|
e8d4fe2361d71aef6519f666152f14137156159c
|
/impacket-0.9.11/build/lib.linux-i686-2.6/impacket/dcerpc/dcerpc_v4.py
|
5099c5aaaf72e06ee8a13bc334ed7399833f905f
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Apache-1.1",
"BSD-2-Clause"
] |
permissive
|
kenzshi/DDoSProject
|
11d7e676a150964a9f78f1b7e1df4468dd9d973f
|
9587a2be7f4773d19a96a35d1128f5041f0472da
|
refs/heads/master
| 2021-01-10T19:48:21.355849
| 2015-03-16T09:52:22
| 2015-03-16T09:52:22
| 30,205,639
| 42
| 32
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,951
|
py
|
# Copyright (c) 2003-2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: dcerpc_v4.py 529 2012-04-29 21:39:46Z bethus@gmail.com $
#
# Description:
# Handle basic DCE/RPC protocol, version 4.
#
import array
import socket
import struct
from impacket import ImpactPacket
from impacket import uuid
from impacket import dcerpc
import dcerpc, conv
class DCERPC_RawCall(ImpactPacket.Header):
def __init__(self, op_num, data = ''):
self.OP_NUM = op_num
ImpactPacket.Header.__init__(self)
self.setData(data)
def setData(self, data):
self.get_bytes()[:] = array.array('B', data)
def get_header_size(self):
return len(self.get_bytes())
class MSRPCHeader(ImpactPacket.Header):
__SIZE = 80
def __init__(self, aBuffer = None):
ImpactPacket.Header.__init__(self, MSRPCHeader.__SIZE)
self.set_version(4)
self.set_type(dcerpc.MSRPC_REQUEST)
self.set_flags((0x08, 0x00))
self.set_representation((0x10, 0x00, 0x00))
self.set_serial((0, 0))
## self.set_if_version(3)
self.set_seq_num(0)
self.set_if_hint(0xFFFF)
self.set_activity_hint(0xFFFF)
if aBuffer: self.load_header(aBuffer)
def get_version(self):
return self.get_byte(0)
def set_version(self, version):
self.set_byte(0, version)
def get_type(self):
return self.get_byte(1)
def set_type(self, type):
self.set_byte(1, type)
def get_flags(self):
""" This method returns a tuple in (flags1, flags2) form."""
return (self.get_byte(2), self.get_byte(3))
def set_flags(self, flags):
""" This method takes a tuple in (flags1, flags2) form."""
self.set_byte(2, flags[0])
self.set_byte(3, flags[1])
    def get_representation(self):
        """ This method returns a three-byte tuple."""
        return (self.get_byte(4), self.get_byte(5), self.get_byte(6))

    def set_representation(self, representation):
        """ This method takes a three-byte tuple."""
        self.set_byte(4, representation[0])
        self.set_byte(5, representation[1])
        self.set_byte(6, representation[2])
def get_serial(self):
""" This method returns a tuple in (high, low) form."""
return (self.get_byte(7), self.get_byte(79))
def set_serial(self, serial):
""" This method takes a tuple in (high, low) form."""
self.set_byte(7, serial[0])
self.set_byte(79, serial[1])
def get_obj_binuuid(self):
return self.get_bytes().tolist()[8:8+16]
def set_obj_binuuid(self, binuuid):
assert 16 == len(binuuid)
self.get_bytes()[8:8+16] = array.array('B', binuuid)
def get_if_binuuid(self):
return self.get_bytes().tolist()[24:24+16]
def set_if_binuuid(self, binuuid):
assert 16 == len(binuuid)
self.get_bytes()[24:24+16] = array.array('B', binuuid)
def get_activity_binuuid(self):
return self.get_bytes().tolist()[40:40+16]
def set_activity_binuuid(self, binuuid):
assert 16 == len(binuuid)
self.get_bytes()[40:40+16] = array.array('B', binuuid)
def get_server_boottime(self):
return self.get_long(56, '<')
def set_server_boottime(self, time):
self.set_long(56, time, '<')
def get_if_version(self):
return self.get_long(60, '<')
def set_if_version(self, version):
self.set_long(60, version, '<')
def get_seq_num(self):
return self.get_long(64, '<')
def set_seq_num(self, num):
self.set_long(64, num, '<')
def get_op_num(self):
return self.get_word(68, '<')
def set_op_num(self, op):
self.set_word(68, op, '<')
def get_if_hint(self):
return self.get_word(70, '<')
def set_if_hint(self, hint):
self.set_word(70, hint, '<')
def get_activity_hint(self):
return self.get_word(72, '<')
def set_activity_hint(self, hint):
self.set_word(72, hint, '<')
def get_frag_len(self):
return self.get_word(74, '<')
def set_frag_len(self, len):
self.set_word(74, len, '<')
def get_frag_num(self):
return self.get_word(76, '<')
def set_frag_num(self, num):
self.set_word(76, num, '<')
def get_auth_proto(self):
return self.get_byte(78)
def set_auth_proto(self, proto):
self.set_byte(78, proto)
def get_header_size(self):
return MSRPCHeader.__SIZE
def contains(self, aHeader):
ImpactPacket.Header.contains(self, aHeader)
if self.child():
contents_size = self.child().get_size()
self.set_op_num(self.child().OP_NUM)
self.set_frag_len(contents_size)
def get_ctx_id(self):
# return self.get_word(20, '<')
return 0
def set_ctx_id(self, id):
# self.set_word(20, id, '<')
pass
class DCERPC_v4(dcerpc.DCERPC):
DEFAULT_FRAGMENT_SIZE = 1392
def __init__(self, transport):
dcerpc.DCERPC.__init__(self, transport)
self.__activity_uuid = uuid.generate()
self.__seq_num = 0
self._bind = 0 # Don't attempt binding unless it explicitly requested.
self.set_idempotent(0)
def set_default_max_fragment_size(self):
self.set_max_fragment_size(DCERPC_v4.DEFAULT_FRAGMENT_SIZE)
def bind(self, uuid, bogus_binds = ''):
"""If idempotent is non-zero, the package will be sent with
that flag enabled. Certain services react by skiping the CONV
phase during the binding.
"""
self._bind = 1 # Will bind later, when the first packet is transferred.
self.__if_uuid = uuid[:16]
self.__if_version = struct.unpack('<L', uuid[16:20])[0]
def get_idempotent(self):
return self.__idempotent
def set_idempotent(self, flag):
self.__idempotent = flag
def conv_bind(self):
# Receive CONV handshake.
# ImpactDecode: this block.
data = self._transport.recv()
rpc = MSRPCHeader(data)
activity_uuid = rpc.get_activity_binuuid()
_conv = conv.WhoAreYou(data[rpc.get_header_size():])
# ImpactDecode
rpc = MSRPCHeader()
rpc.set_type(dcerpc.MSRPC_RESPONSE)
rpc.set_if_binuuid(conv.MSRPC_UUID_CONV)
flags = rpc.get_flags()
rpc.set_flags((flags[0], 0x04))
rpc.set_activity_binuuid(activity_uuid)
_conv = conv.WhoAreYou2()
rpc.contains(_conv)
# The CONV response must be sent to the endpoint from where the request was received.
old_address = self._transport.get_addr()
peer_address = self._transport.get_recv_addr()
self._transport.set_addr(peer_address)
self._transport.send(rpc.get_packet())
self._transport.set_addr(old_address)
def send(self, data):
if isinstance(data, dcerpc.MSRPCHeader):
opnum = data['op_num']
packet = data['pduData']
else:
opnum = data.OP_NUM
packet = data.get_packet()
frag_num = 0
rpc = MSRPCHeader()
self.set_ctx_id(self._ctx)
rpc.set_if_binuuid(self.__if_uuid)
rpc.set_if_version(self.__if_version)
rpc.set_activity_binuuid(self.__activity_uuid)
rpc.set_seq_num(self.__seq_num)
frag = DCERPC_RawCall(opnum)
if self._max_frag:
offset = 0
while 1:
toSend = packet[offset:offset+self._max_frag]
if not toSend: break
flags = dcerpc.MSRPC_NOTAFRAG | dcerpc.MSRPC_RECRESPOND
if self.__idempotent: flags |= dcerpc.MSRPC_NOTFORIDEMP
offset += len(toSend)
if offset == len(packet): flags |= dcerpc.MSRPC_LASTFRAG
rpc.set_flags((flags, 0))
frag.setData(toSend)
rpc.contains(frag)
rpc.set_frag_num(frag_num)
self._transport.send(rpc.get_packet())
frag_num += 1
if self._bind and not self.__idempotent:
self._bind = 0
self.conv_bind()
self.recv() # Discard RPC_ACK.
else:
if self.__idempotent:
rpc.set_flags((dcerpc.MSRPC_NOTFORIDEMP, 0))
rpc.contains(packet)
self._transport.send(rpc.get_packet())
if self._bind and not self.__idempotent:
self._bind = 0
self.conv_bind()
self.recv() # Discard RPC_ACK.
self.__seq_num += 1
def recv(self):
data = self._transport.recv()
rpc = MSRPCHeader(data)
off = rpc.get_header_size()
return data[off:]
|
[
"la236am@users.isi.deterlab.net"
] |
la236am@users.isi.deterlab.net
|
1e905c442fe41f352798eab774b333e0624ba426
|
4292e22ea0014183186ffbb692eb252552359f86
|
/infrastructure/tools/get_inventory.py
|
fb010b4bcea083d4ca4e7f434266765b80d6b5b7
|
[
"MIT"
] |
permissive
|
nhsx/covid-chest-imaging-database
|
28127c41b0aa0e2e424594d17cd2f1d3fdebb5e3
|
9946684521130761e6f39b01d1651e61fc79080f
|
refs/heads/master
| 2023-08-24T12:00:13.589048
| 2023-08-14T14:59:29
| 2023-08-14T14:59:29
| 252,395,273
| 63
| 13
|
MIT
| 2023-09-11T05:01:02
| 2020-04-02T08:15:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
#!/usr/bin/env python
import argparse
import logging
from pathlib import Path
import boto3
logging.basicConfig(level=logging.INFO)
def download_inventory(main_bucket, output_folder):
"""Downloading the relevant inventory files
Parameters
----------
main_bucket : str
The main warehouse bucket name (inventory bucket name
will be extrapolated from there).
output_folder : str
The folder where to download the files.
"""
inventory_bucket = f"{main_bucket}-inventory"
s3_client = boto3.client("s3")
# Get the latest list of inventory files
objs = s3_client.list_objects_v2(
Bucket=inventory_bucket, Prefix=f"{main_bucket}/daily-full-inventory/hive",
)["Contents"]
latest_symlink = sorted([obj["Key"] for obj in objs])[-1]
response = s3_client.get_object(Bucket=inventory_bucket, Key=latest_symlink)
for line in response["Body"].read().decode("utf-8").split("\n"):
inventory_file = line.replace(f"s3://{inventory_bucket}/", "")
logging.info(f"Downloading inventory file: {inventory_file}")
output_path = Path(output_folder) / Path(inventory_file).name
s3_client.download_file(inventory_bucket, inventory_file, str(output_path))
logging.info(f"Saved to: {output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Download the latest set of S3 inventory files."
)
parser.add_argument(
"-b",
"--bucket",
default="nccid-data-warehouse-prod",
help="The bucket whose inventory to grab.",
)
parser.add_argument(
"-o",
"--output-folder",
default="./",
help="Where to download the inventory files",
)
args = parser.parse_args()
download_inventory(args.bucket, args.output_folder)
|
[
"gergely.imreh@faculty.ai"
] |
gergely.imreh@faculty.ai
|
e239b9fc54ed81cbc21409cf99891fe420dbbda3
|
939fbd0184a77bef1e406227320e6bacedc8acfc
|
/linkerd/datadog_checks/linkerd/__about__.py
|
3a3af84f9a651aa4f9d19deb558c73f71935b8a2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
netsil/integrations-core
|
d08b8df76c4ff7fea293c66404ac7f63153d2e51
|
ca1502937ed4febc6e426950b209ffd6263197fb
|
refs/heads/epoch/5.23.0
| 2023-06-08T01:22:29.394361
| 2022-10-12T11:32:29
| 2022-10-12T11:32:29
| 112,673,395
| 4
| 6
|
BSD-3-Clause
| 2022-10-12T11:32:30
| 2017-11-30T23:48:59
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = "1.0.0"
|
[
"noreply@github.com"
] |
noreply@github.com
|
8f26465aaa04312133e55a3be07fa3ebfdaba5af
|
3196460db64eded2daa77457643c8dd1ed1ba99e
|
/codechef/steve/COINS-wrong2.py
|
b8ec3851c8d755ef25b8402d188b8199eba086e0
|
[] |
no_license
|
prototypemagic/proto-mastery
|
94c649958792f00ea2a057b63ed0f7717b5ab05d
|
45f7ef2e998fa7dbc071f5c42217a83fd9340f51
|
refs/heads/master
| 2020-05-28T08:55:45.769199
| 2012-09-10T22:12:00
| 2012-09-10T22:12:00
| 3,097,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
#!/usr/bin/env python
# Steve Phillips / elimisteve
# 2012.01.04
# The following is wrong, to say the least, because input == 13 should
# produce output == 13, not 14. As the problem states, you cannot
# exchange Bytelandian coins for other Bytelandian coins. (A corrected
# memoized sketch follows at the end of this file.)
def naive_max(num):
# Given in problem description
return num/2 + num/3 + num/4
def clever_max(num):
'''Turns every 12 bytelandian coins into 13, plus remainder'''
# NOT given in problem description
naive = naive_max(num)
maybe_bigger = (num/12) * 13 + (num % 12) # WRONG!
return maybe_bigger if maybe_bigger > naive else naive
n = 0
while True:
try:
n = int( raw_input().strip() )
print max([n, clever_max(n),
clever_max(n/2) + clever_max(n/3) + clever_max(n/4)])
except:
break
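# Corrected sketch (illustrative, not the original submission): never exchange
# Bytelandian coins for other Bytelandian coins; just maximize
# f(n) = max(n, f(n//2) + f(n//3) + f(n//4)) with memoization.
_memo = {}
def max_dollars(n):
    if n < 12:
        return n  # splitting never helps below 12
    if n not in _memo:
        _memo[n] = max(n, max_dollars(n//2) + max_dollars(n//3) + max_dollars(n//4))
    return _memo[n]
# max_dollars(13) == 13, max_dollars(24) == 27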
|
[
"elimisteve@gmail.com"
] |
elimisteve@gmail.com
|
a3bfb2d0297ea76d5536aa7f89ed478033f0da6a
|
25d65018ce75445949711fb7d9d761cdefeed640
|
/Q-Learning-for-Trading/envs.py
|
7de54089df16b44c1513b67a49149d5d3d62ef2d
|
[] |
no_license
|
kimcaprio/Trading_Stock_Market
|
a69998198431e8754603b5e018be7120ca4ea8a1
|
864e54681abdc26500b9cc9dacee4ac732076272
|
refs/heads/master
| 2023-01-24T18:17:56.033233
| 2020-11-25T01:47:54
| 2020-11-25T01:47:54
| 275,816,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,728
|
py
|
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import itertools
class TradingEnv(gym.Env):
"""
A 3-stock (MSFT, IBM, QCOM) trading environment.
State: [# of stock owned, current stock prices, cash in hand]
- array of length n_stock * 2 + 1
- price is discretized (to integer) to reduce state space
- use close price for each stock
- cash in hand is evaluated at each step based on action performed
  Action: sell (0), hold (1), and buy (2)
    - when selling, sell all the shares
    - when buying, buy as many as cash in hand allows
    - when buying multiple stocks, buy one share of each in turn while cash
      remains (see the action-decoding sketch at the end of this file)
"""
def __init__(self, train_data, init_invest=20000):
# data
    # Prices are floats; round them to integers so the discretized state
    # space stays small.
    self.stock_price_history = np.around(train_data)  # round to integer to reduce state space
self.n_stock, self.n_step = self.stock_price_history.shape
# instance attributes
self.init_invest = init_invest
self.cur_step = None
self.stock_owned = None
self.stock_price = None
self.cash_in_hand = None
# action space
self.action_space = spaces.Discrete(3**self.n_stock)
# observation space: give estimates in order to sample and build scaler
stock_max_price = self.stock_price_history.max(axis=1)
stock_range = [[0, init_invest * 2 // mx] for mx in stock_max_price]
price_range = [[0, mx] for mx in stock_max_price]
cash_in_hand_range = [[0, init_invest * 2]]
self.observation_space = spaces.MultiDiscrete(stock_range + price_range + cash_in_hand_range)
# seed and start
self._seed()
self._reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _reset(self):
self.cur_step = 0
self.stock_owned = [0] * self.n_stock
self.stock_price = self.stock_price_history[:, self.cur_step]
self.cash_in_hand = self.init_invest
return self._get_obs()
def _step(self, action):
assert self.action_space.contains(action)
prev_val = self._get_val()
self.cur_step += 1
self.stock_price = self.stock_price_history[:, self.cur_step] # update price
self._trade(action)
cur_val = self._get_val()
reward = cur_val - prev_val
done = self.cur_step == self.n_step - 1
info = {'cur_val': cur_val}
return self._get_obs(), reward, done, info
def _get_obs(self):
obs = []
obs.extend(self.stock_owned)
obs.extend(list(self.stock_price))
obs.append(self.cash_in_hand)
return obs
def _get_val(self):
return np.sum(self.stock_owned * self.stock_price) + self.cash_in_hand
def _trade(self, action):
# all combo to sell(0), hold(1), or buy(2) stocks
action_combo = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock)))
# print("action_combo : ", action_combo)
action_vec = action_combo[action]
# one pass to get sell/buy index
sell_index = []
buy_index = []
for i, a in enumerate(action_vec):
if a == 0:
sell_index.append(i)
elif a == 2:
buy_index.append(i)
# two passes: sell first, then buy; might be naive in real-world settings
if sell_index:
for i in sell_index:
self.cash_in_hand += self.stock_price[i] * self.stock_owned[i]
self.stock_owned[i] = 0
if buy_index:
can_buy = True
while can_buy:
for i in buy_index:
if self.cash_in_hand > self.stock_price[i]:
self.stock_owned[i] += 1 # buy one share
self.cash_in_hand -= self.stock_price[i]
else:
can_buy = False
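# Action-decoding sketch (illustrative, not part of the original file): each
# integer action in [0, 3**n_stock) maps to one (sell/hold/buy) choice per
# stock via the same itertools.product ordering used in _trade above.
if __name__ == '__main__':
    n_stock = 3
    combos = list(map(list, itertools.product([0, 1, 2], repeat=n_stock)))
    print(combos[0])   # [0, 0, 0] -> sell every stock
    print(combos[14])  # [1, 1, 2] -> hold, hold, buy the third
    print(combos[26])  # [2, 2, 2] -> buy every stock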
|
[
"kimcaprio1@naver.com"
] |
kimcaprio1@naver.com
|
4e8b509ecc099975f3e38d997799d301173a5bf0
|
59db4cd30b6677ba4f45d9b488d05c20a9f311b2
|
/temperature_v_time.py
|
d8b720c559c25e03a3020f1e9c2c4208881c210d
|
[] |
no_license
|
krmnino/XrayDiffraction-DataReduction
|
1f11b5759d9efd9edcdb2a1185ab18c641972fee
|
eeec84220a70eb99a3bac1a624ff79fc797dbe7f
|
refs/heads/master
| 2020-11-24T11:17:44.694356
| 2019-12-15T03:27:44
| 2019-12-15T03:27:44
| 228,122,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,857
|
py
|
####################################
# temperature_v_time.py
# Python 3.x
# Kurt Manrique-Nino
####################################
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
from datetime import datetime
def get_delta_time(time1, time2):
delta_t = time2 - time1
return delta_t.total_seconds()
def parse_time_par_file(path):
str_time_arr = []
time_arr = []
delta_time_arr = []
with open(path) as par_file:
for line in par_file:
str_time_arr.append(line[4:24])
str_time_arr.pop(0)
for time in str_time_arr:
time_arr.append(datetime.strptime(time, "%b %d %H:%M:%S %Y"))
for time in time_arr:
delta_time_arr.append(get_delta_time(time_arr[0], time))
return delta_time_arr
def parse_temperature_par_file(path):
temperature_arr = np.genfromtxt(path, delimiter = ' ', usecols = -2)
return temperature_arr
def parse_first_scan_par_file(path):
scan = 0
with open(path) as par_file:
counter = 0
for line in par_file:
counter += 1
if(counter == 2):
temp = line.split()
scan = int(temp[7])
return scan
def get_subdirs(root):
for src, dirs, files in os.walk(root):
return dirs
def build_chi_paths(subdirs, root, start_from_scan):
    sort_subdirs = list(map(int, subdirs))
sort_subdirs.sort()
root_dir = root
paths = []
full_paths = []
if(root_dir[len(root) - 1] != '/'):
root_dir += '/'
else:
pass
for sub in range(0, len(sort_subdirs) + 1):
paths.append('%s%d%s'%(root_dir, sub, '/'))
trimmed_paths = paths[start_from_scan:]
for path in trimmed_paths:
for src, dirs, files in os.walk(path):
for file in files:
if(file.endswith('.chi')):
temp = os.path.join(src, file)
full_paths.append(temp)
return full_paths
def parse_angle_chi_files(arrayPaths):
angle_arr = np.genfromtxt(arrayPaths[0], delimiter = ' ', usecols = 0)
return angle_arr
def parse_intensity_chi_files(arrayPaths):
collection_intensity = []
for path in arrayPaths:
        intensity = np.genfromtxt(path, delimiter = ' ', usecols = 1)
for i in range(0, intensity.size):
collection_intensity.append(intensity[i])
return collection_intensity
def display_graphs(angle_arr, time_arr, intensity_arr, temperature_arr, path, zi, zf):
#controur graph data
x = np.array(angle_arr)
y = np.array(time_arr)
x1, y1 = np.meshgrid(x, y)
z = np.array(intensity_arr).reshape(x1.shape)
#subplots
f, (g1, g2, g3) = plt.subplots(1, 3)
fileName = path.split('/')
f.suptitle(fileName[len(fileName) - 2])
#subplot plot
g1.plot(temperature_arr, time_arr, 'b')
    g1.set_xlabel('Temperature')
g1.set_ylabel('Time (seconds)')
g1.set_ylim([y[0], y[len(y) - 1]])
g1.grid()
#subplot contour
g2.contourf(x1, y1, z)
g2.set_xlabel('Angle (degrees)')
g2.set_ylabel('Time (seconds)')
#subplot zoom-in
if(zi < angle_arr[0] or zi > zf or zf > angle_arr[len(angle_arr) - 1]):
print('Invalid values to generate zoomed contour graph')
else:
g3.contourf(x1, y1, z)
g3.set_xlim([zi, zf])
g3.set_xlabel('Angle (degrees)')
g3.set_ylabel('Time (seconds)')
plt.show()
root_directory = input('Enter the path to the root folder: ')
par_file_path = input('Enter the path to the raw .par file: ')
print('-----Zoom contour graph by angle (degrees)-----')
zoom_contour_from = float(input('From: '))
zoom_contour_to = float(input('To: '))
time_array = parse_time_par_file(par_file_path)
temperature_array = parse_temperature_par_file(par_file_path)
start_from_scan = parse_first_scan_par_file(par_file_path)
raw_subdirs = get_subdirs(root_directory)
chi_paths = build_chi_paths(raw_subdirs, root_directory, start_from_scan)
angle_array = parse_angle_chi_files(chi_paths)
intensity_array = parse_intensity_chi_files(chi_paths)
display_graphs(angle_array, time_array, intensity_array, temperature_array, par_file_path, zoom_contour_from, zoom_contour_to)
|
[
"kurt.manrique.n@gmail.com"
] |
kurt.manrique.n@gmail.com
|
61da67eb396105306a86db070d5dde3620b80f21
|
71f8ecf0ff44ae70d506d6dc68e933a7f5b2f984
|
/main/gameModule/events.py
|
2bdb5c5fe7dadcf33fdb298c918d10dbc36c9024
|
[] |
no_license
|
CitationNerded/PythonGameFromJad
|
801b700b0273e25414b36b87ae2c8c157c5cb4ec
|
937bc754a708ca46ddbd7f652557a0e85fb2221e
|
refs/heads/master
| 2022-12-21T12:30:33.793025
| 2017-08-16T21:51:10
| 2017-08-16T21:51:10
| 85,260,221
| 0
| 3
| null | 2022-12-20T02:55:17
| 2017-03-17T02:03:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,543
|
py
|
"""events - contains events and game classes."""
DIRECTION_UP = 0
DIRECTION_DOWN = 1
DIRECTION_LEFT = 2
DIRECTION_RIGHT = 3
class Event:
"""Event - superclass defines events that any objects that maybe called by the event manager."""
def __init__(self):
"""Initialise Event Class."""
self.name = 'Generic Event'
class TickEvent(Event):
"""TickEvent - manage tick events while the program runs."""
def __init__(self):
"""Initialise TickEvent Class."""
self.name = 'CPU Tick Event'
class QuitEvent(Event):
"""QuitEvent - manage quit events."""
def __init__(self):
"""Initialise Program Quit Event."""
self.name = 'Program Quit Event'
class MapBuiltEvent(Event):
"""MapBuiltEvent - map building event."""
def __init__(self, map):
"""Initialise Map Built Event."""
self.name = 'Map Built Event'
self.map = map
class GameStartedEvent(Event):
"""GameStartedEvent - game starting event."""
def __init__(self, game):
"""Initialise Game Started Event."""
self.name = 'Game Started Event'
self.game = game
class CharacterMoveRequest(Event):
"""CharacterMoveRequest - Request that a character moves."""
def __init__(self, direction):
"""Initialise Character Move Request."""
self.name = 'Character Move Request'
self.direction = direction
class CharacterPlaceEvent(Event):
"""CharacterPlaceRequest - Place Character."""
def __init__(self, character):
"""Initialise Character Place Event."""
self.name = 'Character Place Event'
self.character = character
class CharacterMoveEvent(Event):
"""CharacterMoveEvent - move character."""
def __init__(self, character):
""""Initialise Character Move Event."""
self.name = 'Character Move Event'
self.character = character
class EventManager:
"""This class is responsible for co-oridnating events across the mvc."""
def __init__(self):
""""Initialise Event Manager."""
from weakref import WeakKeyDictionary
self.listeners = WeakKeyDictionary()
self.eventQueue = []
def RegisterListener(self, listener):
"""Register Event Listeners."""
self.listeners[listener] = 1
def UnregisterListener(self, listener):
"""Unregister Event Listeners."""
if listener in self.listeners.keys():
del self.listeners[listener]
def Post(self, event):
"""Post listener events."""
if not isinstance(event, TickEvent):
print("Event: %s" % (event.name))
for listener in self.listeners.keys():
listener.Notify(event)
class Game:
"""Game Class - model that looks after the Game."""
STATE_PREPARING = 0
STATE_RUNNING = 1
STATE_PAUSED = 2
def __init__(self, evMananger):
"""Initialise the Game Class."""
self.evMananger = evMananger
self.evMananger.RegisterListener(self)
self.state = Game.STATE_PREPARING
self.players = [Player(evMananger)]
self.map = Map(evMananger)
def Start(self):
"""Start Game - set state to RUNNING."""
self.map.Build()
self.state = Game.STATE_RUNNING
ev = GameStartedEvent(self)
self.evMananger.Post(ev)
def Notify(self, event):
"""Notify - if the program sees a TickEvent start the game."""
if isinstance(event, TickEvent):
if self.state == Game.STATE_PREPARING:
self.Start()
class Player:
"""Manages players class."""
def __init__(self, evMananger):
"""Initialise player data."""
self.evMananger = evMananger
self.characters = [Character(evMananger)]
class Character:
"""Manages Character Class."""
def __init__(self, evMananger):
"""Initialise the Character class."""
self.evMananger = evMananger
self.evMananger.RegisterListener(self)
self.sector = None
def Move(self, direction):
"""Move the character object."""
if self.sector.MovePossible(direction):
newSector = self.sector.neighbors[direction]
self.sector = newSector
ev = CharacterMoveEvent(self)
self.evMananger.Post(ev)
def Place(self, sector):
"""Place the character object on screen."""
self.sector = sector
ev = CharacterPlaceEvent(self)
self.evMananger.Post(ev)
def Notify(self, event):
"""Notify the character objects."""
if isinstance(event, GameStartedEvent):
map = event.game.map
self.Place(map.sectors[map.startSectorIndex])
elif isinstance(event, CharacterMoveRequest):
self.Move(event.direction)
class Map:
"""Manages Map Class object."""
def __init__(self, evMananger):
"""Initialise Map Class."""
self.evMananger = evMananger
self.sectors = list(range(9))
self.startSectorIndex = 0
def Build(self):
"""Build Relational Map data."""
for i in list(range(9)):
self.sectors[i] = Sector(self.evMananger)
self.sectors[3].neighbors[DIRECTION_UP] = self.sectors[0]
self.sectors[4].neighbors[DIRECTION_UP] = self.sectors[1]
self.sectors[5].neighbors[DIRECTION_UP] = self.sectors[2]
self.sectors[6].neighbors[DIRECTION_UP] = self.sectors[3]
self.sectors[7].neighbors[DIRECTION_UP] = self.sectors[4]
self.sectors[8].neighbors[DIRECTION_UP] = self.sectors[5]
self.sectors[0].neighbors[DIRECTION_DOWN] = self.sectors[3]
self.sectors[1].neighbors[DIRECTION_DOWN] = self.sectors[4]
self.sectors[2].neighbors[DIRECTION_DOWN] = self.sectors[5]
self.sectors[3].neighbors[DIRECTION_DOWN] = self.sectors[6]
self.sectors[4].neighbors[DIRECTION_DOWN] = self.sectors[7]
self.sectors[5].neighbors[DIRECTION_DOWN] = self.sectors[8]
self.sectors[1].neighbors[DIRECTION_LEFT] = self.sectors[0]
self.sectors[2].neighbors[DIRECTION_LEFT] = self.sectors[1]
self.sectors[4].neighbors[DIRECTION_LEFT] = self.sectors[3]
self.sectors[5].neighbors[DIRECTION_LEFT] = self.sectors[4]
self.sectors[7].neighbors[DIRECTION_LEFT] = self.sectors[6]
self.sectors[8].neighbors[DIRECTION_LEFT] = self.sectors[7]
self.sectors[0].neighbors[DIRECTION_RIGHT] = self.sectors[1]
self.sectors[1].neighbors[DIRECTION_RIGHT] = self.sectors[2]
self.sectors[3].neighbors[DIRECTION_RIGHT] = self.sectors[4]
self.sectors[4].neighbors[DIRECTION_RIGHT] = self.sectors[5]
self.sectors[6].neighbors[DIRECTION_RIGHT] = self.sectors[7]
self.sectors[7].neighbors[DIRECTION_RIGHT] = self.sectors[8]
ev = MapBuiltEvent(self)
self.evMananger.Post(ev)
class Sector:
"""Sector Management Class."""
def __init__(self, evMananger):
"""Initialise Sector class."""
self.evMananger = evMananger
#self.evMananger.RegisterListener(self)
self.neighbors = list(range(4))
self.neighbors[DIRECTION_UP] = None
self.neighbors[DIRECTION_DOWN] = None
self.neighbors[DIRECTION_LEFT] = None
self.neighbors[DIRECTION_RIGHT] = None
    def MovePossible(self, direction):
        """Return 1 if the sector has a neighbor in the given direction, else 0."""
        if self.neighbors[direction]:
            return 1
        return 0
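if __name__ == '__main__':
    # Smoke test (illustrative, not part of the original module): wire up the
    # event manager and model, then drive them with a tick and a move request.
    manager = EventManager()
    game = Game(manager)
    manager.Post(TickEvent())                         # starts the game
    manager.Post(CharacterMoveRequest(DIRECTION_RIGHT))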
|
[
"jad.pamment@assurity.co.nz"
] |
jad.pamment@assurity.co.nz
|
a8bc57c698c594a2bbcddc5d8812f0eacc6865c7
|
efbee30a83911a7dda989fb4536dd7132278aca1
|
/whileLoop.py
|
b00e004c2c74fa1542762f526fe1ba58b51cc155
|
[] |
no_license
|
sarvani-chitturi/binary-search
|
71c9cbb3bf057cddadbdeadd3a5292c687b46d49
|
6d4b4a41e7a91a2aad5369f47c8060af42fb4f75
|
refs/heads/master
| 2023-01-02T12:27:34.542572
| 2020-10-09T14:25:56
| 2020-10-09T14:25:56
| 302,661,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 10:21:36 2020
@author: Amarnadh
"""
i=1
while i<=5:
print(i)
i+=1
print("List using while")
x=[10,20,30,40,50]
i=0
while i < len(x):
print(x[i],end=' ')
i+=1
|
[
"121910301041@gitam.in"
] |
121910301041@gitam.in
|
143fe68d7f6815fea8d18f1fb028024f23bd7c51
|
bd02997a44218468b155eda45dd9dd592bb3d124
|
/leetcode_course-schedule2.py
|
ca4b99612fab4cd4749f3814a1054bbfb691055d
|
[] |
no_license
|
rheehot/ProblemSolving_Python
|
88b1eb303ab97624ae6c97e05393352695038d14
|
4d6dc6aea628f0e6e96530646c66216bf489427f
|
refs/heads/master
| 2023-02-13T03:30:07.039231
| 2021-01-04T06:04:11
| 2021-01-04T06:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
'''
Problem Solving leetcode course-schedule2
Author: Injun Son
Date: October 25, 2020
'''
import sys
import collections
import heapq
import functools
import itertools
import re
import math
import bisect
from typing import *
def canFinish(numCourses: int, prerequisites: List[List[int]]) -> bool:
graph = collections.defaultdict(list)
    # Build the adjacency list
for x, y in prerequisites:
graph[x].append(y)
traced = set()
visited = set()
def dfs(i):
        # A node already on the current DFS path means a cycle -> False
if i in traced:
return False
        # A fully explored node cannot start a cycle -> True
if i in visited:
return True
traced.add(i)
for y in graph[i]:
if not dfs(y):
return False
        # Done exploring: take the node off the current path
traced.remove(i)
        # Done exploring: mark the node as fully visited
visited.add(i)
return True
for x in list(graph):
if not dfs(x):
return False
return True
print(canFinish(2, [[1,0]]))
print(canFinish(2, [[1,0], [0,1]]))
|
[
"ison@sfu.ca"
] |
ison@sfu.ca
|
066e81a0fbe03a8fbc53b78c094138284f850ede
|
6c80119e02bb29761fc7854c5a2f2a144451ca5a
|
/tests/fakeIDP.py
|
971281cd5d87940746d418b565f9b43de490a12b
|
[
"BSD-2-Clause"
] |
permissive
|
josjevv/pysaml2
|
c412a21db7a52334bf67feeabc38f877a121f973
|
f806786f6dad8fc2b03daa0e1d55682daead3ec8
|
refs/heads/master
| 2020-12-25T12:17:41.628279
| 2013-04-22T11:45:25
| 2013-04-22T11:45:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,520
|
py
|
from urlparse import parse_qs
from saml2.saml import AUTHN_PASSWORD
from saml2.samlp import attribute_query_from_string, logout_request_from_string
from saml2 import BINDING_HTTP_REDIRECT, pack
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_SOAP
from saml2.server import Server
from saml2.soap import parse_soap_enveloped_saml_attribute_query
from saml2.soap import parse_soap_enveloped_saml_logout_request
from saml2.soap import make_soap_enveloped_saml_thingy
__author__ = 'rolandh'
TYP = {
"GET": [BINDING_HTTP_REDIRECT],
"POST": [BINDING_HTTP_POST, BINDING_SOAP]
}
def unpack_form(_str, ver="SAMLRequest"):
SR_STR = "name=\"%s\" value=\"" % ver
RS_STR = 'name="RelayState" value="'
i = _str.find(SR_STR)
i += len(SR_STR)
j = _str.find('"', i)
sr = _str[i:j]
k = _str.find(RS_STR, j)
k += len(RS_STR)
l = _str.find('"', k)
rs = _str[k:l]
return {ver:sr, "RelayState":rs}
class DummyResponse(object):
def __init__(self, code, data, headers=None):
self.status_code = code
self.text = data
self.headers = headers or []
class FakeIDP(Server):
def __init__(self, config_file=""):
Server.__init__(self, config_file)
#self.sign = False
def receive(self, url, method="GET", **kwargs):
"""
Interface to receive HTTP calls on
:param url:
:param method:
:param kwargs:
:return:
"""
if method == "GET":
path, query = url.split("?")
qs_dict = parse_qs(kwargs["data"])
req = qs_dict["SAMLRequest"][0]
rstate = qs_dict["RelayState"][0]
else:
# Could be either POST or SOAP
path = url
try:
qs_dict = parse_qs(kwargs["data"])
req = qs_dict["SAMLRequest"][0]
rstate = qs_dict["RelayState"][0]
except KeyError:
req = kwargs["data"]
rstate = ""
response = ""
# Get service from path
for key, vals in self.config.getattr("endpoints", "idp").items():
for endp, binding in vals:
if path == endp:
assert binding in TYP[method]
if key == "single_sign_on_service":
return self.authn_request_endpoint(req, binding,
rstate)
elif key == "single_logout_service":
return self.logout_endpoint(req, binding)
for key, vals in self.config.getattr("endpoints", "aa").items():
for endp, binding in vals:
if path == endp:
assert binding in TYP[method]
if key == "attribute_service":
return self.attribute_query_endpoint(req, binding)
return response
def authn_request_endpoint(self, req, binding, relay_state):
req = self.parse_authn_request(req, binding)
if req.message.protocol_binding == BINDING_HTTP_REDIRECT:
_binding = BINDING_HTTP_POST
else:
_binding = req.message.protocol_binding
try:
resp_args = self.response_args(req.message, [_binding])
except Exception:
raise
identity = { "surName":"Hedberg", "givenName": "Roland",
"title": "supertramp", "mail": "roland@example.com"}
userid = "Pavill"
authn_resp = self.create_authn_response(identity,
userid=userid,
authn=(AUTHN_PASSWORD,
"http://www.example.com/login"),
**resp_args)
response = "%s" % authn_resp
_dict = pack.factory(_binding, response,
resp_args["destination"], relay_state,
"SAMLResponse")
return DummyResponse(200, **_dict)
def attribute_query_endpoint(self, xml_str, binding):
if binding == BINDING_SOAP:
_str = parse_soap_enveloped_saml_attribute_query(xml_str)
else:
_str = xml_str
aquery = attribute_query_from_string(_str)
extra = {"eduPersonAffiliation": "faculty"}
userid = "Pavill"
name_id = aquery.subject.name_id
attr_resp = self.create_attribute_response(extra, aquery.id,
None,
sp_entity_id=aquery.issuer.text,
name_id=name_id,
attributes=aquery.attribute)
if binding == BINDING_SOAP:
# SOAP packing
#headers = {"content-type": "application/soap+xml"}
soap_message = make_soap_enveloped_saml_thingy(attr_resp)
# if self.sign and self.sec:
# _signed = self.sec.sign_statement_using_xmlsec(soap_message,
# class_name(attr_resp),
# nodeid=attr_resp.id)
# soap_message = _signed
response = "%s" % soap_message
else: # Just POST
response = "%s" % attr_resp
return DummyResponse(200, response)
def logout_endpoint(self, xml_str, binding):
if binding == BINDING_SOAP:
_str = parse_soap_enveloped_saml_logout_request(xml_str)
else:
_str = xml_str
req = logout_request_from_string(_str)
_resp = self.create_logout_response(req, [binding])
if binding == BINDING_SOAP:
# SOAP packing
#headers = {"content-type": "application/soap+xml"}
soap_message = make_soap_enveloped_saml_thingy(_resp)
# if self.sign and self.sec:
# _signed = self.sec.sign_statement_using_xmlsec(soap_message,
# class_name(attr_resp),
# nodeid=attr_resp.id)
# soap_message = _signed
response = "%s" % soap_message
else: # Just POST
response = "%s" % _resp
return DummyResponse(200, response)
|
[
"roland.hedberg@adm.umu.se"
] |
roland.hedberg@adm.umu.se
|
319a3a97837ddfcfbffea4e1f6c5bc6acd3ee2c8
|
a9ee2f04b70542b2f399dc7ebb803164348aef0e
|
/mutations.py
|
c614d08dc3472aa20173fe38ab13d2cd6618ece8
|
[] |
no_license
|
murilosisnando2003/Hacker_Rank
|
6617e55f362bf82e8ad73189a3e9075199d31ef9
|
5ab80e7da2846880bddfb15815afaf3351269a42
|
refs/heads/master
| 2021-01-04T15:50:47.381339
| 2020-03-06T22:50:26
| 2020-03-06T22:50:26
| 240,621,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
def mutate_string(string, position, character):
string = string[:position] + character + string[(position+1):]
return string
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
|
[
"Murilo Rodrigues@DESKTOP-ONT7ENB.EDGE-BR.LOCAL"
] |
Murilo Rodrigues@DESKTOP-ONT7ENB.EDGE-BR.LOCAL
|
898fca67981db8c25af682928af05f412c567dc6
|
c9f8532ea47337269e3f87cb3d75cccd8146536e
|
/data_pre.py
|
9bf2fa09ac73513eac52b6c6267907b9b20e868d
|
[] |
no_license
|
lianrenbao/huawei_remote-sensing
|
6d174becb13981339c289f04775d872fe004eaa9
|
84f54eb5dd50c3eb5c9d2aaed6bdb475b4d5b924
|
refs/heads/master
| 2020-06-12T20:10:59.319499
| 2019-06-28T11:57:46
| 2019-06-28T11:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
import numpy as np
from functools import partial
import pandas as pd
import os
from tqdm import tqdm_notebook, tnrange, tqdm
import sys
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict
from torchvision import transforms as T
from imgaug import augmenters as iaa
import random
import pathlib
import cv2
# create dataset class
class create_test(Dataset):
def __init__(self,images_df, base_path,augument=True,mode="train"):
if not isinstance(base_path, pathlib.Path):
base_path = pathlib.Path(base_path)
self.images_df = images_df.copy() #csv
self.augument = augument
self.images_df.Id = self.images_df.Id.apply(lambda x:base_path / str(x))#.zfill(6))
self.mode = mode
def __len__(self):
return len(self.images_df)
def __getitem__(self,index):
X = self.read_images(index)
if not self.mode == "test":
y = self.images_df.iloc[index].Target
else:
y = str(self.images_df.iloc[index].Id.absolute())
if self.augument:
X = self.augumentor(X)
X = T.Compose([T.ToPILImage(),T.ToTensor()])(X)
return X.float(),y
def read_images(self,index):
row = self.images_df.iloc[index]
filename = str(row.Id.absolute())
#print(filename)
images = cv2.imread(filename)#+'.jpg')
return images
def augumentor(self,image):
augment_img = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5),
iaa.SomeOf((0,4),[
iaa.Affine(rotate=90),
iaa.Affine(rotate=180),
iaa.Affine(rotate=270),
iaa.Affine(shear=(-16, 16)),
]),
iaa.OneOf([
iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 2 and 7
]),
#iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
], random_order=True)
image_aug = augment_img.augment_image(image)
return image_aug
'''
test_files = pd.read_csv("/home/dell/Desktop/1.csv")
#train_gen = MultiModalDataset(train_data_list,config.train_data,config.train_vis,mode="train")
test_gen = create_test(test_files,'/media/dell/dell/data/遥感/test/',augument=False,mode="test")
#test_loader = DataLoader(test_gen,1,shuffle=False,pin_memory=True,num_workers=16)
x,y=test_gen[1]
print(x)
print(y)
'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
d125fd84b86e729ddc089d31810ce1cd547079c7
|
8a4405ff28d61133d451f187a443f7db9758655c
|
/pstipspider/pipelines.py
|
75808af3eba1b95df171e6ee75dd2700f991d38f
|
[] |
no_license
|
co89757/pstipspider
|
2dc5ca50528b039972b06023531841abe6bb2618
|
1b7bf719e8be8589dffa9cbdc49606a3b7d1bdce
|
refs/heads/master
| 2021-01-25T00:57:21.251609
| 2017-06-18T19:47:15
| 2017-06-18T19:47:15
| 94,708,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
# -*- coding: utf-8 -*-
import pstipspider.settings as proj_settings
from scrapy.exceptions import DropItem
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class PstipspiderPipeline(object):
def process_item(self, item, spider):
cutoff_yr = proj_settings.CUTOFF_YEAR
if item.get("Date").year < cutoff_yr:
            # The original passed the format arguments to DropItem instead of
            # formatting the message; .format() is applied here.
            raise DropItem(
                "Dropping item ({0}...) whose publish year is too old: {1}".format(
                    item["Title"][:15], item["Date"].year))
title_low = item["Title"].lower()
if not ('power' in title_low):
raise DropItem(
"Dropping item that has no powershell keyword in it")
dt = item.get("Date")
dt = dt.date().isoformat()
# scrape off time info
item["Date"] = dt
return item
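# As the comment at the top notes, the pipeline must be registered in the
# ITEM_PIPELINES setting. A minimal sketch of settings.py (the priority value
# 300 and the CUTOFF_YEAR value are illustrative assumptions, not taken from
# the original project):
#
#   ITEM_PIPELINES = {
#       'pstipspider.pipelines.PstipspiderPipeline': 300,
#   }
#   CUTOFF_YEAR = 2015  # hypothetical value; the pipeline only requires that it exists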
|
[
"colin.brat@gmail.com"
] |
colin.brat@gmail.com
|
a839419dd4c4c0aff907413b92ba0217fb42f65a
|
640b2096a3ff25ac6dbd65fa38a246218cfeac1e
|
/add_two_numbers_ii.py
|
3b601220ae2ce72f69d9b805fefd11d213da43d3
|
[] |
no_license
|
benjiaming/leetcode
|
53d00082289dd847bb81df674ef68832a89220c7
|
6e09d1bfe9eb7476125eb31d95616a115f2e6f7f
|
refs/heads/master
| 2021-06-12T22:46:06.279667
| 2019-05-25T18:37:23
| 2019-05-25T18:37:23
| 183,532,697
| 0
| 0
| null | 2021-04-20T18:07:59
| 2019-04-26T01:10:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
"""
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Note:
Your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Example:
Input: numbers = [2,7,11,15], target = 9
Output: [1,2]
Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
"""
def cmp(x, y):
"""
Replacement for built-in function cmp that was removed in Python 3
Compare the two objects x and y and return an integer according to
the outcome. The return value is negative if x < y, zero if x == y
and strictly positive if x > y.
"""
return (x > y) - (x < y)
class Solution(object):
def twoSumDict(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
Uses a dictionary; runs in O(n)
"""
seen = {}
for i, num in enumerate(numbers):
if num in seen:
return [seen[num]+1, i+1]
seen[target-num] = i
return []
def twoSumCmp(self, numbers, target):
# two pointer technique
i = 0
j = len(numbers) - 1
while i < j:
result = cmp(numbers[i], target - numbers[j])
if result < 0:
i += 1
elif result > 0:
                j -= 1
else:
return [i+1, j+1]
return []
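# Quick self-check (a minimal sketch), using the example from the docstring:
if __name__ == '__main__':
    sol = Solution()
    assert sol.twoSumDict([2, 7, 11, 15], 9) == [1, 2]
    assert sol.twoSumCmp([2, 7, 11, 15], 9) == [1, 2]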
|
[
"ben@blazke.cz"
] |
ben@blazke.cz
|
cd3cc071876da1756cf23e55593a8321a820dca2
|
7f7d31cc7728ebff806e5ab49321572d08198764
|
/Chess-Engine/6. EnPassant and Pawn Promotion in Advanced Algo/ChessMain.py
|
ed4ec4b840b195624701ab281baf414f69cb2d9d
|
[] |
no_license
|
Aneiongit/Bootcamp
|
02dff9cda47f109e97faa6732e2b610f3a275452
|
cf562ec12afd63a47ad5e48e4d1496e98afe4624
|
refs/heads/main
| 2023-07-17T14:55:41.699169
| 2021-09-03T09:40:16
| 2021-09-03T09:40:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
"""
This is our main driver file. It will be responsible for
- handling user input
- displaying current GameState object
"""
import pygame as p
import ChessEngineAd as ChessEngine
# import ChessEngine
p.init()
WIDTH = HEIGHT = 480
DIMENTION = 8 # 8*8 CHESS BOARD
SQ_SIZE = HEIGHT // DIMENTION
MAX_FPS = 15
IMAGES = {}
'''
Initialise the global dictionary of images. This will be called exactly once in the main
'''
def loadImages():
pieces = ['bP', 'bR', 'bN', 'bB', 'bQ', 'bK', 'wP', 'wR', 'wN', 'wB', 'wQ', 'wK']
for piece in pieces:
IMAGES[piece] = p.transform.scale(p.image.load("images/" + piece + ".png"), (SQ_SIZE, SQ_SIZE ) )
# Note: We can access a piece by saying IMAGES['wP'] -> will give white pawn;
'''
This will be out main driver. It will handle user input and update the graphics.
'''
def main():
screen = p.display.set_mode((WIDTH, HEIGHT))
clock = p.time.Clock()
screen.fill(p.Color('white'))
gs = ChessEngine.GameState()
validMoves = gs.getValidMoves() # get a list of valid moves.
moveMade = False # to check if the user made a move. If true recalculate validMoves.
loadImages() #only do this once -> before the while loop
running = True
sqSelected = () #no sq is selected initially, keep track of the last click by the user -> (tuple : (row,col))
playerClicks = [] # contains players clicks => [(6,4),(4,4)] -> pawn at (6,4) moved 2 steps up on (4,4)
while running:
for e in p.event.get():
            if e.type == p.QUIT:
                running = False  # fixed typo: the original assigned to 'runnin'
#MOUSE HANDLERS
elif e.type == p.MOUSEBUTTONDOWN:
location = p.mouse.get_pos() # (x,y) position of mouse
col = location[0]//SQ_SIZE
row = location[1]//SQ_SIZE
if sqSelected == (row, col): # user selected the same sq. twice -> deselect the selecion
sqSelected = ()
playerClicks = []
else:
sqSelected = (row,col)
playerClicks.append(sqSelected) # append for both 1st and 2nd click
if len(playerClicks) == 2: # when 2nd click
move = ChessEngine.Move(playerClicks[0],playerClicks[1], gs.board)
for i in range(len(validMoves)):
if move == validMoves[i]:
gs.makeMove(validMoves[i])
moveMade = True
playerClicks = [] # reset platerClicks
sqSelected = () # reset user clicks
if not moveMade :
playerClicks = [sqSelected]
#KEY HANDLERS
elif e.type == p.KEYDOWN:
if e.key == p.K_z:
gs.undoMove()
moveMade = True #can do `validMoves = gs.validMoves()` but then if we change function name we will have to change the call at various places.
if moveMade:
validMoves = gs.getValidMoves()
moveMade = False
drawGameState(screen, gs)
clock.tick(MAX_FPS)
p.display.flip()
'''
responsible for all the graphics in the game
'''
def drawGameState(screen, gs):
drawBoard(screen) #draw squares on board (should be called before drawing anything else)
drawPieces(screen, gs.board) #draw pieces on the board
# FUTURE SCOPE : add in piece highlighting or move suggestions
'''
draw the squares on the board
'''
def drawBoard(screen):
colors = [p.Color(235, 235, 208), p.Color(119, 148, 85)]
for r in range(DIMENTION):
for c in range(DIMENTION):
color = colors[(r+c)%2]
p.draw.rect(screen, color, p.Rect(SQ_SIZE*c, SQ_SIZE*r , SQ_SIZE, SQ_SIZE))
'''
draw the pieces on the board using ChessEngine.GameState.board.
'''
def drawPieces(screen, board):
for r in range(DIMENTION):
for c in range(DIMENTION):
piece = board[r][c]
if piece != '--':
screen.blit(IMAGES[piece], p.Rect(SQ_SIZE*c, SQ_SIZE*r , SQ_SIZE, SQ_SIZE))
if __name__ == '__main__':
main()
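# Controls implied by the event loop above: click a piece and then a destination
# square to move; click the same square twice to deselect it; press 'z' to undo
# the last move.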
|
[
"Goh15@wp.pl"
] |
Goh15@wp.pl
|
032e56abd8b0c04a05f86672375efe48bbc0e751
|
ca37c80b43e342dd8013deffbb35b481fab21f5f
|
/convert.py
|
c601988bafd133488f7998dda9696b106b146bf6
|
[
"MIT"
] |
permissive
|
GraphSAINT/GraphSAINT
|
c95aeeab91acdd4210767e398839b2344ed8a42c
|
c9b1e340d7b951465ac4a9251eef93832e68b003
|
refs/heads/master
| 2022-09-01T02:16:30.441922
| 2022-08-12T06:08:09
| 2022-08-12T06:08:09
| 169,026,799
| 444
| 92
|
MIT
| 2022-08-12T06:08:10
| 2019-02-04T04:25:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,409
|
py
|
import scipy.sparse as sp
import numpy as np
import networkx as nx
import sys
import json
import os
from networkx.readwrite import json_graph
dataset_str=sys.argv[1]
baseline_str='data.ignore/'+dataset_str+'/'
dataset_str='data/'+dataset_str+'/'
if not os.path.exists(baseline_str[:-1]):
os.mkdir(baseline_str[:-1])
# G.json
adj_full=sp.load_npz(dataset_str+'adj_full.npz')
G=nx.from_scipy_sparse_matrix(adj_full)
print('nx: finish load graph')
data=json_graph.node_link_data(G)
role=json.load(open(dataset_str+'role.json','r'))
te=set(role['te'])
va=set(role['va'])
for node in data['nodes']:
node['test']=False
node['val']=False
if node['id'] in te:
node['test']=True
elif node['id'] in va:
node['val']=True
for edge in data['links']:
del edge['weight']
edge['target']=int(edge['target'])
with open(baseline_str+'G.json','w') as f:
json.dump(data,f)
# id_map.json
id_map={}
for i in range(G.number_of_nodes()):
id_map[str(i)]=i
with open(baseline_str+'id_map.json','w') as f:
json.dump(id_map,f)
# feats.npy
feats=np.load(dataset_str+'feats.npy')
np.save(baseline_str+'feats.npy',feats)
# class_map.json
class_map=json.load(open(dataset_str+'class_map.json','r'))
# NOTE: this loop is a no-op (each value is written back unchanged); kept as in the original
for k,v in class_map.items():
    class_map[k]=v
with open(baseline_str+'class_map.json','w') as f:
json.dump(class_map,f)
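# Usage sketch: `python convert.py <dataset>` reads data/<dataset>/
# (adj_full.npz, role.json, feats.npy, class_map.json) and writes the
# GraphSAGE-style files (G.json, id_map.json, feats.npy, class_map.json)
# under data.ignore/<dataset>/.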
|
[
"GraphSAINTKDD19@gmail.com"
] |
GraphSAINTKDD19@gmail.com
|
d6571dd4ac18fe2c876e33b21c8d6e8e9ba84414
|
4773a4adc5a50e948d34b8b2efe8e67baf4a887b
|
/FileEmulator/FileEmulator.py
|
526a727545f90cf9cf74d0040e749c0485f7360a
|
[
"MIT"
] |
permissive
|
abaker2010/FileSimulator
|
bb33c943b95db869dfdaff12bf64ba1bbe50ecee
|
2c3c5fb95be6e54c97c2a82d856d52157bda9f64
|
refs/heads/master
| 2020-06-10T05:40:31.919123
| 2019-06-25T03:12:02
| 2019-06-25T03:12:02
| 193,599,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
#!/usr/bin/python
# Written by: Aaron Baker
from classes.RepeatedTimer import RepeatedTimer
from classes.HttpRequestHandler import HttpRequestHandler
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import threading
import urllib.request
import colorama # pip install
from colorama import Fore, Back, Style
global Get_Count
# This is used for getting information from the server
def Get_From_Server():
global Get_Count
global GetFromServer
print(Fore.LIGHTYELLOW_EX + "[+] Client Requesting: {%s}" % Get_Count)
url = 'http://127.0.0.1:8085/index.html/'
response = urllib.request.urlopen(url)
data = response.read()
text = data.decode('utf-8')
Get_Count += 1
if Get_Count % 15 == 0:
GetFromServer.stop()
time.sleep(5)
GetFromServer = RepeatedTimer(0.20, Get_From_Server)
return
# This is used for setting up and starting the server
def StartServer():
# Server settings
server_address = ('127.0.0.1', 8085)
httpd = HTTPServer(server_address, HttpRequestHandler)
print(Fore.LIGHTGREEN_EX + 'running server...\n')
    # The original had bare 'Style.RESET_ALL' and 'Fore.YELLOW' expressions,
    # which have no effect; colour codes must be written to stdout.
    print(Style.RESET_ALL + Fore.YELLOW, end='')
httpd.serve_forever()
return
# Default main
def main():
global GetFromServer
global Serverthread
global Get_Count
Get_Count = 0
colorama.init()
print(Fore.LIGHTRED_EX + "Starting server")
Serverthread = threading.Thread(target=StartServer)
    Serverthread.daemon = True  # setDaemon() is deprecated; the attribute form is equivalent
Serverthread.start()
GetFromServer = RepeatedTimer(0.20, Get_From_Server)
while True:
time.sleep(1000)
return
# Exiting the console and stopping HTTP Server
def exit_gracefully():
global GetFromServer
GetFromServer.stop()
print("Stopping...")
return
# Default main call
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
finally:
        exit_gracefully()
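# Assumed behaviour of classes.RepeatedTimer (its source is not shown here):
# RepeatedTimer(interval, fn) invokes fn every `interval` seconds on a background
# timer until stop() is called -- which is how Get_From_Server pauses itself
# every 15 requests and then resumes after a 5-second sleep.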
|
[
"crosby.baker2010@gmail.com"
] |
crosby.baker2010@gmail.com
|
0dba7b476911bf14bd295c3c9931ec58a7c48b12
|
31779cb000b2f33a1af94fc5919bca40a6a80485
|
/Snake_pygame.py
|
ae0413a59f580984c923c9378979b3eac72d2314
|
[
"MIT"
] |
permissive
|
lfanning8281/Per3_Charlie_pygame
|
bf10b94f61e97bbbcb5b72c2db292d805ee9c071
|
8d479c3a31c788cee913280a03b7b019b2523a52
|
refs/heads/master
| 2020-03-13T23:00:16.720767
| 2018-06-07T20:35:38
| 2018-06-07T20:35:38
| 131,327,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
import pygame, random, sys
from pygame.locals import *
import snake_game
def collide(x1, x2, y1, y2, w1, w2, h1, h2):
    return x1+w1 > x2 and x1 < x2+w2 and y1+h1 > y2 and y1 < y2+h2
def die(screen, score):
    f = pygame.font.SysFont('Arial', 30)
    t = f.render('Your score was: ' + str(score), True, (0, 0, 0))
    screen.blit(t, (10, 270))
    pygame.display.update()
    pygame.time.wait(2000)
    sys.exit(0)
xs = [290, 290, 290, 290, 290]
ys = [290, 270, 250, 230, 210]
dirs = 0
score = 0
applepos = (random.randint(0, 590), random.randint(0, 590))
pygame.init()
s = pygame.display.set_mode((600, 600))
pygame.display.set_caption('Snake')
appleimage = pygame.Surface((10, 10))
appleimage.fill((0, 255, 0))
img = pygame.Surface((20, 20))
img.fill((255, 0, 0))
f = pygame.font.SysFont('Arial', 20)
clock = pygame.time.Clock()
while True:
clock.tick(10)
for e in pygame.event.get():
if e.type == QUIT:
sys.exit(0)
elif e.type == KEYDOWN:
            # Assumption: these helpers live in the imported snake_game module;
            # as originally written the bare names would raise NameError.
            if snake_game.is_up_button_pressed() and dirs != 0: dirs = 2
            elif snake_game.is_down_button_pressed() and dirs != 2: dirs = 0
            elif snake_game.is_left_button_pressed() and dirs != 1: dirs = 3
            elif snake_game.is_right_button_pressed() and dirs != 3: dirs = 1
i = len(xs)-1
while i >= 2:
if collide(xs[0], xs[i], ys[0], ys[i], 20, 20, 20, 20):die(s, score)
i-= 1
    if collide(xs[0], applepos[0], ys[0], applepos[1], 20, 10, 20, 10):
        score += 1
        xs.append(700)
        ys.append(700)
        applepos = (random.randint(0, 590), random.randint(0, 590))
if xs[0] < 0 or xs[0] > 580 or ys[0] < 0 or ys[0] > 580: die(s, score)
i = len(xs)-1
while i >= 1:
        xs[i] = xs[i-1]
        ys[i] = ys[i-1]
        i -= 1
if dirs==0:ys[0] += 20
elif dirs==1:xs[0] += 20
elif dirs==2:ys[0] -= 20
elif dirs==3:xs[0] -= 20
s.fill((255, 255, 255))
for i in range(0, len(xs)):
s.blit(img, (xs[i], ys[i]))
    s.blit(appleimage, applepos)
    t = f.render(str(score), True, (0, 0, 0))
    s.blit(t, (10, 10))
    pygame.display.update()
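# Direction encoding used by `dirs` above: 0 = down (+y), 1 = right (+x),
# 2 = up (-y), 3 = left (-x); each step moves the head 20 px on a 600x600 board.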
|
[
"LFanning8281@student.pps.net"
] |
LFanning8281@student.pps.net
|
2ccb58c676f371f045e4777a7fdb56173c709531
|
86f5f27668717a2a5bb6a9ee31eece98859e672e
|
/HANDSHAKEEE/server.py
|
c49fe9d14778513dd1d1d84550696ce03326905f
|
[] |
no_license
|
GustavoGB/Physical-Layer
|
7e18f4de33b90507f89f21406024a26bbfc21658
|
645e91fc341301c58d55eb4c9746edacb2ab4eea
|
refs/heads/master
| 2021-01-24T09:04:09.037995
| 2018-06-17T23:53:08
| 2018-06-17T23:53:08
| 123,000,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#####################################################
# Physical Layer of Computing (Camada Física da Computação)
# Carareto
# 17/02/2018
# Application
####################################################
from enlace import *
import time
# Serial COM port
# to find out your port, run in a terminal:
# python -m serial.tools.list_ports
#serialName = "/dev/ttyACM0" # Ubuntu (or a variation)
#serialName = "/dev/tty.usbmodem1411" # Mac (or a variation)
serialName = "COM4" # Windows (or a variation)
# Initializes the link layer, enables communication, and names the file to be received
def main():
com = enlace(serialName)
com.enable()
imageW = "./imgs/recebida/recebidaTeste.png"
while (True):
print("HandShake")
#Tipos:
# Syn 1 = 1
# Syn 2 = 3
# Ack 1 = 4
# Ack 2 = 5
# Dados = 7
print("***RECEBENDO.....***")
rxBuffer,tipo = com.getData()
tipo = int.from_bytes(tipo,byteorder='big')
print("Esperando Syn 1 para estabelecer contato......")
        #Receiving Syn1
        if tipo == 1 :
            # The next two lines were bare string expressions in the original
            # (no print), so nothing was shown; they are printed here.
            print("***SYN1 FOUND***")
            print("___Sending ACK1___")
            data = (8).to_bytes(1,byteorder='big')
            tipo = (4).to_bytes(1,byteorder='big')
            #Send Ack1!!
            com.sendData(data,tipo)
            print("...........Ack1 sent")
            time.sleep(2.0)
            #Sending Syn2!!!
            data = (8).to_bytes(1,byteorder='big')
            tipo = (3).to_bytes(1,byteorder='big')
            com.sendData(data,tipo)
            print("...sending Syn2...")
        else:
            print("***ERROR***")
            print("***RESTARTING HANDSHAKE")
            continue
        #Receiving Ack2
        print("***Waiting for ACK2***")
        rxBuffer, tipo = com.getData()
        tipo = (int.from_bytes(tipo,byteorder='big'))
        if tipo == 5:
            print("***ACK2 RECEIVED***")
            print("Communication established")
            break
        else:
            print("***ERROR***")
            print("***RESTARTING HANDSHAKE")
            continue
print("__________________________________________________")
    # Receive the data
    print("Receiving packet with payload... ")
    rxBuffer, tipo = com.getData()
    # Save the received data to a file
    print("__________________________________________________")
    print("Saving data to file:")
    print("{}".format(imageW))
    f = open(imageW, 'wb')
    f.write(rxBuffer)
    # Close the image file
    f.close()
    # End the communication
    print("__________________________________________________")
    print("Communication ended")
print("__________________________________________________")
com.disable()
if __name__ == "__main__":
main()
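# The handshake implemented above, summarised (type numbers from the table in main;
# the final data packet is assumed to carry type 7 per that table):
#   client -> server : SYN1 (type 1)
#   server -> client : ACK1 (type 4), then SYN2 (type 3)
#   client -> server : ACK2 (type 5)
#   client -> server : data payload, written to imageW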
|
[
"gustavo.gobetti98@gmail.com"
] |
gustavo.gobetti98@gmail.com
|
35c9b3e3c688b13470f1b516a72df95ffd4b9d46
|
020d00c279b04daa7dd0a8ec95347884ce8f7f74
|
/unittest/test_case/test_pczh/test_loginapi.py
|
8760e3c5c680906bf52f8a58103b462a5f33a2a8
|
[] |
no_license
|
JasonTang7/inspiration
|
ce6efb328b2b4a17e4ffed335a266e4477707faf
|
567836fc9aebcc67acb2816364ba89ffa8d356db
|
refs/heads/master
| 2022-06-05T02:25:01.610248
| 2022-05-18T03:25:38
| 2022-05-18T03:25:38
| 221,389,569
| 0
| 0
| null | 2020-11-19T09:35:22
| 2019-11-13T06:35:20
|
Groovy
|
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
import time
import unittest
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from functions import pczhpage,common,pczhpageElements
import config
import requests
import json
@unittest.skip("For debugging; this test class is not executed in formal test runs")
class TestLogIn(unittest.TestCase):
"""测试登录接口"""
    @classmethod
    def setUpClass(cls):
        # Restored from the commented-out original so that self.driver exists
        # in test_6login below.
        cls.driver = webdriver.Chrome()
        cls.pc_url = "https://www.yamibuy.com"
        cls.driver.get("https://www.yamibuy.com")
    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
def test_6login(self):
"""通过接口实现用户登陆"""
login_url="https://customer.yamibuy.com/api/users/login"
# header = {"Cookie":"ymb_tnimager=EKTaL8%2BMHEMNoM2ChNe4hw%3D%3D",
# "Cache-Control":"no-cache",
# "Postman-Token":"<calculated when request is sent>",
# "Content-Type":"application/json",
# "Content-Length":"<calculated when request is sent>",
# "Host":"<calculated when request is sent>",
# "Connection":"keep-alive"}
body_data_json ={"params": {"email": "autotesting@yamibuy.com","pwd": "111111","imagePosition": 66,"isRest": 1}}
response = requests.post(url=login_url,json=body_data_json,verify=False)
json_obj = json.loads(response.content)
token = json_obj['data']['token']
self.driver.delete_cookie("YMB_TK")
self.driver.add_cookie({'name':'YMB_TK', 'value':token})
sleep(2)
self.driver.refresh()
sleep(5)
|
[
"jason.tang@yamibuy.com"
] |
jason.tang@yamibuy.com
|
310f0bae44d7f3f1fadeff6ea38b7dca8369a771
|
635e06886d27732cb35d73fddb14907c9e2f1bda
|
/bin/NER/Model.py
|
4ea56f9f4fc4d47ce44befa218f893afc59e04c6
|
[] |
no_license
|
hoangbao123/chainer-rnn-ner
|
034c0238fac268155d1273d4e09e1517040ef3c5
|
829fb640cd627e5d28fb0d162eb2068615d92156
|
refs/heads/master
| 2020-04-28T03:02:54.899189
| 2017-01-27T15:36:26
| 2017-01-27T15:36:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,724
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.links import NStepLSTM
"""
Model with cross entropy as the loss function.
"""
class TaggerBase(chainer.Chain):
def __init__(self):
pass
# I want to use this for NERTagger, BiNERTagger, BiCharNERTagger
def load_glove(self, path, vocab):
with open(path, "r") as fi:
for line in fi:
line_list = line.strip().split(" ")
word = line_list[0]
if word in vocab:
vec = self.xp.array(line_list[1::], dtype=np.float32)
self.embed.W.data[vocab[word]] = vec
class NERTagger(TaggerBase):
"""
Ordinary LSTM
"""
def __init__(self, n_vocab, n_tag, embed_dim, hidden_dim, dropout):
super(TaggerBase, self).__init__(
embed=L.EmbedID(n_vocab, embed_dim, ignore_label=-1),
l1=L.NStepLSTM(1, embed_dim, embed_dim, dropout=0, use_cudnn=True),
l2=L.Linear(embed_dim, n_tag),
)
if dropout:
self.dropout = True
else:
self.dropout = False
def __call__(self, xs, hx, cx, train=True):
xs = [self.embed(item) for item in xs]
if self.dropout and train:
xs = [F.dropout(item) for item in xs]
hy, cy, ys = self.l1(hx, cx, xs, train=train) # don't use dropout
y = [self.l2(item) for item in ys]
return y
class BiNERTagger(TaggerBase):
"""
Bi-directional LSTM
"""
def __init__(self, n_vocab, n_tag, embed_dim, hidden_dim, dropout):
super(TaggerBase, self).__init__(
embed=L.EmbedID(n_vocab, embed_dim, ignore_label=-1),
forward_l1=L.NStepLSTM(
1, embed_dim, embed_dim, dropout=0, use_cudnn=True),
backward_l1=L.NStepLSTM(
1, embed_dim, embed_dim, dropout=0, use_cudnn=True),
l2=L.Linear(embed_dim * 2, n_tag),
)
if dropout:
self.dropout = True
else:
self.dropout = False
def __call__(self, xs, hx, cx, train=True):
xs = [self.embed(item) for item in xs]
if self.dropout and train:
xs = [F.dropout(item) for item in xs]
xs_backward = [item[::-1] for item in xs]
forward_hy, forward_cy, forward_ys = self.forward_l1(
hx, cx, xs, train=train) # don't use dropout
backward_hy, backward_cy, backward_ys = self.backward_l1(
hx, cx, xs_backward, train=train) # don't use dropout
ys = [F.concat([forward, backward[::-1]], axis=1)
for forward, backward in zip(forward_ys, backward_ys)]
y = [self.l2(item) for item in ys]
return y
class BiCharNERTagger(TaggerBase):
"""
bi-directional LSTM with character-based encoding
"""
def __init__(self, n_vocab, n_char, n_tag, embed_dim, hidden_dim, dropout):
super(TaggerBase, self).__init__(
embed=L.EmbedID(n_vocab, embed_dim, ignore_label=-1),
            # the character embedding size is hard-coded to 25
char_embed=L.EmbedID(n_char, 25, ignore_label=-1),
forward_l1=L.NStepLSTM(
1, embed_dim + 50, embed_dim + 50, dropout=0, use_cudnn=True),
backward_l1=L.NStepLSTM(
1, embed_dim + 50, embed_dim + 50, dropout=0, use_cudnn=True),
l2=L.Linear((embed_dim + 50) * 2, n_tag),
forward_char=L.NStepLSTM(1, 25, 25, dropout=0, use_cudnn=True),
backward_char=L.NStepLSTM(1, 25, 25, dropout=0, use_cudnn=True),
)
if dropout:
self.dropout = True
else:
self.dropout = False
def __call__(self, xs, hx, cx, xxs, train=True):
forward_char_embeds = [
[self.char_embed(item) for item in items] for items in xxs]
backward_char_embeds = [[item[::-1] for item in items]
for items in forward_char_embeds]
# Encode character sequences
forward_encodings = []
backward_encodings = []
for forward, backward in zip(forward_char_embeds, backward_char_embeds):
hhx = chainer.Variable(
self.xp.zeros((1, len(forward), 25), dtype=self.xp.float32))
ccx = chainer.Variable(
self.xp.zeros((1, len(forward), 25), dtype=self.xp.float32))
_, __, forward_char_encs = self.forward_char(hhx, ccx, forward)
_, __, backward_char_encs = self.backward_char(hhx, ccx, backward)
forward_encodings.append([x[-1] for x in forward_char_encs])
backward_encodings.append([x[-1] for x in backward_char_encs])
forward_encodings = [F.vstack(x) for x in forward_encodings]
backward_encodings = [F.vstack(x) for x in backward_encodings]
# Encode word embeddings
xs = [self.embed(item) for item in xs]
xs_forward = [F.concat([x, y, z], axis=1) for x, y, z in zip(
xs, forward_encodings, backward_encodings)]
xs_backward = [x[::-1] for x in xs_forward]
if self.dropout and train:
xs_forward = [F.dropout(item) for item in xs_forward]
xs_backward = [F.dropout(item) for item in xs_backward]
forward_hy, forward_cy, forward_ys = self.forward_l1(
hx, cx, xs_forward, train=train)
backward_hy, backward_cy, backward_ys = self.backward_l1(
hx, cx, xs_backward, train=train)
ys = [F.concat([forward, backward[::-1]], axis=1)
for forward, backward in zip(forward_ys, backward_ys)]
y = [self.l2(item) for item in ys]
return y
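# A minimal instantiation sketch (all hyperparameter values below are
# illustrative assumptions, not taken from the original project):
# model = BiCharNERTagger(n_vocab=10000, n_char=100, n_tag=9,
#                         embed_dim=100, hidden_dim=100, dropout=True)
# model.load_glove('glove.6B.100d.txt', vocab)  # vocab: dict mapping word -> id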
|
[
"kiyono@ecei.tohoku.ac.jp"
] |
kiyono@ecei.tohoku.ac.jp
|
ce367ad483dab43120cf161648780ed59c2f69a6
|
4b54b2b1037d5dea88117840b4f58a82fac2d3ea
|
/1006.py
|
b9a2fa2b47ce7f2966860b6c8b611aaae86bedec
|
[] |
no_license
|
cinereous1/UriJudge
|
20879ca2067b2eb374e5dc7471a77366dee1a68a
|
96eab7846e349347d0300d9fc2ac6aa470a2651c
|
refs/heads/master
| 2021-08-24T15:32:02.594187
| 2017-12-10T07:43:01
| 2017-12-10T07:43:01
| 113,373,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
#!/usr/bin/python3
nota0 = float(input().strip())
nota1 = float(input().strip())
nota2 = float(input().strip())
media = ((nota0 * 2) + (nota1 * 3) + (nota2 * 5)) / 10
print("MEDIA = {:.1f}".format(media))
|
[
"linktovoid@gmail.com"
] |
linktovoid@gmail.com
|
2e1ed6f855389c38f9ab0d89770b3963a29c5ff3
|
fef3a61df017422bc2f867538ece7f496fa91416
|
/icecreamratings/config/urls.py
|
842634c0b8bca94faa2a14dcb441499d306fe714
|
[] |
no_license
|
qianzhaicun/my-first-blog
|
1f7f2cc27c3c75d3c456ecdacb3286a4f4d1a484
|
a3e03b389423ffb9e85c9fa9464bfb38efdbbde9
|
refs/heads/master
| 2020-12-03T09:22:04.555591
| 2017-06-28T01:23:26
| 2017-06-28T01:23:26
| 95,615,681
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('icecreamratings.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
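# Settings this module relies on (the values below are illustrative sketches,
# not taken from the project's settings file):
# ADMIN_URL = r'^admin/'
# MEDIA_URL = '/media/'
# MEDIA_ROOT = '/path/to/media'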
|
[
"admin@DESKTOP-JCH074F"
] |
admin@DESKTOP-JCH074F
|
f3e4a522d9167b441f81517f2bfe17f834b0dd6b
|
553ac874ff9eeffffb9ee0567791a101cc68444c
|
/Mission_to_Mars/scrape_mars.py
|
480b94d27830f63b65c4830870ef8b269d70488a
|
[] |
no_license
|
carlmack01/web-scraping-challenge
|
e770950b094937984a7b1f99aa616296088ae401
|
3aab6f47e785e8dff4dd8c24bb4dcaf4f792f041
|
refs/heads/master
| 2022-11-28T23:33:33.245288
| 2020-08-13T18:56:21
| 2020-08-13T18:56:21
| 287,357,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,449
|
py
|
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import datetime as dt
import time
import re
def scrape():
scrapedict = {}
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
url = "https://mars.nasa.gov/news/"
browser.visit(url)
time.sleep(1)
html_string = browser.html
soup = bs(html_string, 'html.parser')
title = soup.find("div", class_="list_text").find("div", class_="content_title").text
art_para = soup.find("div", class_="list_text").find("div", class_="article_teaser_body").text
url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
time.sleep(1)
browser.find_by_id('full_image').click()
browser.links.find_by_partial_text('more info').click()
html_string = browser.html
soup = bs(html_string, 'html.parser')
image = soup.find("img", class_="main_image")['src']
imagebase = "https://www.jpl.nasa.gov"
featured_image_url = imagebase + image
url = 'https://space-facts.com/mars/'
tables = pd.read_html(url)
df = tables[0]
html_table = df.to_html()
url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
time.sleep(1)
aelement = browser.find_by_css('a.product-item h3')[0]
firsttext = aelement.text
aelement.click()
firsturl = browser.links.find_by_text('Sample')[0]['href']
browser.back()
aelement = browser.find_by_css('a.product-item h3')[1]
secondtext = aelement.text
aelement.click()
secondurl = browser.links.find_by_text('Sample')[0]['href']
browser.back()
aelement = browser.find_by_css('a.product-item h3')[2]
thirdtext = aelement.text
aelement.click()
thirdurl = browser.links.find_by_text('Sample')[0]['href']
browser.back()
aelement = browser.find_by_css('a.product-item h3')[3]
fourthtext = aelement.text
aelement.click()
fourthurl = browser.links.find_by_text('Sample')[0]['href']
hemisphere_image_urls = [
{"title": firsttext, "img_url": firsturl},
{"title": secondtext, "img_url": secondurl},
{"title": thirdtext, "img_url": thirdurl},
{"title": fourthtext, "img_url": fourthurl},
]
scrapedict = {
"Headline": title,
"Paragraph": art_para,
"Featured_image_url": featured_image_url,
"html_table": html_table,
"hemisphere_info": hemisphere_image_urls
}
browser.quit()
    return scrapedict
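# Usage sketch (in this challenge the module is typically imported by a Flask
# app; that app is an assumption, not shown here):
#   import scrape_mars
#   data = scrape_mars.scrape()   # returns the dict built above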
|
[
"carlhmackensen@CARLs-MacBook-Pro.local"
] |
carlhmackensen@CARLs-MacBook-Pro.local
|