| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
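Each row below pairs this per-file metadata with the file's content. A minimal sketch of filtering rows with this schema using pandas, assuming the split has been exported to Parquet; the file name is hypothetical:

```python
import pandas as pd

# Hypothetical Parquet export of one split of this dataset
df = pd.read_parquet("sample.parquet")

# Keep permissively licensed, non-vendored, non-generated Python files
mask = (
    (df["license_type"] == "permissive")
    & (df["language"] == "Python")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
print(df.loc[mask, ["repo_name", "path", "length_bytes"]].head())
```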
---

blob_id: 6520b992ca2c8cb414b63226a294c939a1321829
directory_id: d8c7821feb6288d9406e8300d44a5bd0e1721f44
path: /tests/test_bestip.py
content_id: 4cf35df7f4568f2d178104b393627f52c9a4527d
detected_licenses: ["MIT"]
license_type: permissive
repo_name: darwinwen/mootdx
snapshot_id: aa28f2f933727bcd1852230df8c81baef53f221b
revision_id: 71f390dcabfabe884c77bfd0f947c857b44af7a6
branch_name: refs/heads/master
visit_date: 2023-08-11T07:58:28.774341
revision_date: 2021-09-22T06:39:18
committer_date: 2021-09-22T06:39:18
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 320
extension: py
content:
import unittest
from pathlib import Path
from mootdx import config
class TestBestIP(unittest.TestCase):
def setUp(self) -> None:
        conf = Path.home() / '.mootdx' / 'config.json'
        if conf.exists():  # unlink() would raise FileNotFoundError on a first run otherwise
            conf.unlink()
def test_config_setup(self):
config.setup()
if __name__ == '__main__':
unittest.main()
authors: ["ibopo@126.com"]
author_id: ibopo@126.com

---

blob_id: 53059ec1496f3a49add846de1ceb5c1407985d80
directory_id: 56b86dc09bd8f23323b1e0cc1ef1f5d7012a6a13
path: /constant.py
content_id: bcbd90112604d608cd4ab25c68b5491edcdb4908
detected_licenses: []
license_type: no_license
repo_name: hirenchalodiya1/aryabhatta-scripts
snapshot_id: d5222b549164dfe51398c79f76127485c1a2a8be
revision_id: 972c47a23258dbf2e195585b2308e0529b7c2c00
branch_name: refs/heads/main
visit_date: 2021-06-26T05:11:09.764240
revision_date: 2021-06-08T08:48:30
committer_date: 2021-06-08T08:48:30
github_id: 218,305,875
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2019-10-29T14:24:41
gha_created_at: 2019-10-29T14:24:40
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 69
extension: py
content:
from decouple import config
BASE_URL = config("BASE_URL", cast=str)
authors: ["hirenchalodiya99@gmail.com"]
author_id: hirenchalodiya99@gmail.com

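The one-liner above leans on python-decouple's `config` helper, which resolves a key from the process environment or a `.env` file and applies an optional `cast`. A minimal sketch, with hypothetical values:

```python
from decouple import config

# .env (hypothetical):
#   BASE_URL=https://example.com/api
#   DEBUG=true
BASE_URL = config("BASE_URL", cast=str)            # required; raises UndefinedValueError if unset
DEBUG = config("DEBUG", default=False, cast=bool)  # optional, with a default
```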
---

blob_id: f15ffb0a2d4c5067568f7169e5e32b68f119fadf
directory_id: a6135977d88f405099aa5487b24567a91f9f17a9
path: /setup.py
content_id: 5573e9e91de72d3399eb89fe483a91aeacb2c959
detected_licenses: ["MIT"]
license_type: permissive
repo_name: mylh/scrapeblock
snapshot_id: dac5042e5dce811129fe26535a4cc374764c0df4
revision_id: 68c1c2b77c42f5c0121dc65305e6fff51afcb3ca
branch_name: refs/heads/master
visit_date: 2021-01-12T12:03:56.481438
revision_date: 2016-12-03T11:17:06
committer_date: 2016-12-03T11:17:06
github_id: 69,112,705
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,359
extension: py
content:
from setuptools import setup, find_packages
setup(
name='scrapeblock',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1',
description='Block scrapers by IP on Cloudflare by analyzing webserver logs',
# The project's main homepage.
url='https://github.com/mylh/scrapeblock',
# Author details
author='mylh',
author_email='s317011@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# # How mature is this project? Common values are
# # 3 - Alpha
# # 4 - Beta
# # 5 - Production/Stable
'Development Status :: 4 - Beta',
# # Indicate who your project is intended for
        'Intended Audience :: System Administrators',  # 'DevOps' is not a valid trove classifier
# 'Topic :: Software Development :: Build Tools',
# # Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# # Specify the Python versions you support here. In particular, ensure
# # that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='antibot cloudflare ban',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'requests',
'click',
'logbook',
'PyYAML==3.11',
'python-dateutil',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
entry_points={
'console_scripts': [
'scrapeblock=scrapeblock.cli:cli',
],
}
)
authors: ["s317011@gmail.com"]
author_id: s317011@gmail.com

---

blob_id: 9521e3a1b1a99aff90c4e224ac15617d2049819f
directory_id: dc7f63c70b3c25f4698dd19e103df94a0e30fd0c
path: /PyGameStart/практика/e.py
content_id: e1f36892319c294fdddcff0e6ea712ed8e700703
detected_licenses: []
license_type: no_license
repo_name: NikitaPoskrebyshev/Practice
snapshot_id: 76e9f1e54bf737fd2f06618fcbe2fb4e9100b24e
revision_id: df2a4894d9735f34236ad0ec441ba01bf468f92d
branch_name: refs/heads/master
visit_date: 2023-01-02T19:43:00.836131
revision_date: 2020-10-26T14:57:24
committer_date: 2020-10-26T14:57:24
github_id: 293,818,908
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 887
extension: py
content:
import pygame as pg
from random import randint as rn
pg.init()
wind = pg.display.set_mode([825, 540])
white = (255, 255, 255)
black = (000, 000, 000)
green = (0, 200, 64)
yellow = (255, 255, 0)
red = (255, 0, 0)
gray = (125, 125, 125)
x1 = red
x2 = yellow
x3 = green
fps = 30
clock = pg.time.Clock()
n = 0
f = 0
while True:
clock.tick(fps)
    if n % 15 in [0, 1, 2]:
        x1, x2, x3 = red, gray, gray
    elif n % 15 in [3, 4, 5]:  # the dangling "elif n % 15 in [3]" fragment and the mixed "% 9" modulus were bugs
        x1, x2, x3 = gray, yellow, gray
else:
x1, x2, x3 = gray, gray, green
pg.draw.circle(wind, x1, (250, 100), 50)
pg.draw.circle(wind, x2, (250, 200), 50)
pg.draw.circle(wind, x3, (250, 300), 50)
pg.display.update()
pg.display.set_caption(str(n))
f += 1
if f == 30:
n += 1
f = 0
for event in pg.event.get():
if event.type == pg.QUIT:
exit()
authors: ["box051604@hotmail.com"]
author_id: box051604@hotmail.com

---

blob_id: 938e8dd79fb4aa182dc486774b8744540aeb5bf8
directory_id: 56ae394db9ed7b4d041aaa485d0f7a297b66d727
path: /nBayesClassifier.py
content_id: ddffde680fad09677f4baf36e680feff3d13056c
detected_licenses: []
license_type: no_license
repo_name: CSL551/ailab2_nBayes
snapshot_id: 0eb581f21e5c8f3de7a0736d21efe510a7dfbb08
revision_id: 2f3c4bd4835470b6bca55c8b2c148953b3cfa796
branch_name: refs/heads/master
visit_date: 2021-01-21T14:52:33.885981
revision_date: 2017-06-24T04:18:21
committer_date: 2017-06-24T04:18:21
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,883
extension: py
content:
from __future__ import division
import numpy as np
from scipy.sparse import vstack
from getFeature import X, y
def nBayesClassifier(traindata, trainlabel, testdata, testlabel, threshold):
n = traindata.shape[0] # number of training samples
m = traindata.shape[1] # size of Bag of Words
    p_wi_pos = np.zeros(m)  # P(word_i | positive)
    p_wi_neg = np.zeros(m)  # P(word_i | negative)
    countpos = 0  # total number of positive training samples
    countneg = 0  # total number of negative training samples
# training process
print "begin training"
for i in xrange(n):
x = traindata.getrow(i)
if trainlabel[i] == 1:
for j in x.nonzero()[1]:
p_wi_pos[j] += x[0, j]
countpos += 1
else:
for j in x.nonzero()[1]:
p_wi_neg[j] += x[0, j]
countneg += 1
if countpos > 0:
p_wi_pos /= countpos
if countneg > 0:
p_wi_neg /= countneg
    for i in xrange(m):  # clamp per-word probabilities; the arrays have length m (vocabulary), not n (samples)
        p_wi_pos[i] = min(p_wi_pos[i], 1)
        p_wi_neg[i] = min(p_wi_neg[i], 1)
p_pos = countpos / (countpos + countneg)
p_neg = 1 - p_pos
p_wi = p_wi_pos * p_pos + p_wi_neg * p_neg
# test process
print "begin test"
n1 = (testdata.shape)[0] # number of test samples
    p_test = np.zeros(n1)  # positive-class scores for test samples
for i in xrange(n1):
p_test[i] = p_pos
x = testdata.getrow(i)
for wid in x.nonzero()[1]:
wcount = x[0, wid]
if p_wi_pos[wid] > 0: # ensure conditional probability is nonzero
p_test[i] *= (p_wi_pos[wid] ** wcount)
p_test[i] /= (p_wi[wid] ** wcount)
y_pred = np.zeros(n1)
correct_count = 0
for i in xrange(n1):
if p_test[i] >= threshold:
y_pred[i] = 1
else:
y_pred[i] = -1
if y_pred[i] == testlabel[i]:
correct_count += 1
return y_pred, correct_count / n1
n = (X.shape)[0]
foldsize = n // 5
with open("cross_validation.txt", "w") as f:
for threshold in [0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9]:
f.write("when threshold is {}, ".format(threshold))
avg_accuracy = 0.0
for i in xrange(5):
begin = i * foldsize
end = begin + foldsize
traindata = vstack((X[:begin, :], X[end:, :]))
trainlabel = np.concatenate((y[:begin], y[end:]))
testdata = X[begin:end, :]
testlabel = y[begin:end]
(y_pred, accuracy) = nBayesClassifier(traindata, trainlabel, testdata, testlabel, threshold)
avg_accuracy += accuracy
avg_accuracy /= 5
f.write("the average accuracy is {}.\n".format(avg_accuracy))
#nBayesClassifier(X[0 : int(0.8 * n)][:], y[0 : int(0.8 * n)], X[int(0.8 * n) : n][:], y[int(0.8 * n) : n], 0.5)
authors: ["wjszzyx@sohu.com"]
author_id: wjszzyx@sohu.com

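For reference, the score accumulated in the test loop above is the naive-Bayes posterior of the positive class (the code skips words whose positive-class probability is zero rather than multiplying in a zero). With per-document word counts $c_i$:

```latex
P(\mathrm{pos}\mid d)
  = \frac{P(\mathrm{pos})\,\prod_i P(w_i\mid \mathrm{pos})^{c_i}}
         {\prod_i P(w_i)^{c_i}},
\qquad
P(w_i) = P(w_i\mid \mathrm{pos})\,P(\mathrm{pos})
       + P(w_i\mid \mathrm{neg})\,P(\mathrm{neg}).
```

A document is labeled positive when this score reaches the threshold, which the cross-validation loop sweeps from 0.5 to 0.9.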
---

blob_id: b1db8ab3edf1feb0d31266aff843f54b894baf65
directory_id: 217d69e096e96a50a85a9888424d9b368ce0e605
path: /models/engine/file_storage.py
content_id: 222e10f34c8e23860f96a1e5c8ce1c1fc5e328fe
detected_licenses: []
license_type: no_license
repo_name: 02KellyV/Example_airbnb
snapshot_id: e6cae8b98252b1544245038d33ba4106596861f2
revision_id: daebcf30d2bb8345d15fa3a957b192e1ae0374b2
branch_name: refs/heads/master
visit_date: 2021-01-07T10:59:27.347146
revision_date: 2020-02-19T16:50:33
committer_date: 2020-02-19T16:50:33
github_id: 241,670,426
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,415
extension: py
content:
import json
import os.path
from models.base_model import BaseModel
class FileStorage:
"""Class for Serializes and Deserializes"""
__file_path = "file.json" #is a file
__objects = {} #is a dict
def all(self):
"""returns the dict __objects"""
return self.__objects
def new(self, obj):
"""sets in __objects the key <obj class name>.id and assign obj entire"""
key = obj.__class__.__name__ + "." + obj.id #class name of an obj + id
self.__objects[key] = obj
#self.__objects.update({key, obj})
def save(self):
"""serializes __objects to the JSON file (path: __file_path)"""
newdict_objs = {} #to store the info that will save
        for key, val in self.__objects.items():  # iterate over each key/val pair (iterating the dict alone yields only keys)
            newdict_objs[key] = val.to_dict()
        with open(self.__file_path, 'w') as json_f:  # file handling
            json_f.write(json.dumps(newdict_objs))  # dumps: encode json data
#converts dict object into JSON string data format and write to file
def reload(self):
"""deserializes the JSON file to __objects"""
if os.path.isfile(self.__file_path):
with open(self.__file_path, 'r') as json_f:
                othrdict_objs = json.load(json_f)  # load: decode json data from the file object (loads expects a string)
for key, val in othrdict_objs.items():
self.__objects[key] = BaseModel(**val)
authors: ["900@holbertonschool.com"]
author_id: 900@holbertonschool.com

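A minimal round-trip sketch of the engine above, assuming `BaseModel` (imported in the file) exposes `id` and `to_dict()` and can be rebuilt from `**kwargs`, as `reload()` implies:

```python
from models.base_model import BaseModel
from models.engine.file_storage import FileStorage

storage = FileStorage()
obj = BaseModel()
storage.new(obj)             # registers the object under "BaseModel.<id>"
storage.save()               # serializes __objects to file.json
storage.reload()             # rebuilds __objects from file.json
print(storage.all().keys())  # dict_keys(['BaseModel.<id>'])
```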
---

blob_id: f3ca0981b6eac4ddc276ebdf8e53f165944caf80
directory_id: 6022bddd63001e4f259cbf472237976c3c2d8be3
path: /Restricted_cover_drone/SA for drone Mk5.3.py
content_id: 1ef0e95ebdab530bdff1403dcfae6739e1c7b56f
detected_licenses: []
license_type: no_license
repo_name: iaminsu/Python
snapshot_id: 772bfe661fa0abae6c11d899ddc388f36f9f3ac7
revision_id: 29b518e3075c4b7cc2d798fed28271b5790573d1
branch_name: refs/heads/master
visit_date: 2021-01-16T18:07:28.212444
revision_date: 2017-02-20T20:00:51
committer_date: 2017-02-20T20:00:51
github_id: 100,039,101
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 36,304
extension: py
content:
#Simulated annealing for location optimization of recharging stations for a drone delivery system
#MCLP
#
# 1. uses pre-made dictionaries from ArcGIS for the attribute tables: read order always follows FID, so it is maintained.
# 2. spatial relationships are assessed with shapely
#    - ESP distance should be utilized to measure the distance between locations
#    - ArcGIS is way too slow
# 3. spatially restricted interchange heuristic
# 4. brown field: part of the solution is fixed (warehouses)
# 5. point demand representation
#Mk2:
# - add distance restriction
#Mk4:
# dual objective, but minimizing both of them
# by turning the demand objective into uncovered demand
#Mk5:
# new approach: no dual objective, only covered demand counts toward the objective value
# minimize graph generation & evaluation
# eliminate redundancy
# new interchange algorithm
# bug fix: network removal
#Issues: the solution stops changing after some point. Need to change either or both of greedy_fill and
# spatial_interchange_mk3
#Mk5.2:
# fix issue with the new dictionary files
#Mk5.3:
# modification of the SA algorithm:
# - "remember" the best solution ever; if the final solution is inferior to the recorded best solution, roll back to the best.
import pysal, shapefile, networkx, time, cPickle, random, math, copy, Convexpath_module
from shapely.geometry import Point, Polygon, LineString, MultiPoint, MultiPolygon
from collections import defaultdict
path = "/Users/insuhong/Dropbox/research/Distance restricted covering model/Locating recharging station/data4/"
ffDict = "FF_old_Dictsample_sites_2.shp_sample_demand_2_p.shp_obstacles_p.shp.txt"
obstacles_f = "obstacles_p"
fdDict = "FD_Dictsample_sites_2.shp_sample_demand_2_p.shp_obstacles_p.shp.txt"
demand_Dict = "demands.txt"
facilities_f = "sample_sites_2.shp"
demands_f = "sample_demand_2_p.shp"
ffcords = "FF_coords_Dictsample_sites_2.shp_sample_demand_2_p.shp_obstacles_p.shp.txt"
#loading matrices & initialize variables
def generateGeometry(in_shp):
resultingGeometry = []
if in_shp.header['Shape Type'] == 1:
for i in range(len(in_shp)):
resultingGeometry.append(Point(in_shp.get_shape(i)['X'], in_shp.get_shape(i)['Y']))
elif in_shp.header['Shape Type'] == 3:
for i in range(len(in_shp)):
resultingGeometry.append(LineString(in_shp.get_shape(i)['Vertices']))
elif in_shp.header['Shape Type'] == 5:
for i in range(len(in_shp)):
resultingGeometry.append(Polygon(in_shp.get_shape(i)['Vertices']))
return resultingGeometry
def cal_obj(in_solution):
covered = []
obj = 0
for site in in_solution:
for i in F_Ddict[site]:
covered.append(i[0])
covered = list(set(covered))
#print covered
for de in covered:
obj += float(dDict[de])
return obj
def chk_isolation(in_sol, wh_ids):
#return false if sites are linked to any of warehouses
#assume that the system allows separate delivery network from each warehouse
result = []
for i in in_sol:
if len(result) == 0:
result.append(facil_shp[i].buffer(fd_fullPayload))
else:
result[0] = result[0].union(facil_shp[i].buffer(fd_fullPayload))
if result[0].type == "MultiPolygon":
indi = True
for poly in result[0]:
indi_poly = False
for warehouse in wh_ids:
if poly.intersects(facil_shp[warehouse]):
indi_poly = True
if indi_poly == False:
indi = False
break
if indi == True:
return False
else:
return True
else:
return False
def chk_feasibility(in_solution, save):
feasibility = True
covers = {}
for site in in_solution:
if len(covers) == 0:
covers[in_solution.index(site)] = facil_shp[site].buffer(fd_fullPayload)
else:
in_list = []
for key in covers:
if covers[key].intersects(facil_shp[site]):
in_list.append(key)
area = covers[key].union(facil_shp[site].buffer(fd_fullPayload))
covers[key] = area
if len(in_list) == 0:
covers[in_solution.index(site)] = facil_shp[site].buffer(fd_fullPayload)
elif len(in_list) > 1:
chunk = covers[in_list[0]]
for i in in_list:
chunk = chunk.union(covers[i])
for i in in_list:
covers.pop(i)
covers[in_solution.index(site)] = chunk
for key in covers:
indi = False
for warehouse in warehouses_ID:
if covers[key].intersects(facil_shp[warehouse]):
indi = True
break
if indi == False:
feasibility = False
if save == True:
w = shapefile.Writer(shapefile.POLYGON)
w.field('net')
for key in covers:
w.poly(parts=[[list(x) for x in list(covers[key].exterior.coords)]])
w.record('ff')
w.save(path + "area")
return feasibility
def chk_feasibility_all (in_solution, save):
feasibility = True
covers = {}
for site in in_solution:
if len(covers) == 0:
covers[in_solution.index(site)] = facil_shp[site].buffer(fd_fullPayload)
else:
in_list = []
for key in covers:
if covers[key].intersects(facil_shp[site]):
in_list.append(key)
area = covers[key].union(facil_shp[site].buffer(fd_fullPayload))
covers[key] = area
if len(in_list) == 0:
covers[in_solution.index(site)] = facil_shp[site].buffer(fd_fullPayload)
elif len(in_list) > 1:
chunk = covers[in_list[0]]
for i in in_list:
chunk = chunk.union(covers[i])
for i in in_list:
covers.pop(i)
covers[in_solution.index(site)] = chunk
if len(covers) == 1:
feasibility = True
else:
feasibility = False
if save == True:
w = shapefile.Writer(shapefile.POLYGON)
w.field('net')
for key in covers:
w.poly(parts=[[list(x) for x in list(covers[key].exterior.coords)]])
w.record('ff')
w.save(path + "area")
return feasibility
#network-based feasibility check?
def nn_distance(in_solution):
distance_list = []
for site in in_solution:
dis_list = []
for i in [x for x in in_solution if not x == site]:
dis_list.append((facil_shp[site].distance(facil_shp[i]), i))
dis_list.sort()
distance_list.append((dis_list[0][0], site, dis_list[0][1]))
distance_list.sort()
return distance_list
def removal(in_solution, remove_no):
nn_dist = nn_distance(in_solution)
#print nn_dist
for i in range(remove_no):
if nn_dist[i][2] in in_solution:
in_solution.remove(nn_dist[i][2])
return in_solution
def delivery_network(in_solution, s_file, in_name = "temp_graph"):
arc_list = []
arc_shp_list = []
connectivity = True
resultingGraph = networkx.Graph()
for i in range(len(in_solution)-1):
sites = [x[0] for x in F_Fdict[in_solution[i]]]
for j in range(i+1, len(in_solution)):
if in_solution[j] in sites:
resultingGraph.add_edge((facil_shp[in_solution[i]].x, facil_shp[in_solution[i]].y), (facil_shp[in_solution[j]].x, facil_shp[in_solution[j]].y), weight = F_Fdict2[in_solution[i]][in_solution[j]])
arc_list.append("ESP_" + str(in_solution[i]) + "_" + str(in_solution[j]) + ".shp")
for i in range(len(warehouse_coords)-1):
for j in range(i+1, len(warehouse_coords)):
try:
route = networkx.dijkstra_path(resultingGraph, warehouse_coords[i], warehouse_coords[j])
except:
connectivity = False
break
if connectivity == False:
break
for site in in_solution:
for whouse in warehouse_coords:
try:
route = networkx.dijkstra_path(resultingGraph, (facil_shp[site].x, facil_shp[site].y), whouse)
except:
connectivity = False
break
if connectivity == False:
break
if connectivity == True:
if s_file == True:
w = shapefile.Writer(shapefile.POLYLINE)
w.field('nem')
for line in arc_shp_list:
w.line(parts=[[ list(x) for x in list(line.coords)]])
w.record('chu')
w.save(path + in_name)
return resultingGraph
else:
return None
def delivery_network_mk2(in_solution, s_file, in_name = "temp_graph"):
arc_list = []
arc_shp_list = []
connectivity = True
resultingGraph = networkx.Graph()
for i in range(len(in_solution)-1):
sites = [x[0] for x in F_Fdict[in_solution[i]]]
for j in range(i+1, len(in_solution)):
if in_solution[j] in sites:
resultingGraph.add_edge((facil_shp[in_solution[i]].x, facil_shp[in_solution[i]].y), (facil_shp[in_solution[j]].x, facil_shp[in_solution[j]].y), weight = F_Fdict2[in_solution[i]][in_solution[j]])
arc_list.append("ESP_" + str(in_solution[i]) + "_" + str(in_solution[j]) + ".shp")
if s_file == True:
for arc in arc_list:
arc_pysal = pysal.IOHandlers.pyShpIO.shp_file(path+arc)
arc_shp = generateGeometry(arc_pysal)
arc_shp_list.extend(arc_shp)
w = shapefile.Writer(shapefile.POLYLINE)
w.field('nem')
for line in arc_shp_list:
w.line(parts=[[ list(x) for x in list(line.coords)]])
w.record('chu')
w.save(path + in_name)
return resultingGraph
def delivery_network_mk3(in_solution, s_file, in_name = "temp_graph"):
#check connectivity between warehouses
arc_list = []
arc_shp_list = []
connectivity = True
resultingGraph = networkx.Graph()
for i in range(len(in_solution)-1):
sites = [x[0] for x in F_Fdict[in_solution[i]]]
for j in range(i+1, len(in_solution)):
if in_solution[j] in sites:
resultingGraph.add_edge((facil_shp[in_solution[i]].x, facil_shp[in_solution[i]].y), (facil_shp[in_solution[j]].x, facil_shp[in_solution[j]].y), weight = F_Fdict2[in_solution[i]][in_solution[j]])
arc_list.append("ESP_" + str(in_solution[i]) + "_" + str(in_solution[j]) + ".shp")
for i in range(len(warehouse_coords)-1):
for j in range(i+1, len(warehouse_coords)):
try:
route = networkx.dijkstra_path(resultingGraph, warehouse_coords[i], warehouse_coords[j])
except:
connectivity = False
break
if connectivity == False:
break
if connectivity == True:
if s_file == True:
w = shapefile.Writer(shapefile.POLYLINE)
w.field('nem')
for line in arc_shp_list:
w.line(parts=[[ list(x) for x in list(line.coords)]])
w.record('chu')
w.save(path + in_name)
return resultingGraph
else:
return None
def generate_graph(in_solution):
arc_list = []
arc_shp_list = []
for i in range(len(in_solution)-1):
sites = [x[0] for x in F_Fdict[in_solution[i]]]
for j in range(i+1, len(in_solution)):
if in_solution[j] in sites:
arc_list.append("ESP_" + str(in_solution[i]) + "_" + str(in_solution[j]) + ".shp")
resultingGraph = networkx.Graph()
for arc in arc_list:
arc_pysal = pysal.IOHandlers.pyShpIO.shp_file(path+arc)
arc_shp = generateGeometry(arc_pysal)
arc_shp_list.extend(arc_shp)
for line in arc_shp:
resultingGraph.add_edge(list(line.coords)[0], list(line.coords)[1], weight = line.length)
w = shapefile.Writer(shapefile.POLYLINE)
w.field('nem')
for line in arc_shp_list:
w.line(parts=[[ list(x) for x in list(line.coords)]])
w.record('chu')
w.save(path + "in_name")
return resultingGraph
def restricted_cadidates(in_solution): #for spatial_interchange:
candis = []
for site in in_solution:
for i in F_Fdict[site]:
if i[1] > min_dist:
candis.append(i[0])
too_close = []
for site in in_solution:
too_close.extend(F_F_close_d[site])
candis = list(set(candis))
candis = [x for x in candis if x not in in_solution]
candis = [x for x in candis if x not in too_close]
return candis
def spatial_interchange(in_solution):
print "interchange start"
c = restricted_cadidates(in_solution)
print len(c)
flag = True
while flag == True:
flag = False
while len(c) != 0:
candi = random.choice(c)
c.remove(candi)
current_obj = [in_solution, cal_obj(in_solution)]
removable_solution = [x for x in in_solution if x not in warehouses_ID]
for site in removable_solution:
temp_sol = []
temp_sol.extend(removable_solution)
temp_sol.remove(site)
temp_sol.append(candi)
temp_sol.extend(warehouses_ID)
temp_obj = cal_obj(temp_sol)
if temp_obj > current_obj[1]:
if chk_isolation(temp_sol, warehouses_ID) == False: #prevent island in solution
flag = True
current_obj = [temp_sol, temp_obj]
if flag == True:
in_solution = current_obj[0]
print "interchange finished"
return in_solution
def spatial_interchange_fast(in_solution):
#print "interchange start"
c = restricted_cadidates(in_solution)
#print len(c)
flag = True
while flag == True:
flag = False
while len(c) != 0:
candi = random.choice(c)
c.remove(candi)
current_obj = [in_solution, cal_obj(in_solution)]
removable_solution = [x for x in in_solution if x not in warehouses_ID]
for site in removable_solution:
temp_sol = []
temp_sol.extend(removable_solution)
temp_sol.remove(site)
temp_sol.append(candi)
temp_sol.extend(warehouses_ID)
temp_obj = cal_obj(temp_sol)
if temp_obj > current_obj[1]:
if chk_isolation(temp_sol, warehouses_ID) == False: #prevent island in solution
flag = True
current_obj = [temp_sol, temp_obj]
break
if flag == True:
in_solution = current_obj[0]
flag = False
break
#print "interchange finished"
return in_solution
def spatial_interchage_mk2(in_solution):
flag = True
while flag == True:
current_obj = [in_solution, cal_obj(in_solution)]
removable_solution = [x for x in in_solution if x not in warehouses_ID]
for site in removable_solution:
temp_sol = copy.copy(removable_solution)
temp_sol.remove(site)
candis = restricted_cadidates(temp_sol)
candis = [x for x in candis if not x == site]
for c in candis:
temp2_sol = copy.copy(temp_sol)
if c == site:
continue
else:
temp2_sol.append(c)
temp2_sol.extend(warehouses_ID)
temp2_obj = cal_obj(temp2_sol)
if temp2_obj > current_obj[1]:
if delivery_network_mk3(temp2_sol, False) != None:
#if chk_feasibility_all(temp2_sol, False):
flag = False
in_solution = []
in_solution = copy.copy(temp2_sol)
break
if flag == False:
break
return in_solution
def spatial_interchage_mk3(in_solution):
#modified interchange algorithm
#conventional interchange algorithm cannot be applied since candidate set needs to be updated after any change in
#current solution. So, this interchange algorithm iterate for each site in current solution,
#1) if a site is critical site: find better site that can maintaining connection
#2) if a site is not critical site: find better site from restricted candidate set for all other sites in current solution
current_obj = [in_solution, cal_obj(in_solution)]
in_graph = delivery_network_mk2(in_solution, False)
temp_sol = copy.copy(in_solution)
for site in temp_sol:
if site not in warehouses_ID:
temp_sol2 = copy.copy(in_solution)
temp_sol2.remove(site)
if delivery_network_mk3(temp_sol2, False) == None: #site is critical node
                #then only candidates that can re-establish the connection are considered
adj_nodes = in_graph[(facil_shp[site].x, facil_shp[site].y)].keys()
candis = restricted_cadidates([adj_nodes[0]])
for i in adj_nodes:
                    candis = [x for x in candis if x in restricted_cadidates([i])]  # call the function; subscripting it was a bug
for c in candis:
temp2_obj = cal_obj(temp_sol2 + [c])
if temp2_obj > current_obj[1]:
in_solution = temp_sol2 + [c]
current_obj = [in_solution, cal_obj(in_solution)]
else: #non-critical node
candis = restricted_cadidates(temp_sol2)
for c in candis:
temp2_obj = cal_obj(temp_sol2 + [c])
if temp2_obj > current_obj[1]:
if delivery_network(temp_sol2, False) != None:
in_solution = temp_sol2 + [c]
current_obj = [in_solution, cal_obj(in_solution)]
return in_solution
def spatial_interchage_mk4(in_solution):
#modified interchange algorithm
#conventional interchange algorithm cannot be applied since candidate set needs to be updated after any change in
    #current solution. This interchange algorithm changes only ONE site!
    #1) if a site is a critical site: find a better site that can maintain the connection
    #2) if a site is not a critical site: find a better site from the restricted candidate set for all other sites in the current solution
current_obj = [in_solution, cal_obj(in_solution)]
in_graph = delivery_network_mk2(in_solution, False)
temp_sol = copy.copy(in_solution)
for site in temp_sol:
indi = False
if site not in warehouses_ID:
temp_sol2 = copy.copy(in_solution)
temp_sol2.remove(site)
if delivery_network_mk3(temp_sol2, False) == None: #site is critical node
                #then only candidates that can re-establish the connection are considered
adj_nodes = in_graph[(facil_shp[site].x, facil_shp[site].y)].keys()
candis = restricted_cadidates([adj_nodes[0]])
for i in adj_nodes:
if len(candis) == 0:
candis = restricted_cadidates([F_FCoords[i]])
else:
candis = [x for x in candis if x in restricted_cadidates([F_FCoords[i]])]
for c in candis:
temp2_obj = cal_obj(temp_sol2 + [c])
if temp2_obj > current_obj[1]:
in_solution = temp_sol2 + [c]
current_obj = [in_solution, cal_obj(in_solution)]
indi = True
else: #non-critical node
candis = restricted_cadidates(temp_sol2)
for c in candis:
temp2_obj = cal_obj(temp_sol2 + [c])
if temp2_obj > current_obj[1]:
if delivery_network(temp_sol2, False) != None:
in_solution = temp_sol2 + [c]
current_obj = [in_solution, cal_obj(in_solution)]
indi = True
if indi == True:
break
return in_solution
def greedy_fill(in_solution=[]):
isolation = True
tt = 0
while isolation == True:
obj_time = 0
new_sol = []
stime = time.time()
new_sol = copy.copy(in_solution)
c_obj = cal_obj(new_sol)
loop_no = 0
pool_len = 0
while len(new_sol) < p:
loop_no += 1
#print new_sol
pool = restricted_cadidates(new_sol)
pool_len += len(pool)
temp = []
stime_l = time.time()
for i in pool:
temp_obj = cal_obj(new_sol + [i])
temp.append((temp_obj, i))
etime_l = time.time()
obj_time += etime_l - stime_l
temp.sort()
temp.reverse()
c_obj = temp[0][0]
new_sol = new_sol + [temp[0][1]]
if delivery_network(new_sol, False) != None:
#if chk_feasibility_all(new_sol, False):
in_solution =[]
in_solution = copy.copy(new_sol)
isolation = False
etime = time.time()
tt += etime - stime
if tt > 600:
print "greedy failed"
print tt
print new_sol
nn = delivery_network_mk2(new_sol, True, "failed_greedy")
chk_feasibility_all(new_sol, True)
f = raw_input()
#print "total time: ", tt
#print "obj time: ", obj_time
#print "average pool: ", float(pool_len)/loop_no
return in_solution
def greedy_fill_mk2(in_solution):
pass
def random_fill(in_solution=[]):
isolation = True
tt = 0
while isolation == True:
stime = time.time()
new_sol = []
new_sol = copy.copy(in_solution)
while len(new_sol) < p:
random_pool = restricted_cadidates(new_sol)
new_sol.append(random.choice(random_pool))
if chk_feasibility_all(new_sol, False):
in_solution = []
in_solution = copy.copy(new_sol)
isolation = False
#etime = time.time()
#tt += etime - stime
#if tt > 20:
#print tt
#print new_sol
#chk_feasibility_all(new_sol, True)
#f = raw_input()
return in_solution
def random_fill_mk2(in_solution):
# 1)2 warehouses case:
# - generate a corridor using ESP between them
# - random select facilities in the corridor until warehoused are connected
# - random select remaining facilities
# 2)More-than-2 warehouses case:
# - generate a convex hull for warehouses
# - derive centroid of convex hull
# - generate a corridor based on the ESPs that connect from warehoused to centroid
# - random select facilities in the corridor until warehoused are connected
# - random select remaining facilities
isolation = True
if len(warehouses_ID) == 2:
w_origin = facil_shp[warehouses_ID[0]]
w_destination = facil_shp[warehouses_ID[1]]
a = Convexpath_module.Convexpath_shapely(path, w_origin, w_destination, obstacles_shp)
w_esp = a.esp #esp is Linestring object
w_corridor = w_esp.buffer(fd_delivery*0.5)
else:
w_points = []
for i in warehouse_coords:
w_points.append(i)
w_mp = MultiPoint(w_points)
w_ch = w_mp.convex_hull
w_cp = w_ch.centroid
w_corridor = []
for i in warehouse_coords:
a = Convexpath_module.Convexpath_shapely(path, Point(i), w_cp, obstacles_shp)
w_corridor.append(a)
#w = shapefile.Writer(shapefile.POLYGON)
#w.field('net')
#for obs in [w_corridor]:
#w.poly(parts=[[list(x) for x in list(obs.exterior.coords)]])
#w.record('ff')
#w.save(path + "w_corridor")
while isolation == True:
new_sol = []
new_sol = copy.copy(in_solution)
while len(new_sol) < p:
if delivery_network_mk3(new_sol, False) == None:
random_pool = restricted_cadidates(new_sol)
#print new_sol
#print random_pool
corridor_pool = []
for i in random_pool:
if w_corridor.intersects(facil_shp[i]):
corridor_pool.append(i)
if len(corridor_pool) != 0:
new_sol.append(random.choice(corridor_pool))
else:
new_sol.append(random.choice(random_pool))
else:
random_pool = restricted_cadidates(new_sol)
new_sol.append(random.choice(random_pool))
if delivery_network(new_sol, False) != None:
in_solution = []
in_solution = copy.copy(new_sol)
isolation = False
return in_solution
def network_removal (in_solution):
#remove certain number of sites from solution. But if a site is part of critical link between warehouses,
#the site will not be removed.
#sites are randomly selected (not based on nn distance)
#if some sites are separated from the delivery network, remove them also regardless of removal number.
remove_no = int(remove_percent * len(in_solution))
sol_wo_wh = [x for x in in_solution if not x in warehouses_ID]
while remove_no > 0:
r_site = random.choice(sol_wo_wh)
temp = copy.copy(sol_wo_wh)
temp.extend(warehouses_ID)
temp.remove(r_site)
        temp_graph = delivery_network(temp, False)  # delivery_network requires the s_file flag
#print temp
#print remove_no
if temp_graph != None:
sol_wo_wh.remove(r_site)
remove_no -= 1
sol_wo_wh.extend(warehouses_ID)
    temp_graph = delivery_network(sol_wo_wh, False)
additional_removal = []
for site in sol_wo_wh:
if site not in warehouses_ID:
site_coords = (facil_shp[site].x, facil_shp[site].y)
for whouse in warehouse_coords:
try:
route = networkx.dijkstra_path(temp_graph, site_coords, whouse)
except:
additional_removal.append(site)
break
sol_wo_wh = [x for x in sol_wo_wh if not x in additional_removal]
return sol_wo_wh
def network_removal_mk2 (in_solution):
#remove certain number of sites from solution. But if a site is part of critical link between warehouses,
#the site will not be removed.
#sites are randomly selected (not based on nn distance)
#if some sites are separated from the delivery network, remove them also regardless of removal number.
remove_no = int(remove_percent * len(in_solution))
#print remove_no
removable_sites = []
#print "in_solution:", in_solution
for site in in_solution:
if site not in warehouses_ID:
temp = copy.copy(in_solution)
temp.remove(site)
temp_graph = delivery_network_mk3(temp, False)
if temp_graph != None:
removable_sites.append(site)
if len(removable_sites) < remove_no:
remove_no = len(removable_sites)
#print "removeable,", removable_sites
#print remove_no
while remove_no > 0:
r_site = random.choice(removable_sites)
removable_sites.remove(r_site)
in_solution.remove(r_site)
temp_graph2 = delivery_network_mk3(in_solution, False)
if temp_graph2 == None:
in_solution.append(r_site)
else:
remove_no -= 1
#print "removed", in_solution
temp_graph = delivery_network_mk2(in_solution, True)
additional_removal = []
#print temp_graph
for site in in_solution:
if site not in warehouses_ID:
site_coords = (facil_shp[site].x, facil_shp[site].y)
for whouse in warehouse_coords:
try:
route = networkx.dijkstra_path(temp_graph, site_coords, whouse)
except networkx.exception.NetworkXNoPath:
additional_removal.append(site)
break
except KeyError:
additional_removal.append(site)
in_solution = [x for x in in_solution if not x in additional_removal]
if len(in_solution) < 5:
print "shit again?"
print "r", additional_removal
print in_solution
r = raw_input()
return in_solution
f_FF = open(path + ffDict)
f_FD = open(path + fdDict)
f_demand = open(path + demand_Dict, 'rb')
F_Fdict = cPickle.load(f_FF)
F_Fdict2 = defaultdict(dict)
for i in F_Fdict:
for j in F_Fdict[i]:
F_Fdict2[i][j[0]] = j[1]
#F_Fdict2 = cPickle.load(open(path + ff2Dict))
F_Ddict = cPickle.load(f_FD)
F_FCoords = cPickle.load(open(path+ ffcords))
facil_pysal = pysal.IOHandlers.pyShpIO.shp_file(path+facilities_f)
demand_pysal = pysal.IOHandlers.pyShpIO.shp_file(path + demands_f)
obstacles_pysal = pysal.IOHandlers.pyShpIO.shp_file(path + obstacles_f)
obstacles_shp = generateGeometry(obstacles_pysal)
dDict = cPickle.load(f_demand)
facil_shp = generateGeometry(facil_pysal)
demand_shp = generateGeometry(demand_pysal)
warehouses_ID = [127,324] #id_f of warehouses
warehouse_coords = []
for warehouse in warehouses_ID:
warehouse_coords.append((facil_shp[warehouse].x, facil_shp[warehouse].y))
solution_sites = []
covered_demand = []
objective_value = 0
p = 25 #
temperature = 30 #end temperature
max_iter = 3 #iteration limit
terminate_temp = 1
temp_ratio = 0.15
sa_count = 0
remove_percent = 0.2
fd_fullPayload = 5 * 5280
fd_empty = 10 * 5280
fd_delivery = 3.33 *5280
min_dist = fd_delivery *0.6
rc = 0.001
rc_obj = 0.1
total_demand = 0.0
F_F_close_d = defaultdict(list)
for i in F_Fdict:
for j in F_Fdict[i]:
if j[1] <= min_dist:
F_F_close_d[i].append(j[0])
for i in dDict:
total_demand += float(dDict[i])
#initializing seed solution (random)
print "initializing solution"
solution_sites.extend(warehouses_ID)
solution_sites = random_fill_mk2(solution_sites)
#print solution_sites
solution_graph = delivery_network_mk2(solution_sites, True)
print "solution initialized"
best_solution = [solution_sites, cal_obj(solution_sites)]
while temperature > 0.5:
current_solution = copy.copy(solution_sites)
    current_graph = delivery_network_mk2(current_solution, True, "current_graph")
current_obj = cal_obj(current_solution)
print "current Objective value: ", current_obj
new_solution = copy.copy(current_solution)
s_time = time.time()
new_solution = network_removal_mk2 (new_solution)
e_time = time.time()
#print "removed", new_solution
#print "removal time 2: ", e_time - s_time
#print "fill start"
#print "removed obj: ", cal_obj(new_solution)
new_solution = spatial_interchage_mk4(new_solution)
#print "improved obj before greedy: ", cal_obj(new_solution)
s_time = time.time()
new_solution = greedy_fill(new_solution)
    n_graph = delivery_network_mk2(new_solution, True, "greedy_graph")
e_time = time.time()
#print "fill time: ", e_time - s_time
#print new_solution
#print "spatial interchange start"
s_time = time.time()
new_solution = spatial_interchage_mk4(new_solution)
e_time = time.time()
#print "interchange time: ", e_time - s_time
new_graph = delivery_network_mk2(new_solution, True, "new_solution")
new_obj = cal_obj(new_solution)
print new_obj
#print new_obj - current_obj
if new_obj > current_obj:
if new_obj < best_solution[1]:
sa_count += 1
print "SA COUNT: ", sa_count
if random.random() < math.exp((new_obj - best_solution[1])*rc/temperature):
solution_sites = new_solution
print "new but not best"
if sa_count >= max_iter:
sa_count = 0
temperature = temperature - (temperature * temp_ratio)
print "new temperature: ", temperature
if temperature < terminate_temp:
break
else:
solution_sites = best_solution[0]
print "ignore new one roll back to the best"
if sa_count >= max_iter:
sa_count = 0
temperature = temperature - (temperature * temp_ratio)
print "new temperature: ", temperature
if temperature < terminate_temp:
break
else:
solution_sites = new_solution
best_solution = [new_solution, new_obj]
sa_count = 0
print "new solution accepted"
print "best solution so far: ", best_solution[1]
#print "new objective value: ", new_obj
#print "new solution: ", solution_sites
else:
if sa_count >= max_iter:
sa_count = 0
temperature = temperature - (temperature * temp_ratio)
print "new temperature: ", temperature
if temperature < terminate_temp:
break
else:
sa_count += 1
print "SA COUNT: ", sa_count
#print (new_obj - current_obj)*rc/temperature
if math.exp((new_obj - current_obj)*rc/temperature) == 1:
print "stucted"
print "s"
if random.random() < math.exp((new_obj - current_obj)*rc/temperature):
solution_sites = new_solution
#print "bad solution accepted"
print "new but bad objective: ", new_obj
#print "new but bad solution: ", new_solution
if sa_count >= max_iter:
sa_count = 0
temperature = temperature - (temperature * temp_ratio)
print "new temperature: ", temperature
if temperature < terminate_temp:
break
else:
if sa_count >= max_iter:
sa_count = 0
temperature = temperature - (temperature * temp_ratio)
print "new temperature: ", temperature
if temperature < terminate_temp:
break
print "solution"
solution_obj = cal_obj(solution_sites)
if solution_obj > best_solution[1]:
print "final solution: ", solution_sites
print "Objective value: ", solution_obj
final_graph = delivery_network_mk2(solution_sites, True, "final_solution")
else:
print "final solution: ", best_solution[0]
print "Objective value: ", best_solution[1]
final_graph = delivery_network_mk2(best_solution[0], True, "final_solution")
authors: ["iaminsu@gmail.com"]
author_id: iaminsu@gmail.com

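The main loop above mixes removal, interchange, and greedy fill with a Metropolis acceptance test and the Mk5.3 "remember the best" roll-back. A stripped-down sketch of just that accept/roll-back skeleton; `objective` and `neighbor` are hypothetical stand-ins for the removal/fill pipeline:

```python
import math
import random

def anneal(solution, objective, neighbor, t=30.0, t_end=1.0, ratio=0.15, rc=0.001):
    """Maximize objective(solution); both callables are hypothetical stand-ins."""
    best = (solution, objective(solution))
    while t > t_end:
        cand = neighbor(solution)
        delta = objective(cand) - objective(solution)
        # accept improvements outright, worse moves with Boltzmann probability
        if delta > 0 or random.random() < math.exp(delta * rc / t):
            solution = cand
            if objective(solution) > best[1]:
                best = (solution, objective(solution))
        t -= t * ratio  # geometric cooling, as in the loop above
    return best  # roll back to the recorded best (the Mk5.3 modification)
```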
---

blob_id: 7fddcf6995bd4055ff3467b8b17574821ff7e605
directory_id: 141711061038d0a88eab8182e574fcdb2a70c57e
path: /instagramcrawler/pipelines.py
content_id: 3a1f7d4033c538bfbfe01f1033e358f39e222a74
detected_licenses: []
license_type: no_license
repo_name: NISH1001/instagramcrawler
snapshot_id: c0293bfa5e4db6356eefca9980f540acdf114443
revision_id: 13e789bd2aeae70012b4c1e3e318dd8611b05f9b
branch_name: refs/heads/master
visit_date: 2021-01-20T18:36:02.749125
revision_date: 2016-07-25T12:14:49
committer_date: 2016-07-25T12:14:49
github_id: 64,131,184
star_events_count: 10
fork_events_count: 3
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,355
extension: py
content:
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class InstagramcrawlerPipeline(object):
def __init__(self):
self.owner = {}
self.followers_data = []
        self.following_data = []
self.filename = "./data/dump.json"
def process_item(self, item, spider):
print("-" * 50)
data = dict(item)
# pipeline the item accordingly
if item['item_type'] == "owner":
del data['item_type']
self.owner = data
elif item['item_type'] == "follower":
del data['item_type']
self.followers_data.append(data)
else:
del data['item_type']
self.following_data.append(data)
return data
# when spider closes, dump the data to json
def close_spider(self, spider):
print("-" * 50)
print("inside close_spider()")
data = self.owner
data['followers'] = self.followers_data
data['following'] = self.following_data
self.dump(self.filename, data)
def dump(self, filename, data):
configstr = json.dumps(data, indent=4)
with open(filename, "w") as outfile:
outfile.write(configstr)
authors: ["nishanpantha@gmail.com"]
author_id: nishanpantha@gmail.com

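For this pipeline to receive items, it must be enabled in the crawler's settings.py; the priority value 300 below is a conventional but arbitrary choice:

```python
# settings.py (excerpt)
ITEM_PIPELINES = {
    "instagramcrawler.pipelines.InstagramcrawlerPipeline": 300,
}
```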
---

blob_id: f88d91ca6023dd9b7839346963706e6bbbc89ea8
directory_id: 2591b2b409b8cefedd6b226f0381aafea2aa123c
path: /leads/signals.py
content_id: 88fc047ef17a8bcc1b5cfff148eed68560c6d1b4
detected_licenses: ["MIT"]
license_type: permissive
repo_name: coderj001/Django-CRM
snapshot_id: ae5f0cb6b6c5b5dba1e0713c32978f49b021e39f
revision_id: 7cca0df5d39b92082781047c1f0a11129179f257
branch_name: refs/heads/master
visit_date: 2023-08-14T02:38:26.534561
revision_date: 2021-09-15T06:38:45
committer_date: 2021-09-15T06:38:45
github_id: 369,114,686
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 297
extension: py
content:
from django.db.models.signals import post_save
from django.dispatch import receiver
from leads.models import User, UserProfile
@receiver(post_save, sender=User)
def post_user_created_signal(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
authors: ["amirajubolchi001@gmail.com"]
author_id: amirajubolchi001@gmail.com

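A receiver module like this only connects its signal once it is imported; the usual Django pattern is to import it from the app config's `ready()` hook. A sketch, with the `AppConfig` class name assumed:

```python
# leads/apps.py — sketch; the class name LeadsConfig is an assumption about this project
from django.apps import AppConfig

class LeadsConfig(AppConfig):
    name = "leads"

    def ready(self):
        # importing the module registers the post_save receiver
        import leads.signals  # noqa: F401
```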
---

blob_id: c9e7bc67c4d70d25125930d72994a97f87a38b50
directory_id: 003d49fa17fea3644ae71151e44a45dfccb96d3d
path: /tests/system/pfrbeat.py
content_id: 66974c6977d26b87a11277dbf1c37c11ca70491a
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: tak7iji/pfrbeat
snapshot_id: b5ba75a842a92cb07e11e2874d9a19fbee6f00d4
revision_id: 72a61f93d44d50d65b1397e1df92bdf3fd8ca381
branch_name: refs/heads/master
visit_date: 2020-01-27T10:03:23.779745
revision_date: 2016-09-06T00:43:48
committer_date: 2016-09-06T00:43:48
github_id: 67,460,066
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 326
extension: py
content:
import sys
sys.path.append('../../vendor/github.com/elastic/beats/libbeat/tests/system')
from beat.beat import TestCase
class BaseTest(TestCase):
    @classmethod
    def setUpClass(cls):  # classmethods receive the class; name the parameter cls
        cls.beat_name = "pfrbeat"
        cls.build_path = "../../build/system-tests/"
        cls.beat_path = "../../pfrbeat.test"
authors: ["tak7iji@gmail.com"]
author_id: tak7iji@gmail.com

---

blob_id: e51b2093ee74eae80647ebd11a071cc3e3629f30
directory_id: e6356a713dac5a13cced51e4ebaf6b67ea275184
path: /Arduino_Control_Sistem_1.02_64bit.py
content_id: db564204bda9772218f56d94fd7ef3729c44fc7b
detected_licenses: ["MIT"]
license_type: permissive
repo_name: olegzh7505/Arduino-Control-System
snapshot_id: 88e2834b504daf4ff6b6ef3b1f7188b3bf8c38ae
revision_id: 90ba3d1d07c9a48a3552241739663e36c4f26d1c
branch_name: refs/heads/main
visit_date: 2023-01-18T18:58:56.716226
revision_date: 2020-11-20T15:24:51
committer_date: 2020-11-20T15:24:51
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 41,408
extension: py
content:
import sys
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QFileDialog, QSlider, QMenu, QMenuBar, QLCDNumber
from PyQt5.QtWidgets import QPushButton, QLineEdit, QCheckBox, QRadioButton, QButtonGroup, QAction
from PyQt5.QtCore import Qt
from threading import Thread
import serial
import glob
# Developer - HollowHunter
# https://habr.com/ru/users/HollowHunter/
def serial_ports():
    # Function from Stack Overflow, author - Thomas
    # https://stackoverflow.com/users/300783/thomas
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
class Main_window(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 150, 700, 500)
self.setWindowTitle('Контроллер сериал порта')
        self.obg_list = []  # list of all nodes
speeds = ['1200', '2400', '4800', '9600', '19200', '38400', '57600', '115200']
self.file_name = ''
self.now_port = ''
self.now_speed = 9600
menu_line = QMenuBar(self)
file = menu_line.addMenu("Файл")
new = QAction('Новый', self)
file.addAction(new)
new.triggered.connect(self.new_progect)
open = QAction("Открыть", self)
file.addAction(open)
open.triggered.connect(self.open_file)
save = QAction("Сохранить", self)
save.setShortcut("Ctrl+S")
file.addAction(save)
save.triggered.connect(self.save_file)
save_us = QAction("Сохранить как", self)
file.addAction(save_us)
save_us.triggered.connect(self.clear_fname)
save_us.triggered.connect(self.save_file)
author = file.addMenu('Автор')
for elem in ['Habr:\tHollowHunter', 'GitHub:\tHollowHunter', 'AlexGyver Community:\tHollowHunter']:
a = QAction(elem, self)
author.addAction(a)
create = menu_line.addMenu('Создать')
btn = QAction('Кнопка', self)
create.addAction(btn)
sld = QAction('Слайдер', self)
create.addAction(sld)
edt = QAction('Поле для ввода', self)
create.addAction(edt)
inp = QAction('Вход данных', self)
create.addAction(inp)
create.triggered[QAction].connect(self.spawn_new_node)
ard = menu_line.addMenu('Подключение ардуино')
self.port = ard.addMenu('Порт:')
self.port.triggered[QAction].connect(self.change_port)
self.udate = QAction('Обновить порты', self)
self.port.addAction(self.udate)
self.display_port = QAction('Текущий порт: None', self)
self.port.addAction(self.display_port)
port_list = serial_ports()
for i in serial_ports():
edt = QAction(i, self)
self.port.addAction(edt)
if len(port_list) == 1:
self.now_port = port_list[0]
self.display_port.setText('Текущий порт: ' + port_list[0])
speed = ard.addMenu('Скорость')
self.speed_viewier = QAction('Текущая скорость: 9600', self)
speed.addAction(self.speed_viewier)
for elem in speeds:
n = QAction(elem, self)
speed.addAction(n)
speed.triggered[QAction].connect(self.change_speed)
self.connect_btn = QAction('Подключить', self)
ard.addAction(self.connect_btn)
self.connect_btn.triggered.connect(self.connect)
dis = QAction('Отключить', self)
ard.addAction(dis)
dis.triggered.connect(self.disConnect)
comand_type = ard.addMenu('Тип сигнала')
self.type_viewier = QAction('Текущий сигнал: ${indx} {comand};')
comand_type.addAction(self.type_viewier)
self.read_ser = Thread(target=self.read_serial_port, daemon=True)
self.read_ser.start()
def read_serial_port(self):
global ser
while 1:
try:
string = ser.readline()
if string != None:
for elem in self.obg_list:
if type(elem) == Input_serial_Node:
elem.displayValue(string.decode()[:-1])
except Exception:
pass
def copy_node(self, parametrs):
parametrs[3] = str(int(parametrs[3]) + 70)
self.close()
self.obg_list.append([Button_Node, Slider_Node, Edit_Node,
Input_serial_Node][int(parametrs[0])](self, parametrs))
self.show()
def mouseMoveEvent(self, event):
global trigered_node
if trigered_node != None and event.x() >= 0 and event.y() >= 0:
trigered_node.ubdate_cord(event.x(), event.y())
def keyPressEvent(self, event):
if not event.isAutoRepeat():
for elem in self.obg_list:
if elem.is_keyword():
elem.change_key_state(1, event.key())
def keyReleaseEvent(self, event):
if not event.isAutoRepeat():
for elem in self.obg_list:
if elem.is_keyword():
elem.change_key_state(1, event.key(), True)
def mouseReleaseEvent(self, event):
global trigered_node
trigered_node = None
def open_file(self):
print(serial_ports())
fname = QFileDialog.getOpenFileName(self, 'Выбрать файл', '',
'Arduino Node Save (*.ans);;Все файлы (*)')[0]
print(fname)
if fname != '':
self.close()
with open(fname, 'r', encoding='utf8') as f_r:
save_file = f_r.read().split('\n')
save_file = list(filter(lambda elem: elem != '', save_file))
for i in range(len(self.obg_list)):
self.obg_list[i].del_widgets()
self.obg_list.clear()
for elem in save_file:
if elem[0] == '#':
continue
e = elem.split('$')
self.obg_list.append([Button_Node, Slider_Node, Edit_Node,
Input_serial_Node][int(e[0])](self, e))
self.file_name = fname
self.show()
def clear_fname(self):
self.file_name = ''
def save_file(self):
if self.file_name == '':
fname = QFileDialog.getSaveFileName(self, 'сохранение', '', 'Arduino Node Save (*.ans)')[0]
else:
fname = self.file_name
self.obg_list = list(filter(lambda elem: elem.is_delete(), self.obg_list))
if fname != '':
with open(fname, 'w', encoding='utf8') as f_w:
for elem in self.obg_list:
print('$'.join(elem.parametrs_return()), file=f_w)
self.file_name = fname
def new_progect(self):
for i in range(len(self.obg_list)):
self.obg_list[i].del_widgets()
self.obg_list.clear()
self.file_name = ''
def spawn_new_node(self, event):
self.close()
if event.text() == 'Кнопка':
self.obg_list.append(Button_Node(self))
elif event.text() == 'Слайдер':
self.obg_list.append(Slider_Node(self))
elif event.text() == 'Поле для ввода':
self.obg_list.append(Edit_Node(self))
elif event.text() == 'Вход данных':
self.obg_list.append(Input_serial_Node(self))
self.show()
def change_port(self, action):
print('hi', action.text())
if action.text()[:13] == 'Текущий порт:' or action.text() == 'Обновить порты':
self.close()
self.port.clear()
self.udate = QAction('Обновить порты', self)
self.port.addAction(self.udate)
self.display_port = QAction('Текущий порт: ' + self.now_port, self)
self.port.addAction(self.display_port)
for i in serial_ports():
edt = QAction(i, self)
self.port.addAction(edt)
self.show()
else:
self.display_port.setText('Текущий порт: ' + action.text())
self.now_port = action.text()
def change_speed(self, action):
try:
self.now_speed = int(action.text())
self.speed_viewier.setText('Текущая скорость: ' + action.text())
except ValueError:
pass
def connect(self):
if self.now_port != '':
try:
global ser
ser = serial.Serial(self.now_port, self.now_speed)
self.connect_btn.setText('Подключено')
except Exception:
self.connect_btn.setText('Не подключено')
def disConnect(self):
global ser
ser.close()
ser = Hollow_serial()
self.connect_btn.setText('Подключить')
class Hollow_serial:
def write(self, data):
pass
def readline(self):
return None
class Node:
def __init__(self, main_obg, name, first_x, first_y):
        # coordinates of the node's top-left corner
        self.main_window_obg = main_obg  # main window object
        self.flag = True  # flag for the settings arrow
self.x = int(first_x)
self.y = int(first_y)
self.delete = True
self.left_com = '$'
self.middle_com = ' '
self.right_com = ';'
self.node_name = QLabel(self.main_window_obg)
self.node_name.setText(name)
self.node_name.resize(self.node_name.sizeHint())
self.name = name
self.control_btn = QPushButton('...', self.main_window_obg)
self.control_btn.resize(20, 20)
self.control_btn.pressed.connect(self.press_control_btn)
self.control_btn.clicked.connect(self.released_control_btn)
self.delete_btn = QPushButton('✖', self.main_window_obg)
self.delete_btn.resize(20, 20)
self.delete_btn.clicked.connect(self.del_widgets)
self.copy_btn = QPushButton('❐', self.main_window_obg)
self.copy_btn.resize(20, 20)
self.copy_btn.clicked.connect(self.copy_widget)
self.settings_btn = QPushButton('▲', self.main_window_obg)
self.settings_btn.resize(20, 20)
self.settings_btn.clicked.connect(self.open_setings)
self.text_set1 = QLabel(self.main_window_obg)
self.text_set1.setText('Имя нода:')
self.input_line1 = QLineEdit(name, self.main_window_obg)
self.input_line1.textChanged.connect(self.change_name)
self.input_line1.resize(60, 23)
self.arr_of_elem = [(self.node_name, 42, 1), (self.control_btn, 0, 0),
(self.settings_btn, 21, 0), (self.text_set1, 1, 54),
(self.input_line1, 62, 51), (self.delete_btn, -21, 0), (self.copy_btn, 0, -21)]
self.ubdate_cord(first_x, first_y)
def press_control_btn(self):
global trigered_node
trigered_node = self
def released_control_btn(self):
global trigered_node
trigered_node = None
def ubdate_cord(self, x, y):
for elem in self.arr_of_elem:
elem[0].move(x + elem[1], y + elem[2])
self.x = x
self.y = y
def change_name(self):
self.node_name.setText(self.input_line1.text())
self.node_name.resize(self.node_name.sizeHint())
self.name = self.input_line1.text()
def copy_widget(self):
self.main_window_obg.copy_node(self.parametrs_return())
def is_delete(self):
return self.delete
class Button_Node(Node):
def __init__(self, main_obg, parametrs=['0', 'Вкл', '50', '50', '1', '1', '0', '1',
'выкл', '1', 'Кнопка', 'None', '0']):
super().__init__(main_obg, parametrs[10], int(parametrs[2]), int(parametrs[3]))
self.main_window_obg = main_obg
self.index_comand = parametrs[4]
        self.first_comand = parametrs[5]  # first command
        self.second_comand = parametrs[6]  # second command
        self.btn_flag = True  # whether to send the first or the second command
        self.parametr_btn = False  # whether a second command is present
self.btn_name = parametrs[1]
self.two_btn_name = parametrs[8]
self.size_big_btn = float(parametrs[7])
        self.mode = int(parametrs[9])  # button type: 1 - single command, 2 - two alternating, 3 - two on "press/release"
self.key_state = bool(int(parametrs[12]))
self.key_btn = int(parametrs[11]) if parametrs[11] != 'None' else None
self.key_flag = False
        # |--------------------------------------------| widget declarations
self.big_btn = QPushButton(self.btn_name, self.main_window_obg)
self.big_btn.clicked.connect(self.enter_comand)
self.big_btn.pressed.connect(self.enter_comand_for_3_mode)
self.text_set2 = QLabel(self.main_window_obg)
self.text_set2.setText('Имя кнопки 1:')
self.input_line2 = QLineEdit(self.btn_name, self.main_window_obg)
self.input_line2.textChanged.connect(self.change_btn_name_1)
self.input_line2.resize(60, 23)
self.text_set3 = QLabel(self.main_window_obg)
self.text_set3.setText('Индекс:')
self.input_line3 = QLineEdit(self.index_comand, self.main_window_obg)
self.input_line3.textChanged.connect(self.change_index)
self.input_line3.resize(60, 23)
self.text_set4 = QLabel(self.main_window_obg)
self.text_set4.setText('Команда 1:')
self.input_line4 = QLineEdit(self.first_comand, self.main_window_obg)
self.input_line4.textChanged.connect(self.change_first_parametr)
self.input_line4.resize(60, 23)
self.text_set5 = QLabel(self.main_window_obg)
self.text_set5.setText('Размер:')
self.input_line5 = QLineEdit(str(self.size_big_btn), self.main_window_obg)
self.input_line5.editingFinished.connect(self.change_size_big_btn)
self.input_line5.resize(60, 23)
self.rb_group = QButtonGroup(self.main_window_obg)
self.rb1 = QRadioButton("Один сигнал", self.main_window_obg)
self.rb1.move(50, 50)
if self.mode == 1:
self.rb1.click()
self.rb1.clicked.connect(self.update_type)
self.rb2 = QRadioButton("Два сигнала попеременно", self.main_window_obg)
self.rb2.move(80, 50)
if self.mode == 2:
self.rb2.click()
self.rb2.clicked.connect(self.update_type)
self.rb3 = QRadioButton('Два сигнала "нажал-отпустил"', self.main_window_obg)
self.rb3.move(120, 50)
if self.mode == 3:
self.rb3.click()
self.rb3.clicked.connect(self.update_type)
self.rb_group.addButton(self.rb1)
self.rb_group.addButton(self.rb2)
self.rb_group.addButton(self.rb3)
self.text_set7 = QLabel(self.main_window_obg)
self.text_set7.setText('Команда 2:')
self.input_line7 = QLineEdit(self.second_comand, self.main_window_obg)
self.input_line7.textChanged.connect(self.change_second_parametr)
self.input_line7.resize(60, 23)
self.text_set8 = QLabel(self.main_window_obg)
self.text_set8.setText('Имя кнопки 2:')
self.input_line8 = QLineEdit(self.two_btn_name, self.main_window_obg)
self.input_line8.textChanged.connect(self.change_btn_name_2)
self.input_line8.resize(60, 23)
self.key_chekBox = QCheckBox('Использовать клавиши', self.main_window_obg)
self.key_chekBox.stateChanged.connect(self.change_key_state)
if self.key_state:
self.key_chekBox.click()
self.lit = QLabel(self.main_window_obg)
if self.key_btn != None:
self.lit.setText(chr(self.key_btn))
        # |--------------------------------------------|
        # list of all node widgets and their relative coordinates
self.arr_of_elem.extend([(self.big_btn, 0, 21), (self.text_set2, 0, 78),
(self.input_line2, 84, 76), (self.text_set3, 0, 102),
(self.input_line3, 46, 100),
(self.text_set4, 0, 128), (self.input_line4, 66, 125),
(self.text_set5, 0, 152), (self.input_line5, 50, 150),
(self.rb1, 0, 170), (self.rb2, 0, 190), (self.rb3, 0, 210),
(self.text_set7, 0, 232), (self.input_line7, 66, 230),
(self.text_set8, 0, 257), (self.input_line8, 84, 255),
(self.key_chekBox, 0, 280), (self.lit, 0, 50)])
        # List of all settings widgets
self.elems_of_settings = [self.text_set1, self.input_line1, self.text_set2,
self.input_line2, self.input_line3, self.text_set3,
self.text_set4, self.input_line4,
self.text_set5, self.input_line5, self.rb1, self.rb2,
self.rb3, self.text_set7, self.input_line7, self.text_set8,
self.input_line8, self.key_chekBox, self.delete_btn, self.copy_btn]
        # List of additional settings widgets
self.additional_widgets = [self.text_set7, self.input_line7,
self.text_set8, self.input_line8]
for elem in self.elems_of_settings:
elem.hide()
self.big_btn.resize(int(100 * self.size_big_btn), int(30 * self.size_big_btn))
self.ubdate_cord(self.x, self.y)
self.update_type()
for elem in self.additional_widgets:
elem.hide()
self.change_key_state(None, self.key_btn)
def del_widgets(self):
if self.delete:
for elem in self.arr_of_elem:
elem[0].deleteLater()
self.delete = False
def parametrs_return(self):
return ['0', self.btn_name, str(self.x), str(self.y), self.index_comand, self.first_comand,
self.second_comand, str(self.size_big_btn), self.two_btn_name, str(self.mode), self.name,
str(self.key_btn), str(int(self.key_state))]
def enter_comand(self):
global ser
if self.mode == 2:
comand = self.left_com + self.index_comand + \
self.middle_com + self.first_comand + self.right_com if self.btn_flag else \
self.left_com + self.index_comand + \
self.middle_com + self.second_comand + self.right_com
print('2', comand)
if self.btn_flag:
ser.write(comand.encode())
self.big_btn.setText(self.btn_name)
self.btn_flag = False
else:
ser.write(comand.encode())
self.big_btn.setText(self.two_btn_name)
self.btn_flag = True
elif self.mode == 1:
comand = self.left_com + self.index_comand + \
self.middle_com + self.first_comand + self.right_com
self.big_btn.setText(self.btn_name)
ser.write(comand.encode())
print(comand)
elif self.mode == 3:
comand = self.left_com + self.index_comand + \
self.middle_com + self.first_comand + self.right_com
self.big_btn.setText(self.btn_name)
ser.write(comand.encode())
print(comand)
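    # A sketch of the wire format assembled above (assuming left_com, middle_com
    # and right_com are the delimiter strings defined on the Node base class):
    #     command = left_com + index_comand + middle_com + payload + right_com
    # With hypothetical delimiters left_com='{', middle_com=':', right_com='}',
    # index '3' and payload '1', this serializes to '{3:1}' before being
    # encoded and written to the serial port.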
def enter_comand_for_3_mode(self):
global ser
if self.mode == 3:
comand = self.left_com + self.index_comand + \
self.middle_com + self.second_comand + self.right_com
self.big_btn.setText(self.two_btn_name)
ser.write(comand.encode())
print(comand)
def change_btn_name_1(self):
self.big_btn.setText(self.input_line2.text())
self.btn_name = self.input_line2.text()
self.big_btn.resize(self.big_btn.sizeHint())
def change_btn_name_2(self):
self.two_btn_name = self.input_line8.text()
def change_index(self):
self.index_comand = self.input_line3.text()
def change_parametr_btn(self):
self.parametr_btn = not self.parametr_btn
if self.parametr_btn:
for elem in [self.text_set2]:
elem.show()
else:
for elem in [self.text_set2]:
elem.hide()
def change_first_parametr(self):
self.first_comand = self.input_line4.text()
def change_second_parametr(self):
self.second_comand = self.input_line7.text()
    def change_size_big_btn(self):
        # Guard against non-numeric input, mirroring the slider's handling
        try:
            self.size_big_btn = float(self.input_line5.text())
            self.big_btn.resize(int(100 * self.size_big_btn), int(30 * self.size_big_btn))
        except ValueError:
            pass
def change_key_state(self, data, key=None, released=False):
# self.key_state = not self.key_state
try:
if self.key_chekBox.isChecked() and key == None:
self.key_state = True
self.key_flag = True
self.key_chekBox.setText('Нажмите на клавишу')
elif key != None and self.key_flag:
self.key_chekBox.setText('Нажата клавиша:' + chr(key))
self.lit.setText(chr(key))
#self.lit.resize(self.lit.sizeHint())
self.btn_flag = True
self.key_btn = key
self.key_flag = False
elif self.key_btn == key and data != None:
if self.mode == 3 and released:
self.enter_comand()
elif self.mode == 3 and not released:
self.enter_comand_for_3_mode()
elif not released:
self.big_btn.click()
elif not self.key_chekBox.isChecked():
self.key_btn = key
self.lit.setText('')
self.key_state = False
self.key_chekBox.setText('Использовать клавиши')
except Exception:
pass
def update_type(self):
if self.rb1.isChecked():
self.mode = 1
self.big_btn.setCheckable(False)
for elem in self.additional_widgets:
elem.hide()
elif self.rb2.isChecked():
self.mode = 2
self.big_btn.setCheckable(True)
for elem in self.additional_widgets:
elem.show()
elif self.rb3.isChecked():
self.mode = 3
self.big_btn.setCheckable(False)
for elem in self.additional_widgets:
elem.show()
def open_setings(self):
if self.flag:
self.settings_btn.setText('▼')
self.flag = False
for elem in self.elems_of_settings:
elem.show()
if self.mode == 1:
for elem in self.additional_widgets:
elem.hide()
self.big_btn.resize(100, 30)
self.lit.hide()
else:
self.settings_btn.setText('▲')
self.flag = True
for elem in self.elems_of_settings:
elem.hide()
self.big_btn.resize(int(100 * self.size_big_btn), int(30 * self.size_big_btn))
self.lit.show()
def is_keyword(self):
        return self.key_state
class Slider_Node(Node):
def __init__(self, main_obg, parametrs=['1', 'Слайдер', '50', '50', '10', 1, 1, '', '0', '100']):
super().__init__(main_obg, parametrs[1], int(parametrs[2]), int(parametrs[3]))
self.index_comand = parametrs[4]
self.size_slider = float(parametrs[5])
        self.mode = int(parametrs[6])  # slider type
self.value_sld = 0
self.binding = int(parametrs[7]) if parametrs[7] != '' else ''
self.min = int(parametrs[8])
self.max = int(parametrs[9])
        # |--------------------------------------------| widget declarations
self.sld = QSlider(Qt.Horizontal, self.main_window_obg)
self.sld.setFocusPolicy(Qt.NoFocus)
self.sld.setGeometry(30, 40, 100, 30)
self.sld.valueChanged[int].connect(self.changeValue)
self.sld.sliderReleased.connect(self.enter_comand)
self.sld.setMinimum(self.min)
self.sld.setMaximum(self.max)
self.sld.resize(int(100 * self.size_slider), 30)
self.text_set2 = QLabel(self.main_window_obg)
self.text_set2.setText('None')
self.text_set3 = QLabel(self.main_window_obg)
self.text_set3.setText('Индекс:')
self.input_line3 = QLineEdit(self.index_comand, self.main_window_obg)
self.input_line3.textChanged.connect(self.change_index)
self.input_line3.resize(60, 23)
self.text_set5 = QLabel(self.main_window_obg)
self.text_set5.setText('Размер:')
self.input_line5 = QLineEdit(str(self.size_slider), self.main_window_obg)
self.input_line5.editingFinished.connect(self.change_size_sld)
self.input_line5.resize(60, 23)
self.text_set4 = QLabel(self.main_window_obg)
self.text_set4.setText('Минимум:')
self.input_line4 = QLineEdit(str(self.min), self.main_window_obg)
self.input_line4.textChanged.connect(self.change_minimum)
self.input_line4.resize(60, 23)
self.text_set6 = QLabel(self.main_window_obg)
self.text_set6.setText('Максимум:')
self.input_line6 = QLineEdit(str(self.max), self.main_window_obg)
self.input_line6.textChanged.connect(self.change_maximum)
self.input_line6.resize(60, 23)
self.text_set7 = QLabel(self.main_window_obg)
self.text_set7.setText('Привязка:')
self.input_line7 = QLineEdit(str(self.binding), self.main_window_obg)
self.input_line7.textChanged.connect(self.change_binding)
self.input_line7.resize(60, 23)
self.rb_group = QButtonGroup(self.main_window_obg)
self.rb1 = QRadioButton("Отправка при отпуске", self.main_window_obg)
self.rb1.move(50, 50)
if self.mode == 1:
self.rb1.click()
self.rb1.clicked.connect(self.update_type)
self.rb2 = QRadioButton("Отправка при изменении", self.main_window_obg)
self.rb2.move(80, 50)
if self.mode == 2:
self.rb2.click()
self.rb2.clicked.connect(self.update_type)
self.rb_group.addButton(self.rb1)
self.rb_group.addButton(self.rb2)
# |--------------------------------------------|
        # List of all node widgets and their relative coordinates
self.arr_of_elem.extend([(self.text_set2, 0, 50), (self.sld, 0, 21),
(self.text_set3, 0, 76), (self.input_line3, 46, 75),
(self.text_set4, 0, 102), (self.input_line4, 63, 100),
(self.text_set6, 0, 127), (self.input_line6, 63, 125),
(self.text_set5, 0, 152), (self.input_line5, 50, 150),
(self.text_set7, 0, 177), (self.input_line7, 63, 175),
(self.rb1, 0, 195), (self.rb2, 0, 215)])
        # List of all settings widgets
self.elems_of_settings = [self.text_set1, self.input_line1,
self.input_line3, self.text_set3,
self.text_set5, self.input_line5, self.rb1, self.rb2,
self.text_set4, self.text_set6, self.input_line4, self.input_line6,
self.text_set7, self.input_line7, self.delete_btn, self.copy_btn]
        # List of additional settings widgets
for elem in self.elems_of_settings:
elem.hide()
# self.big_btn.resize(int(100 * self.size_big_btn), int(30 * self.size_big_btn))
self.ubdate_cord(self.x, self.y)
def del_widgets(self):
if self.delete:
for elem in self.arr_of_elem:
elem[0].deleteLater()
self.delete = False
def parametrs_return(self):
return ['1', self.name, str(self.x), str(self.y), self.index_comand, str(self.size_slider),
str(self.mode), str(self.binding), str(self.min), str(self.max)]
def enter_comand(self):
if self.mode == 1:
global ser
comand = self.left_com + self.index_comand + \
self.middle_com + str(self.value_sld) + self.right_com
ser.write(comand.encode())
if self.binding != '':
try:
self.sld.setValue(int(self.binding))
comand = self.left_com + self.index_comand + \
self.middle_com + str(self.value_sld) + self.right_com
ser.write(comand.encode())
except ValueError:
pass
def change_index(self):
self.index_comand = self.input_line3.text()
def change_size_sld(self):
try:
self.size_slider = float(self.input_line5.text())
self.sld.resize(int(100 * self.size_slider), 30)
except ValueError:
pass
def change_binding(self):
self.binding = self.input_line7.text()
def changeValue(self, value='X_X'):
global ser
self.value_sld = value
self.text_set2.setText(str(value))
self.text_set2.resize(self.text_set2.sizeHint())
if self.mode == 2:
comand = self.left_com + self.index_comand + \
self.middle_com + str(self.value_sld) + self.right_com
ser.write(comand.encode())
def change_maximum(self):
try:
self.max = int(self.input_line6.text())
self.sld.setMaximum(self.max)
except ValueError:
pass
def change_minimum(self):
try:
            self.min = int(self.input_line4.text())
self.sld.setMinimum(self.min)
except ValueError:
pass
def update_type(self):
if self.rb1.isChecked():
self.mode = 1
elif self.rb2.isChecked():
self.mode = 2
def open_setings(self):
if self.flag:
self.settings_btn.setText('▼')
self.flag = False
for elem in self.elems_of_settings:
elem.show()
self.text_set2.hide()
else:
self.settings_btn.setText('▲')
self.flag = True
for elem in self.elems_of_settings:
elem.hide()
self.text_set2.show()
def is_keyword(self):
return False
class Edit_Node(Node):
def __init__(self, main_obg, parametrs=['2', 'Ввод', '50', '50', '5']):
super().__init__(main_obg, parametrs[1], int(parametrs[2]), int(parametrs[3]))
self.index_comand = parametrs[4]
        # |--------------------------------------------| widget declarations
self.edit = QLineEdit('', self.main_window_obg)
self.edit.editingFinished.connect(self.enter_comand)
self.last_comand1 = QLabel(self.main_window_obg)
self.last_comand1.setText('None')
self.last_comand2 = QLabel(self.main_window_obg)
self.last_comand2.setText('None')
self.last_comand3 = QLabel(self.main_window_obg)
self.last_comand3.setText('None')
self.text_set3 = QLabel(self.main_window_obg)
self.text_set3.setText('Индекс:')
self.input_line3 = QLineEdit(self.index_comand, self.main_window_obg)
self.input_line3.textChanged.connect(self.change_index)
self.input_line3.resize(60, 23)
# self.text_set4 = QLabel(self.main_window_obg)
# self.text_set4.setText('F(x)')
# self.input_line4 = QLineEdit('0', self.main_window_obg)
# self.input_line4.textChanged.connect(self.change_minimum)
# self.input_line4.resize(60, 23)
# |--------------------------------------------|
        # List of all node widgets and their relative coordinates
self.arr_of_elem.extend([(self.last_comand1, 0, 50), (self.last_comand2, 50, 50),
(self.last_comand3, 100, 50), (self.edit, 0, 25),
(self.text_set3, 0, 76), (self.input_line3, 46, 75)])
        # List of all settings widgets
self.elems_of_settings = [self.text_set1, self.input_line1,
self.input_line3, self.text_set3, self.delete_btn, self.copy_btn]
        # List of additional settings widgets
for elem in self.elems_of_settings:
elem.hide()
# self.big_btn.resize(int(100 * self.size_big_btn), int(30 * self.size_big_btn))
self.ubdate_cord(self.x, self.y)
def del_widgets(self):
if self.delete:
for elem in self.arr_of_elem:
elem[0].deleteLater()
self.delete = False
def parametrs_return(self):
return ['2', self.name, str(self.x), str(self.y), self.index_comand]
def enter_comand(self):
global ser
if self.edit.text() != '':
comand = self.left_com + self.index_comand + \
self.middle_com + self.edit.text() + self.right_com
ser.write(comand.encode())
self.last_comand3.setText(self.last_comand2.text())
self.last_comand2.setText(self.last_comand1.text())
self.last_comand1.setText(self.edit.text())
self.last_comand1.resize(self.last_comand1.sizeHint())
self.last_comand2.resize(self.last_comand2.sizeHint())
self.last_comand3.resize(self.last_comand3.sizeHint())
self.edit.setText('')
def change_index(self):
self.index_comand = self.input_line3.text()
    def changeValue(self, value='X_X'):
        # Note: this method appears to be carried over from Slider_Node; Edit_Node
        # defines neither self.mode nor self.value_sld, so it is effectively unused.
        global ser
        self.value_sld = value
        self.last_comand1.setText(str(value))
        self.last_comand1.resize(self.last_comand1.sizeHint())
        if self.mode == 2:
            comand = self.left_com + self.index_comand + \
                self.middle_com + str(self.value_sld) + self.right_com
            print('enter', comand)
            ser.write(comand.encode())
def open_setings(self):
if self.flag:
self.settings_btn.setText('▼')
self.flag = False
for elem in self.elems_of_settings:
elem.show()
self.last_comand1.hide()
self.last_comand2.hide()
self.last_comand3.hide()
else:
self.settings_btn.setText('▲')
self.flag = True
for elem in self.elems_of_settings:
elem.hide()
self.last_comand1.show()
self.last_comand2.show()
self.last_comand3.show()
def is_keyword(self):
return False
class Input_serial_Node(Node):
def __init__(self, main_obg, parametrs=['3', 'Ввод', '50', '50', '5', '1']):
super().__init__(main_obg, parametrs[1], int(parametrs[2]), int(parametrs[3]))
self.index_comand = parametrs[4]
self.sizeLCD = float(parametrs[5])
        # |--------------------------------------------| widget declarations
self.numberLCD = QLCDNumber(self.main_window_obg)
self.numberLCD.resize(int(80 * self.sizeLCD), int(27 * self.sizeLCD))
self.text_set3 = QLabel(self.main_window_obg)
self.text_set3.setText('Индекс:')
self.input_line3 = QLineEdit(self.index_comand, self.main_window_obg)
self.input_line3.textChanged.connect(self.change_index)
self.input_line3.resize(60, 23)
self.text_set4 = QLabel(self.main_window_obg)
self.text_set4.setText('Размер:')
self.input_line4 = QLineEdit(str(self.sizeLCD), self.main_window_obg)
self.input_line4.editingFinished.connect(self.change_size_lcd)
self.input_line4.resize(60, 23)
# |--------------------------------------------|
        # List of all node widgets and their relative coordinates
self.arr_of_elem.extend([(self.numberLCD, 0, 21), (self.text_set3, 0, 76), (self.input_line3, 48, 75),
(self.text_set4, 0, 102), (self.input_line4, 48, 100)])
        # List of all settings widgets
self.elems_of_settings = [self.input_line1, self.text_set1, self.input_line3,
self.text_set3, self.delete_btn,
self.input_line4, self.text_set4, self.copy_btn]
        # List of additional settings widgets
for elem in self.elems_of_settings:
elem.hide()
# self.big_btn.resize(int(100 * self.size_big_btn), int(30 * self.size_big_btn))
self.ubdate_cord(self.x, self.y)
def del_widgets(self):
if self.delete:
for elem in self.arr_of_elem:
elem[0].deleteLater()
self.delete = False
def change_size_lcd(self):
try:
self.sizeLCD = float(self.input_line4.text())
self.numberLCD.resize(int(80 * self.sizeLCD), int(27 * self.sizeLCD))
except ValueError:
pass
def parametrs_return(self):
return ['3', self.name, str(self.x), str(self.y), self.index_comand, str(self.sizeLCD)]
def change_index(self):
self.index_comand = self.input_line3.text()
def displayValue(self, value='error'):
try:
indx = value.split()[0]
com = value.split()[1]
if indx == self.index_comand:
self.numberLCD.display(com)
except:
pass
def open_setings(self):
if self.flag:
self.settings_btn.setText('▼')
self.flag = False
for elem in self.elems_of_settings:
elem.show()
self.numberLCD.resize(80, 27)
else:
self.settings_btn.setText('▲')
self.flag = True
for elem in self.elems_of_settings:
elem.hide()
self.numberLCD.resize(int(80 * self.sizeLCD), int(27 * self.sizeLCD))
def is_keyword(self):
return False
ser = Hollow_serial()
trigered_node = None  # global variable used for dragging buttons
app = QApplication(sys.argv)
ex = Main_window()
ex.show()
sys.exit(app.exec())
|
[
"noreply@github.com"
] |
olegzh7505.noreply@github.com
|
442b035527b9fdc7e66642b0c37964b41e238554
|
ded10c2f2f5f91c44ec950237a59225e8486abd8
|
/.history/2/matrix_squaring_20200421184046.py
|
07c3efd241b763cb74404048ea5fb45bbb30e3b3
|
[] |
no_license
|
jearistiz/Statistical-Physics-Projects
|
276a86407b32ded4e06b32efb2fadbd8eff8daed
|
d9c5b16a50856e148dc8604d92b6de3ea21fc552
|
refs/heads/master
| 2022-11-05T03:41:23.623050
| 2020-06-28T06:36:05
| 2020-06-28T06:36:05
| 254,909,897
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,095
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 20200414
def rho_free(x,xp,beta):
"""Uso: devuelve elemento de matriz dsnsidad para el caso de una partícula libre en un toro infinito."""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial armónico para una posición x dada"""
return 0.5*x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anarmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
    Usage: computes the theoretical quantum probability of finding the harmonic
    oscillator (immersed in a heat bath at inverse temperature beta) at position x.
    Receives:
        x: float    -> position
        beta: float -> inverse temperature in reduced units, beta = 1/T.
    Returns:
        theoretical quantum probability at position x for inverse temperature beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def Z_QHO(beta):
"""Uso: devuelve valor de función de partición para el QHO unidimensional"""
return 0.5/np.sinh(beta/2)
def E_QHO_avg_theo(beta):
"""Uso: devuelve valor de energía interna para el QHO unidimensional"""
return 0.5/np.tanh(0.5*beta)
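# A minimal numerical consistency check (a sketch, not part of the original
# script): the identity <E> = -d(ln Z)/d(beta) should hold for the closed
# forms above.
#     beta, db = 2.0, 1e-6
#     E_numeric = -(np.log(Z_QHO(beta + db)) - np.log(Z_QHO(beta - db))) / (2 * db)
#     assert abs(E_numeric - E_QHO_avg_theo(beta)) < 1e-6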
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
"""
    Usage: returns the density matrix in the high-temperature Trotter approximation
           under the influence of the potential "potential".
    Receives:
        x_max: float -> the x values lie in the interval (-x_max, x_max).
        nx: int      -> number of x values considered (equally spaced).
        beta: float  -> inverse temperature in reduced units.
        potential: func -> interaction potential. Must be a function of x.
    Returns:
        rho: numpy array, shape=(nx,nx)  -> density matrix in the high-temperature
                                            Trotter approximation for the given potential.
        grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
        dx: float -> spacing between consecutive grid_x values.
"""
nx = int(nx)
    # If nx is even, change it to the nearest odd number so that 0 is among the x values
if nx%2 == 0:
nx = nx + 1
    # Position grid spacing according to the x_max and nx inputs
dx = 2 * x_max/(nx-1)
    # List of x values given the grid spacing and x_max
grid_x = [i*dx for i in range(-int((nx-1)/2),int((nx-1)/2 + 1))]
    # Build the density matrix given by the Trotter approximation
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
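# The matrix built above is the symmetric Trotter factorization
#     rho(x, x'; beta) ~ exp(-beta*V(x)/2) * rho_free(x, x'; beta) * exp(-beta*V(x')/2),
# which is accurate at high temperature (small beta); hence the small
# beta_ini = beta_fin * 2**(-N_iter) used as the starting point for squaring.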
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
"""
    Usage: returns the density matrix after applying the matrix squaring algorithm
           N_iter times. The first iteration uses the density matrix given by the
           input rho (at inverse temperature beta_ini); subsequent iterations use
           the density matrix generated by the immediately preceding iteration. The
           system associated with the resulting density matrix is at inverse
           temperature beta_fin = beta_ini * 2**(N_iter).
    Receives:
        rho: numpy array, shape=(nx,nx)  -> density matrix discretized at the values
                                            given by grid_x.
        grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
        N_iter: int       -> number of iterations of the algorithm.
        beta_ini: float   -> inverse temperature associated with the input density
                             matrix rho.
        print_steps: bool -> whether to print the beta value at each iteration.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix at inverse temperature
                                           beta_fin.
        trace_rho: float  -> trace of the density matrix at inverse temperature
                             beta_fin. With the definition of rho we use, this
                             equals the partition function at that temperature.
        beta_fin: float   -> inverse temperature of the system associated with rho.
"""
    # Position grid spacing
dx = grid_x[1] - grid_x[0]
    # Compute beta_fin from the beta_ini and N_iter inputs
beta_fin = beta_ini * 2 ** N_iter
    # Print relevant information
if print_steps:
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
    # Iterate the matrix squaring algorithm
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
        # Print relevant information
if print_steps:
print(u'Iteración %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
if print_steps:
print('----------------------------------------------------------------\n' +
u'beta_fin = %.3f'%beta_fin)
    # Compute the trace of rho
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
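# The update "rho = dx * np.dot(rho, rho)" above is the discretized convolution
# property of the density matrix,
#     rho(x, x''; 2*beta) = integral dx' rho(x, x'; beta) * rho(x', x''; beta),
# so each iteration doubles beta (halves the temperature), and
# Z(beta) = Tr rho(beta) becomes np.trace(rho) * dx on the grid.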
def save_csv(data, data_headers=None, file_name='file.csv', relevant_info=None, print_data=True):
"""
    Usage: data must contain lists that become the columns of a CSV file saved as
           file_name. relevant_info adds comments in the first lines of the file.
    Receives:
        data: array of arrays, shape=(nx,ny)   -> each column is a column of the file.
        data_headers: numpy array, shape=(nx,) -> column names.
        file_name: str             -> name of the file where the data is saved.
        relevant_info: list of str -> information added as comments in the first
                                      lines. Each element of this list is added
                                      as a new line.
        print_data: bool           -> whether to print the saved data to screen.
    Returns:
        data_pdDF: pd.DataFrame -> the data as a pandas data frame.
        Saves a file with the data and the relevant information in the first lines.
"""
    # Store probability data: grid_x for positions and x_weights for
    # probability-density values.
data = np.array(data)
number_of_columns = len(data.transpose())
if file_name=='file.csv':
        script_dir = os.path.dirname(os.path.abspath(__file__))  # full path of this script
file_name = script_dir + '/' + file_name
if data_headers is None:
data_pdDF = pd.DataFrame(data)
print( 'Nota: no se especificaron headers.\n'+
'Los headers usados en el archivo serán los números 0, 1, 2,...')
elif len(data_headers)!=number_of_columns:
data_pdDF = pd.DataFrame(data)
print( 'Nota: no hay suficientes headers en data_headers para función save_csv().\n'+
'Los headers usados en el archivo serán los números 0, 1, 2,...')
else:
data_pdDF = pd.DataFrame(data,columns=data_headers)
    # Create the CSV file and add the relevant comments given as input
if relevant_info is not None:
with open(file_name,mode='w') as file_csv:
for info in list(relevant_info):
file_csv.write('# '+info+'\n')
file_csv.close()
        # Use pandas to write the file in CSV format.
with open(file_name,mode='a') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
else:
with open(file_name,mode='w') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
    # Print the data to screen.
if print_data==True:
print(data_pdDF)
return data_pdDF
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string = 'harmonic_potential', print_steps=True,
save_data=True, file_name=None, relevant_info=None,
plot=True, save_plot=True, show_plot=True):
"""
    Usage: runs the matrix squaring algorithm iteratively (N_iter times). The first
           iteration uses a density matrix in the Trotter approximation at inverse
           temperature beta_ini = beta_fin * 2**(-N_iter) for the potential given by
           "potential"; subsequent iterations use the density matrix generated by the
           immediately preceding iteration. This function also saves pi(x;beta) vs. x
           data to a text file and plots pi(x;beta) against the theory for the
           quantum harmonic oscillator.
    Receives:
        x_max: float    -> the x values lie in the interval (-x_max, x_max).
        nx: int         -> number of x values considered.
        N_iter: int     -> number of iterations of the matrix squaring algorithm.
        beta_fin: float -> inverse temperature we want to reach after applying the
                           matrix squaring algorithm iteratively.
        potential: func -> interaction potential used in the Trotter approximation.
                           Must be a function of x.
        potential_string: str -> name of the potential (used to name the generated
                                 files).
        print_steps: bool -> whether to print the matrix squaring steps.
        save_data: bool   -> whether to save the data to a .csv file.
        plot: bool        -> whether to plot.
        save_plot: bool   -> whether to save the figure.
        show_plot: bool   -> whether to show the figure on screen.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix at inverse temperature
                                           beta_fin.
        trace_rho: float -> trace of the density matrix at inverse temperature
                            beta_fin. With the definition of "rho" we use, this
                            equals the partition function at that temperature.
        grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
"""
    # Compute beta_ini from the beta_fin and N_iter inputs
beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho with the Trotter approximation
rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
grid_x = np.array(grid_x)
    # Approximate rho by iterating matrix squaring N_iter times.
rho, trace_rho, beta_fin_2 = density_matrix_squaring( rho, grid_x, N_iter,
beta_ini, print_steps )
print( '---------------------------------------------------------' +
'---------------------------------------------------------\n'
u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f'%(beta_ini, beta_fin_2) +
u' N_iter = %d Z(beta_fin) = Tr(rho(beta_fin)) = %.3E \n'%(N_iter,trace_rho) +
'---------------------------------------------------------' +
'---------------------------------------------------------')
    # Normalize rho to 1 and compute probability densities at the grid_x values.
rho_normalized = np.copy(rho)/trace_rho
x_weights = np.diag(rho_normalized)
    # Save data to a .csv file.
    script_dir = os.path.dirname(os.path.abspath(__file__))  # full path of this script
if save_data==True:
        # Name of the .csv file where the pi(x;beta_fin) values are stored.
if file_name is None:
csv_file_name = script_dir+u'/pi_x-ms-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.csv'\
%(potential_string,beta_fin,x_max,nx,N_iter)
else:
csv_file_name = script_dir + '/'+ file_name
        # Relevant information to add as a comment to the csv file.
if relevant_info is None:
relevant_info = [ 'pi(x;beta_fin) computed using matrix squaring algorithm and' + \
' Trotter approximation. Parameters:',
u'%s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
u'beta_fin = %.3f'%beta_fin ]
        # Save the pi(x;beta_fin) values to the csv file.
pi_x_data = [grid_x.copy(),x_weights.copy()]
pi_x_data_headers = ['position_x','prob_density']
pi_x_data = save_csv(pi_x_data,pi_x_data_headers,csv_file_name,relevant_info,print_data=0)
    # Plot and compare against theory
if plot == True:
plt.figure(figsize=(8,5))
plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nfórmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'%(N_iter,dx))
plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teórico QHO')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot==True:
if file_name is None:
plot_file_name = script_dir+u'/pi_x-ms-plot-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.eps'%(potential_string,beta_fin,x_max,nx,N_iter)
else:
plot_file_name = script_dir+u'/pi_x-ms-plot-'+file_name+'.eps'
plt.savefig(plot_file_name)
if show_plot==True:
plt.show()
plt.close()
return rho, trace_rho, grid_x
def Z_several_values( temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
Z_file_name = None, relevant_info_Z = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot=False, save_plot=False, show_plot=False ):
"""
"""
beta_max = 1./temp_min
beta_min = 1./temp_max
N_temp = int(N_temp)
beta_array = np.linspace(beta_max,beta_min,N_temp)
Z = []
for beta_fin in beta_array:
rho, trace_rho, grid_x = \
run_pi_x_sq_trotter( x_max, nx, N_iter, beta_fin, potential, potential_string,
                                 print_steps, save_pi_x_data, pi_x_file_name, relevant_info_pi_x, plot,
save_plot, show_plot)
Z.append(trace_rho)
Z_data = np.array([beta_array.copy(),1./beta_array.copy(),Z.copy()],dtype=float)
if save_Z_csv == True:
if Z_file_name is None:
            script_dir = os.path.dirname(os.path.abspath(__file__))  # full path of this script
Z_file_name = 'Z-ms-%s-beta_max_%.3f-'%(potential_string,1./temp_min) +\
'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max) +\
'nx_%d-N_iter_%d.csv'%(nx, N_iter)
Z_file_name = script_dir + '/' + Z_file_name
if relevant_info_Z is None:
relevant_info_Z = [ 'Partition function at several temperatures',
'%s beta_max = %.3f '%(potential_string,1./temp_min) + \
'beta_min = %.3f N_temp = %d '%(1./temp_max,N_temp) + \
'x_max = %.3f nx = %d N_iter = %d'%(x_max,nx, N_iter) ]
Z_data_headers = ['beta','temperature','Z']
Z_data = save_csv( Z_data.transpose(), Z_data_headers, Z_file_name, relevant_info_Z,
print_data = False )
if print_Z_data == True:
print(Z_data)
return Z_data
def average_energy( read_Z_data=True, generate_Z_data=False, Z_file_name = None,
plot_energy=True, save_plot_E=True, show_plot_E=True,
E_plot_name=None,
temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
relevant_info_Z = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot_pi_x=False, save_plot_pi_x=False, show_plot_pi_x=False ):
"""
"""
if read_Z_data:
Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
elif generate_Z_data:
t_0 = time()
Z_data = Z_several_values( temp_min, temp_max, N_temp, save_Z_csv, Z_file_name,
relevant_info_Z, print_Z_data, x_max, nx, N_iter, potential,
potential_string,print_steps, save_pi_x_data, pi_x_file_name,
relevant_info_pi_x, plot_pi_x,save_plot_pi_x, show_plot_pi_x)
t_1 = time()
print( '--------------------------------------------------------------------------\n' +
'%d values of Z(beta) generated --> %.3f sec.'%(N_temp,t_1-t_0))
Z_file_read = Z_data
else:
print( 'Elegir si se generan o se leen los datos para la función partición, Z.\n' +
'Estas opciones son mutuamente exluyentes. Si se seleccionan las dos, el' +
'algoritmo escoge leer los datos.')
# READ DATA IS OK
beta_read = Z_file_read['beta']
temp_read = Z_file_read['temperature']
Z_read = Z_file_read['Z']
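    # <E> = -d(ln Z)/d(beta): np.gradient takes the numerical derivative of
    # -ln(Z) with respect to the beta values read above.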
E_avg = np.gradient(-np.log(Z_read),beta_read)
if plot_energy:
plt.figure(figsize=(8,5))
plt.plot(temp_read,E_avg,label=u'$\langle E \\rangle$ via path integral\nnaive sampling')
plt.plot(temp_read,E_QHO_avg_theo(beta_read),label=u'$\langle E \\rangle$ teórico')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$\langle E \\rangle$')
if save_plot_E:
if E_plot_name is None:
script_dir = os.path.dirname(os.path.abspath(__file__))
E_plot_name='E-ms-plot-%s-beta_max_%.3f-'%(potential_string,1./temp_min) +\
'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max) +\
'nx_%d-N_iter_%d.eps'%(nx, N_iter)
E_plot_name = script_dir + '/' + E_plot_name
plt.savefig(E_plot_name)
if show_plot_E:
plt.show()
plt.close()
return E_avg, beta_read.to_numpy()
def average_error(x,xp):
"""
    Usage: computes the Euclidean error between x and xp, normalized by the
           number of elements.
    """
    x, xp = np.asarray(x), np.asarray(xp)
    N = len( x )
    if N != len(xp):
        raise Exception( 'x and xp must have same length.' )
else:
return np.sum((x-xp)**2)**0.5 / N
def optimization(nx_min, nx_max, N_iter_min, N_iter_max, beta_fin):
if nx_min%2==0:
nx_min += 1
if nx_max%2==1:
nx_max += 1
dx_grid = []
beta_ini_grid = []
return
# Use LaTeX in figure text and enlarge the font size
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size':15,'text.latex.unicode':True})
# Get the path so files are saved in the same directory as this script
script_dir = os.path.dirname(os.path.abspath(__file__))
#################################################################################################
# Run the matrix squaring algorithm
#
#
# Decide whether to run this part of the algorithm
run_ms_algorithm = True
# Physical parameters of the algorithm
x_max = 5.
nx = 501
N_iter = 14
beta_fin = 4
potential, potential_string = harmonic_potential, 'harmonic_potential'
# Technical parameters
print_steps = False
save_data = False
file_name = None
relevant_info = None
plot = True
save_plot = False
show_plot = True
if run_ms_algorithm:
rho, trace_rho, grid_x = \
run_pi_x_sq_trotter( x_max, nx, N_iter, beta_fin, potential, potential_string,
print_steps, save_data, file_name, relevant_info, plot,
save_plot, show_plot)
#
#
#################################################################################################
#################################################################################################
# Algorithm for computing the internal energy
#
#
# Decide whether to run this part of the algorithm
calculate_avg_energy = False
# Technical parameters for the partition function and energy calculation
read_Z_data = False
generate_Z_data = True
Z_file_name = None
plot_energy = True
save_plot_E = True
show_plot_E = True
E_plot_name = None #script_dir + 'E.eps'
# Physical parameters for computing Z and <E>
temp_min = 1./10
temp_max = 1./2
N_temp = 10
potential, potential_string = harmonic_potential, 'harmonic_potential'
# More technical parameters
save_Z_csv = True
relevant_info_Z = None
print_Z_data = False
x_max = 7.
nx = 201
N_iter = 7
print_steps = False
save_pi_x_data = False
pi_x_file_name = None
relevant_info_pi_x = None
plot_pi_x = False
save_plot_pi_x = False
show_plot_pi_x = False
if calculate_avg_energy:
average_energy( read_Z_data, generate_Z_data, Z_file_name, plot_energy, save_plot_E,
show_plot_E, E_plot_name,
temp_min, temp_max, N_temp, save_Z_csv, relevant_info_Z, print_Z_data,
x_max, nx, N_iter, potential, potential_string, print_steps, save_pi_x_data,
pi_x_file_name, relevant_info_pi_x,plot_pi_x, save_plot_pi_x, show_plot_pi_x)
#
#
#################################################################################################
|
[
"jeaz.git@gmail.com"
] |
jeaz.git@gmail.com
|
b75d87acdf164ee9e6dbbcfb5307c98cc1673af6
|
484a2df8f642cd6c44f7eb98befa8771b18abbaa
|
/anagram_021.py
|
d9cd23c90c1e59af97e1c73c275a3971ac0991c4
|
[] |
no_license
|
abasse-lab/CA117_PYTHON_LABS
|
78b57bdc0aa563516c14447e02fe8301e5a18333
|
2dff2d55d8302051face20f2e06fb815624c5fd7
|
refs/heads/master
| 2022-09-13T18:15:39.643000
| 2020-05-29T22:49:52
| 2020-05-29T22:49:52
| 259,939,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
#!/usr/bin/env python3
import sys
for line in sys.stdin:
[left, right] = line.strip().split()
list_str1 = list(left)
list_str1.sort()
list_str2 = list(right)
list_str2.sort()
if list_str1 == list_str2:
print(True)
else:
print(False)
|
[
"noreply@github.com"
] |
abasse-lab.noreply@github.com
|
8d8785cec29f765e4ce2ba0c02fb28ab59b55104
|
bb00f6dad7248296db3e1f708ce099bdea53bba1
|
/漫画爬虫/动漫之家.py
|
cc176550cc684fccbf115f638836f514e7df4d3e
|
[] |
no_license
|
sengeiou/python
|
b91f1537cd9eb87ae99f46c4c8b21b285111f415
|
cbef6445df3ef59ac3b4cb7e706ce8a2976ee2ed
|
refs/heads/master
| 2023-05-28T23:33:51.913149
| 2021-06-12T09:36:26
| 2021-06-12T09:41:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,479
|
py
|
import requests
import os
import re
from bs4 import *
from contextlib import *
from tqdm import *
from time import *
# Create the save directory
save_dir = '妖神记'
if save_dir not in os.listdir('./'):
os.mkdir(save_dir)
target_url = "https://www.dmzj.com/info/yaoshenji.html"
# Get the chapter links and chapter names
r = requests.get(url = target_url)
bs = BeautifulSoup(r.text, 'lxml')
list_con_li = bs.find('ul', class_="list_con_li")
cartoon_list = list_con_li.find_all('a')
chapter_names = []
chapter_urls = []
for cartoon in cartoon_list:
href = cartoon.get('href')
name = cartoon.text
chapter_names.insert(0, name)
chapter_urls.insert(0, href)
# Download the comic
for i, url in enumerate(tqdm(chapter_urls)):
download_header = {
'Referer': url
}
name = chapter_names[i]
    # Strip '.' characters from the chapter name
while '.' in name:
name = name.replace('.', '')
chapter_save_dir = os.path.join(save_dir, name)
if name not in os.listdir(save_dir):
os.mkdir(chapter_save_dir)
r = requests.get(url = url)
html = BeautifulSoup(r.text, 'lxml')
script_info = html.script
    pics = re.findall(r'\d{13,14}', str(script_info))
for j, pic in enumerate(pics):
if len(pic) == 13:
pics[j] = pic + '0'
pics = sorted(pics, key=lambda x:int(x))
    chapterpic_hou = re.findall(r'\|(\d{5})\|', str(script_info))[0]
    chapterpic_qian = re.findall(r'\|(\d{4})\|', str(script_info))[0]
for idx, pic in enumerate(pics):
if pic[-1] == '0':
url = 'https://images.dmzj.com/img/chapterpic/' + chapterpic_qian + '/' + chapterpic_hou + '/' + pic[:-1] + '.jpg'
else:
url = 'https://images.dmzj.com/img/chapterpic/' + chapterpic_qian + '/' + chapterpic_hou + '/' + pic + '.jpg'
pic_name = '%03d.jpg' % (idx + 1)
pic_save_path = os.path.join(chapter_save_dir, pic_name)
with closing(requests.get(url, headers = download_header, stream = True)) as response:
chunk_size = 1024
content_size = int(response.headers['content-length'])
if response.status_code == 200:
with open(pic_save_path, "wb") as file:
for data in response.iter_content(chunk_size=chunk_size):
file.write(data)
else:
print('链接异常')
sleep(10)
|
[
"leyuxuan1230@aliyun.com"
] |
leyuxuan1230@aliyun.com
|
5dec90b58863acc02eace86c46807f72c1e085a4
|
a4d339bc0794f7a1770e63ee78035d6e6c52b2ec
|
/src/myLineSearch.py
|
2b1257d61938431e381442de22a668f43bdb6dff
|
[] |
no_license
|
matthiasware/nlp-solver
|
ab82fcc1296df026a969b8ae11609427ac6907d1
|
df9c750378155d091624c7816a0f766accd09afd
|
refs/heads/master
| 2020-04-12T11:58:19.681705
| 2018-12-20T00:40:35
| 2018-12-20T00:40:35
| 162,478,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,176
|
py
|
from __future__ import division, print_function, absolute_import
from warnings import warn
from scipy.optimize import minpack2
import numpy as np
from scipy._lib.six import xrange
__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
'scalar_search_wolfe1', 'scalar_search_wolfe2',
'line_search_armijo']
class LineSearchWarning(RuntimeWarning):
pass
def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
old_fval=None, old_old_fval=None,
args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
xtol=1e-14):
if gfk is None:
gfk = fprime(xk)
if isinstance(fprime, tuple):
eps = fprime[1]
fprime = fprime[0]
newargs = (f, eps) + args
gradient = False
else:
newargs = args
gradient = True
gval = [gfk]
gc = [0]
fc = [0]
def phi(s):
fc[0] += 1
return f(xk + s*pk, *args)
def derphi(s):
gval[0] = fprime(xk + s*pk, *newargs)
if gradient:
gc[0] += 1
else:
fc[0] += len(xk) + 1
return np.dot(gval[0], pk)
derphi0 = np.dot(gfk, pk)
stp, fval, old_fval = scalar_search_wolfe1(
phi, derphi, old_fval, old_old_fval, derphi0,
c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
return stp, fc[0], gc[0], fval, old_fval, gval[0]
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9,
amax=50, amin=1e-8, xtol=1e-14):
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None:
derphi0 = derphi(0.)
if old_phi0 is not None and derphi0 != 0:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
if alpha1 < 0:
alpha1 = 1.0
else:
alpha1 = 1.0
phi1 = phi0
derphi1 = derphi0
isave = np.zeros((2,), np.intc)
dsave = np.zeros((13,), float)
task = b'START'
maxiter = 100
for i in xrange(maxiter):
print('alpha1', alpha1)
print('phi1', phi1)
print('derphi1', derphi1)
stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,
c1, c2, xtol, task,
amin, amax, isave, dsave)
print(stp)
if task[:2] == b'FG':
alpha1 = stp
phi1 = phi(stp)
derphi1 = derphi(stp)
else:
break
else:
# maxiter reached, the line search did not converge
stp = None
if task[:5] == b'ERROR' or task[:4] == b'WARN':
stp = None # failed
return stp, phi1, phi0
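# For reference (standard strong Wolfe conditions, which dcsrch enforces):
#     sufficient decrease:  phi(stp) <= phi(0) + c1 * stp * phi'(0)
#     curvature:            |phi'(stp)| <= c2 * |phi'(0)|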
line_search = line_search_wolfe1
def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
old_old_fval=None, args=(), c1=1e-4, c2=0.9,
amax=None,
extra_condition=None, maxiter=10):
fc = [0]
gc = [0]
gval = [None]
gval_alpha = [None]
def phi(alpha):
fc[0] += 1
# f3 = f(xk + alpha * pk, *args)
# print('called f with alpha = ', alpha, f3)
return f(xk + alpha * pk, *args)
if isinstance(myfprime, tuple):
def derphi(alpha):
fc[0] += len(xk) + 1
eps = myfprime[1]
fprime = myfprime[0]
newargs = (f, eps) + args
gval[0] = fprime(xk + alpha * pk, *newargs) # store for later use
gval_alpha[0] = alpha
return np.dot(gval[0], pk)
else:
fprime = myfprime
def derphi(alpha):
gc[0] += 1
gval[0] = fprime(xk + alpha * pk, *args) # store for later use
gval_alpha[0] = alpha
# g3 = np.dot(gval[0], pk)
# print("called f' with alpha = ", alpha, g3)
return np.dot(gval[0], pk)
if gfk is None:
gfk = fprime(xk, *args)
derphi0 = np.dot(gfk, pk)
if extra_condition is not None:
# Add the current gradient as argument, to avoid needless
# re-evaluation
def extra_condition2(alpha, phi):
if gval_alpha[0] != alpha:
derphi(alpha)
x = xk + alpha * pk
return extra_condition(alpha, x, phi, gval[0])
else:
extra_condition2 = None
alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
extra_condition2, maxiter=maxiter)
# print('and now here')
# print('alpha_star', alpha_star)
if derphi_star is None:
warn('The line search algorithm did not converge', LineSearchWarning)
else:
# derphi_star is a number (derphi) -- so use the most recently
# calculated gradient used in computing it derphi = gfk*pk
# this is the gradient at the next step no need to compute it
# again in the outer loop.
derphi_star = gval[0]
return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
def scalar_search_wolfe2(phi, derphi=None, phi0=None,
old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9, amax=None,
extra_condition=None, maxiter=10):
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None and derphi is not None:
derphi0 = derphi(0.)
alpha0 = 0
if old_phi0 is not None and derphi0 != 0:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
else:
alpha1 = 1.0
if alpha1 < 0:
alpha1 = 1.0
phi_a1 = phi(alpha1)
#derphi_a1 = derphi(alpha1) evaluated below
phi_a0 = phi0
derphi_a0 = derphi0
if extra_condition is None:
extra_condition = lambda alpha, phi: True
# print('#' * 20)
for i in xrange(maxiter):
        if alpha1 == 0 or (amax is not None and alpha0 == amax):
            # alpha1 == 0: This shouldn't happen. Perhaps the increment has
            # slipped below machine precision?
            if alpha1 == 0:
                alpha_star = None
                phi_star = phi0
                phi0 = old_phi0
                derphi_star = None
                msg = 'Rounding errors prevent the line search from converging'
                warn(msg, LineSearchWarning)
                break
            # amax reached: accept alpha0 as the step instead of warning here.
            # (Upstream behavior would warn: "The line search algorithm could
            # not find a solution less than or equal to amax: %s" % amax)
            alpha_star = alpha0
            phi_star = phi_a0
            derphi_star = derphi_a0
            return alpha_star, phi_star, phi0, derphi_star
if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
((phi_a1 >= phi_a0) and (i > 1)):
# print('1')
alpha_star, phi_star, derphi_star = \
_zoom(alpha0, alpha1, phi_a0,
phi_a1, derphi_a0, phi, derphi,
phi0, derphi0, c1, c2, extra_condition)
break
derphi_a1 = derphi(alpha1)
if (abs(derphi_a1) <= -c2*derphi0):
# print('4')
if extra_condition(alpha1, phi_a1):
alpha_star = alpha1
phi_star = phi_a1
derphi_star = derphi_a1
break
if (derphi_a1 >= 0):
# print('2')
alpha_star, phi_star, derphi_star = \
_zoom(alpha1, alpha0, phi_a1,
phi_a0, derphi_a1, phi, derphi,
phi0, derphi0, c1, c2, extra_condition)
break
# print('5')
alpha2 = 2 * alpha1 # increase by factor of two on each iteration
if amax is not None:
alpha2 = min(alpha2, amax)
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi(alpha1)
derphi_a0 = derphi_a1
else:
# stopping test maxiter reached
# print('3')
# print('alpha1', alpha1)
alpha_star = alpha1
phi_star = phi_a1
derphi_star = derphi_a1
# print('we are here')
# print(alpha_star, phi_star, phi0, derphi_star)
# warn('The line search algorithm did not converge', LineSearchWarning)
return alpha_star, phi_star, phi0, derphi_star
def _cubicmin(a, fa, fpa, b, fb, c, fc):
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
C = fpa
db = b - a
dc = c - a
denom = (db * dc) ** 2 * (db - dc)
d1 = np.empty((2, 2))
d1[0, 0] = dc ** 2
d1[0, 1] = -db ** 2
d1[1, 0] = -dc ** 3
d1[1, 1] = db ** 3
[A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
fc - fa - C * dc]).flatten())
A /= denom
B /= denom
radical = B * B - 3 * A * C
xmin = a + (-B + np.sqrt(radical)) / (3 * A)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _quadmin(a, fa, fpa, b, fb):
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
D = fa
C = fpa
db = b - a * 1.0
B = (fb - D - C * db) / (db * db)
xmin = a - C / (2.0 * B)
except ArithmeticError:
return None
if not np.isfinite(xmin):
return None
return xmin
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
phi, derphi, phi0, derphi0, c1, c2, extra_condition):
"""
Part of the optimization algorithm in `scalar_search_wolfe2`.
"""
maxiter = 20
i = 0
delta1 = 0.2 # cubic interpolant check
delta2 = 0.1 # quadratic interpolant check
phi_rec = phi0
a_rec = 0
while True:
# interpolate to find a trial step length between a_lo and
# a_hi Need to choose interpolation here. Use cubic
# interpolation and then if the result is within delta *
# dalpha or outside of the interval bounded by a_lo or a_hi
# then use quadratic interpolation, if the result is still too
# close, then use bisection
dalpha = a_hi - a_lo
if dalpha < 0:
a, b = a_hi, a_lo
else:
a, b = a_lo, a_hi
# minimizer of cubic interpolant
# (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
#
# if the result is too close to the end points (or out of the
# interval) then use quadratic interpolation with phi_lo,
# derphi_lo and phi_hi if the result is stil too close to the
# end points (or out of the interval) then use bisection
if (i > 0):
cchk = delta1 * dalpha
a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
a_rec, phi_rec)
if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
qchk = delta2 * dalpha
a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
a_j = a_lo + 0.5*dalpha
# Check new value of a_j
phi_aj = phi(a_j)
if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_j
phi_hi = phi_aj
else:
derphi_aj = derphi(a_j)
if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
a_star = a_j
val_star = phi_aj
valprime_star = derphi_aj
break
if derphi_aj*(a_hi - a_lo) >= 0:
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_lo
phi_hi = phi_lo
else:
phi_rec = phi_lo
a_rec = a_lo
a_lo = a_j
phi_lo = phi_aj
derphi_lo = derphi_aj
i += 1
if (i > maxiter):
# print('a_j', a_j)
# better this than nothing
a_star = a_j
val_star = phi_aj
valprime_star = derphi(a_j)
break
# Failed to find a conforming step size
a_star = None
val_star = None
valprime_star = None
break
return a_star, val_star, valprime_star
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1*pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval # compute f(xk) -- done in past loop
derphi0 = np.dot(gfk, pk)
alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
alpha0=alpha0)
return alpha, fc[0], phi1
def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""
Compatibility wrapper for `line_search_armijo`
"""
r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
alpha0=alpha0)
return r[0], r[1], 0, r[2]
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
"""Minimize over alpha, the function ``phi(alpha)``.
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
alpha > 0 is assumed to be a descent direction.
Returns
-------
alpha
phi1
"""
phi_a0 = phi(alpha0)
if phi_a0 <= phi0 + c1*alpha0*derphi0:
return alpha0, phi_a0
# Otherwise compute the minimizer of a quadratic interpolant:
alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
phi_a1 = phi(alpha1)
if (phi_a1 <= phi0 + c1*alpha1*derphi0):
return alpha1, phi_a1
# Otherwise loop with cubic interpolation until we find an alpha which
    # satisfies the first Wolfe condition (since we are backtracking, we will
    # assume that the value of alpha is not too small and satisfies the second
    # condition).
while alpha1 > amin: # we are assuming alpha>0 is a descent direction
factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
a = a / factor
b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
b = b / factor
alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
phi_a2 = phi(alpha2)
if (phi_a2 <= phi0 + c1*alpha2*derphi0):
return alpha2, phi_a2
if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
alpha2 = alpha1 / 2.0
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi_a2
# Failed to find a suitable step length
return None, phi_a1
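# A minimal usage sketch for scalar_search_armijo (hypothetical quadratic, not
# part of the original module): minimize phi(a) = (a - 0.7)**2.
#     phi = lambda a: (a - 0.7)**2
#     phi0 = phi(0.0)              # phi at a = 0
#     derphi0 = 2 * (0.0 - 0.7)    # phi'(0) = -1.4, a descent direction
#     alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0)
#     # the returned alpha satisfies phi(alpha) <= phi0 + 1e-4 * alpha * derphi0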
def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
gamma=1e-4, tau_min=0.1, tau_max=0.5):
f_k = prev_fs[-1]
f_bar = max(prev_fs)
alpha_p = 1
alpha_m = 1
alpha = 1
while True:
xp = x_k + alpha_p * d
fp, Fp = f(xp)
if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
alpha = alpha_p
break
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
xp = x_k - alpha_m * d
fp, Fp = f(xp)
if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
alpha = -alpha_m
break
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
return alpha, xp, fp, Fp
def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
gamma=1e-4, tau_min=0.1, tau_max=0.5,
nu=0.85):
alpha_p = 1
alpha_m = 1
alpha = 1
while True:
xp = x_k + alpha_p * d
fp, Fp = f(xp)
if fp <= C + eta - gamma * alpha_p**2 * f_k:
alpha = alpha_p
break
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
xp = x_k - alpha_m * d
fp, Fp = f(xp)
if fp <= C + eta - gamma * alpha_m**2 * f_k:
alpha = -alpha_m
break
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
# Update C and Q
Q_next = nu * Q + 1
C = (nu * Q * (C + eta) + fp) / Q_next
Q = Q_next
return alpha, xp, fp, Fp, C, Q
|
[
"matthias@mitterreiter.de"
] |
matthias@mitterreiter.de
|
829b749f4cee3c98575c7f598245c168f036ea5e
|
67b0379a12a60e9f26232b81047de3470c4a9ff9
|
/hotline/utils.py
|
e2f6ee454c726943242857094b18b04a9eb73a3a
|
[] |
no_license
|
vintkor/whitemandarin
|
8ea9022b889fac718e0858873a07c586cf8da729
|
5afcfc5eef1bb1cc2febf519b04a4819a7b9648f
|
refs/heads/master
| 2021-05-06T03:35:09.367375
| 2017-12-20T15:43:08
| 2017-12-20T15:43:08
| 114,904,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,242
|
py
|
# -*- coding: utf-8 -*-
from grab import Grab
from pyquery import PyQuery as pq
import json
import os
from cms.local_settings import PROJECT_PATH
import re
from shop.models import *
import time
def scanhot(pageurl):
proxy = Proxy.objects.filter(banned=False).order_by('?')
if proxy:
proxy = proxy[0]
else:
time.sleep(60)
return scanhot(pageurl)
goog = Grab(timeout=30)
goog.setup(proxy=proxy.hostport, proxy_userpwd=proxy.userpass)
goog.go(pageurl)
if goog.doc.pyquery('.g-recaptcha').eq(0):
proxy.banned = True
proxy.save()
return scanhot(pageurl)
return goog
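# Note on scanhot above: it picks a random proxy that is not banned, sleeps and
# retries when none are available, and treats a reCAPTCHA element in the
# response as a sign the proxy was banned, marking it and recursing with a
# fresh proxy.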
def get_hotline_data2(sresult):
# g = Grab(timeout=30)
# g.load_proxylist(os.path.dirname(os.path.realpath(__file__)) + "/proxy1.txt", source_type='text_file', proxy_type='http', auto_change=True)
# g.go(sresult)
g = scanhot(sresult)
pitems = g.pyquery('div#gallery-box > a')
# import codecs
# file = codecs.open(PROJECT_PATH + '/../6.txt', "w", "utf-8")
# file.write(g.response.unicode_body())
# file.close()
images = []
videosresults = []
mainproperties = []
advensedroperties = []
# assert False, pitems.length
for pitem in pitems:
images.append(pitem.attrib['href'])
# self.stdout.write()
videos = g.pyquery('img.ico.g_statistic')
for pitem in videos:
videosresults.append(pitem.attrib['data-hl_gallery_video_hash'])
# self.stdout.write(pitem.attrib['data-hl_gallery_video_hash'])
# assert False, videos.length
prophtml = pq(g.response.unicode_body())
properties = prophtml('table#full-props-list > tr')
mproperties = prophtml('div#short-props-list > table > tr')
# assert False, len(properties)
try:
name = prophtml('h1.title-24.p_b-5').outer_html()
name = re.sub(r'<[^>]*?>', '', name).strip()
name = re.sub("^\s+|\n|\r|\s+$", '', name)
hotline_name = re.sub(r'\s+', ' ', name).replace('"', '')
except:
hotline_name = ""
hotline = sresult.split('hotline.ua')[1]
try:
price = prophtml('a.range-price.orng strong')[0].text.split(u' – ')
price_min = int(price[0].replace(' ','').replace(u'\xa0', ''))
price_max = int(price[1].replace(' ','').replace(u'\xa0', ''))
except:
price_min = 0
price_max = 0
# price_max = re.sub(r'\s+', ' ', price_max)
# assert False, price_max
# file = codecs.open(PROJECT_ROOT + '/static/test2.txt', "w", "utf-8")
# file.write(price_max)
# file.close()
# assert False, price
for prop in properties:
prop_pq = pq(prop)
try:
name = prop_pq('th').outer_html()
name = re.sub(r'<[^>]*?>', '', name).strip().replace('"', '')
prop_new = prop_pq('td').outer_html()
prop_new = re.sub(r'<[^>]*?>', '', prop_new).strip().replace('"', '')
advensedroperties.append({'name': name, 'prop': prop_new})
except:
pass
    # The short (main) properties come from mproperties, not the full list.
    for prop in mproperties:
try:
prop_pq = pq(prop)
name = prop_pq('th').outer_html()
name = re.sub(r'<[^>]*?>', '', name).strip().replace('"', '')
prop_new = prop_pq('td').outer_html()
prop_new = re.sub(r'<[^>]*?>', '', prop_new).strip().replace('"', '')
mainproperties.append({'name': name, 'prop': prop_new})
except:
pass
return {
'url': hotline,
'name': hotline_name,
'photos': images,
'videos': videosresults,
'properties': advensedroperties,
'price_min': price_min,
'price_max': price_max, }
def get_hotline_data(sresult, ppitem=None, save=True):
    g = scanhot(sresult)
    pitems = g.pyquery('div#gallery-box > a')
    images = []
    videosresults = []
    mainproperties = []
    advensedroperties = []
    for pitem in pitems:
        images.append(pitem.attrib['href'])
    videos = g.pyquery('img.ico.g_statistic')
    for pitem in videos:
        videosresults.append(pitem.attrib['data-hl_gallery_video_hash'])
    prophtml = pq(g.response.unicode_body())
    properties = prophtml('table#full-props-list > tr')
    mproperties = prophtml('div#short-props-list > table > tr')
    try:
        name = prophtml('h1.title-24.p_b-5').outer_html()
        name = re.sub(r'<[^>]*?>', '', name).strip()
        name = re.sub(r"^\s+|\n|\r|\s+$", '', name)
        hotline_name = re.sub(r'\s+', ' ', name).replace('"', '')
    except Exception:
        hotline_name = ""
    hotline = sresult.split('hotline.ua')[1]
    try:
        price = prophtml('a.range-price.orng strong')[0].text.split(u' – ')
        price_min = int(price[0].replace(' ', '').replace(u'\xa0', ''))
        price_max = int(price[1].replace(' ', '').replace(u'\xa0', ''))
    except Exception:
        price_min = 0
        price_max = 0
    for prop in properties:
        prop_pq = pq(prop)
        try:
            name = prop_pq('th').outer_html()
            name = re.sub(r'<[^>]*?>', '', name).strip().replace('"', '')
            prop_new = prop_pq('td').outer_html()
            prop_new = re.sub(r'<[^>]*?>', '', prop_new).strip().replace('"', '')
            advensedroperties.append({'name': name, 'prop': prop_new})
        except Exception:
            pass
    # As above, the short property list must come from `mproperties`;
    # re-iterating `properties` was a copy-paste bug.
    for prop in mproperties:
        try:
            prop_pq = pq(prop)
            name = prop_pq('th').outer_html()
            name = re.sub(r'<[^>]*?>', '', name).strip().replace('"', '')
            prop_new = prop_pq('td').outer_html()
            prop_new = re.sub(r'<[^>]*?>', '', prop_new).strip().replace('"', '')
            mainproperties.append({'name': name, 'prop': prop_new})
        except Exception:
            pass
    # Persist onto the passed model instance when requested; the guard on
    # ppitem avoids an AttributeError with the default ppitem=None.
    if save and ppitem is not None:
        ppitem.hotline = hotline
        ppitem.hotline_name = hotline_name
        ppitem.hotline_photos = json.dumps(images)
        ppitem.hotline_videos = json.dumps(videosresults)
        ppitem.hotline_mainfilters = json.dumps(mainproperties)
        ppitem.hotline_filters = json.dumps(advensedroperties)
        ppitem.hotline_price_min = price_min
        ppitem.hotline_price_max = price_max
        ppitem.save()
    else:
        return {
            'url': hotline,
            'name': hotline_name,
            'photos': images,
            'videos': videosresults,
            'properties': advensedroperties,
            'price_min': price_min,
            'price_max': price_max, }
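# Usage sketch (illustrative only; `item` stands for a hypothetical product
# model instance carrying the hotline_* fields referenced above):
# data = get_hotline_data2('https://hotline.ua/some-product/')
# get_hotline_data('https://hotline.ua/some-product/', ppitem=item, save=True)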
|
[
"alkv84@yandex.ru"
] |
alkv84@yandex.ru
|
5afb22c8a9dc87d352c40841a189e5ed0c9e9ddf
|
197e8216091b4e55f01fd837327f1f64301b7f7d
|
/abballsitesmrr/__init__.py
|
fd77e8c8517e2a2d69ca7dd6c7139921f184cde1
|
[] |
no_license
|
jcarter62/getabbflow
|
1b2bce976ce3e438f6c33fc713d71e4eec33890f
|
69bc89808c8df12ec72ee2cbcb9b0b01d91ca277
|
refs/heads/master
| 2022-12-12T12:00:47.391918
| 2020-01-04T01:17:34
| 2020-01-04T01:17:34
| 214,252,662
| 0
| 0
| null | 2022-12-08T06:42:29
| 2019-10-10T18:08:41
|
Python
|
UTF-8
|
Python
| false
| false
| 463
|
py
|
from abbsitemrr import AbbSiteMRR
from abbsites import AbbSites
class AbbAllSitesMRR:
def __init__(self):
self.sites = AbbSites()
self.data = []
sitemrr = AbbSiteMRR()
for s in self.sites.names:
sitemrr.set_name(name=s)
if sitemrr.record is None:
pass
else:
self.data.append(sitemrr.record)
self.data.sort(key=lambda x: x['site'])
return
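# Usage sketch (assumes the sibling abbsites/abbsitemrr modules are importable):
# mrr = AbbAllSitesMRR()
# for rec in mrr.data:
#     print(rec['site'])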
|
[
"jcarter62@gmail.com"
] |
jcarter62@gmail.com
|
8f8dd3be93c994a551cfa0c126b84be79eaa9ca9
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/0/bgl.py
|
806981be4c75fcf9e99552991750506b610faa08
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()
def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'bgL':
                printFunction(data[1:])
            else:
                print('ERROR')
    return
if __name__ == '__main__':
main(sys.argv[1])
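# Example input line this interpreter accepts (quotes must be standalone
# tokens, since the line is split on whitespace):
#   bgL " hello world "
# which prints: hello world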
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
48b7105c6dfd465db3b3a7c65c2cc69ff8163601
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/tests/test_vis_cam.py
|
bb3ff7237aa5aa39cfa85a7a1bc925671d1024e8
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,088
|
py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import torch
from parameterized import parameterized
from monai.networks.nets import DenseNet, DenseNet121, SEResNet50
from monai.visualize import CAM
# 2D
TEST_CASE_0 = [
{
"model": "densenet2d",
"shape": (2, 1, 48, 64),
"feature_shape": (2, 1, 1, 2),
"target_layers": "class_layers.relu",
"fc_layers": "class_layers.out",
},
(2, 1, 48, 64),
]
# 3D
TEST_CASE_1 = [
{
"model": "densenet3d",
"shape": (2, 1, 6, 6, 6),
"feature_shape": (2, 1, 2, 2, 2),
"target_layers": "class_layers.relu",
"fc_layers": "class_layers.out",
},
(2, 1, 6, 6, 6),
]
# 2D
TEST_CASE_2 = [
{
"model": "senet2d",
"shape": (2, 3, 64, 64),
"feature_shape": (2, 1, 2, 2),
"target_layers": "layer4",
"fc_layers": "last_linear",
},
(2, 1, 64, 64),
]
# 3D
TEST_CASE_3 = [
{
"model": "senet3d",
"shape": (2, 3, 8, 8, 48),
"feature_shape": (2, 1, 1, 1, 2),
"target_layers": "layer4",
"fc_layers": "last_linear",
},
(2, 1, 8, 8, 48),
]
class TestClassActivationMap(unittest.TestCase):
@parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
def test_shape(self, input_data, expected_shape):
if input_data["model"] == "densenet2d":
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
if input_data["model"] == "densenet3d":
model = DenseNet(
spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,)
)
if input_data["model"] == "senet2d":
model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
if input_data["model"] == "senet3d":
model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()
cam = CAM(nn_module=model, target_layers=input_data["target_layers"], fc_layers=input_data["fc_layers"])
image = torch.rand(input_data["shape"], device=device)
result = cam(x=image, layer_idx=-1)
fea_shape = cam.feature_map_size(input_data["shape"], device=device)
self.assertTupleEqual(fea_shape, input_data["feature_shape"])
self.assertTupleEqual(result.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
Project-MONAI.noreply@github.com
|
2058bf85584d80481f537ccf94197056e073335b
|
4bc29617a307de54a7fe300c8e863f03321bd003
|
/lib/python3.8/site-packages/trytond/model/modelsql.py
|
9be8b62e7d562ca328830c8ccd9579d121aff876
|
[] |
no_license
|
Davidoff2103/tryton-training
|
f594970e77646f0ffeb42eb4f903252ff0b6c201
|
8d1ec4f2b623f7ca48f38bfda2ac15c01ded35a7
|
refs/heads/master
| 2023-06-01T11:55:05.400233
| 2021-06-09T10:06:56
| 2021-06-09T10:06:56
| 375,275,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62,268
|
py
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import datetime
from itertools import islice, chain
from collections import OrderedDict
from functools import wraps
from sql import (Table, Column, Literal, Desc, Asc, Expression, Null,
NullsFirst, NullsLast)
from sql.functions import CurrentTimestamp, Extract
from sql.conditionals import Coalesce
from sql.operators import Or, And, Operator, Equal
from sql.aggregate import Count, Max
from trytond.model import ModelStorage, ModelView
from trytond.model import fields
from trytond import backend
from trytond.tools import reduce_ids, grouped_slice, cursor_dict
from trytond.transaction import Transaction
from trytond.pool import Pool
from trytond.cache import LRUDict
from trytond.exceptions import ConcurrencyException
from trytond.rpc import RPC
from trytond.config import config
from .modelstorage import cache_size, is_leaf
class Constraint(object):
__slots__ = ('_table',)
def __init__(self, table):
assert isinstance(table, Table)
self._table = table
@property
def table(self):
return self._table
def __str__(self):
raise NotImplementedError
@property
def params(self):
raise NotImplementedError
class Check(Constraint):
__slots__ = ('_expression',)
def __init__(self, table, expression):
super(Check, self).__init__(table)
assert isinstance(expression, Expression)
self._expression = expression
@property
def expression(self):
return self._expression
def __str__(self):
return 'CHECK(%s)' % self.expression
@property
def params(self):
return self.expression.params
class Unique(Constraint):
__slots__ = ('_columns',)
def __init__(self, table, *columns):
super(Unique, self).__init__(table)
assert all(isinstance(col, Column) for col in columns)
self._columns = tuple(columns)
@property
def columns(self):
return self._columns
@property
def operators(self):
return tuple(Equal for c in self._columns)
def __str__(self):
return 'UNIQUE(%s)' % (', '.join(map(str, self.columns)))
@property
def params(self):
p = []
for column in self.columns:
p.extend(column.params)
return tuple(p)
class Exclude(Constraint):
__slots__ = ('_excludes', '_where')
def __init__(self, table, *excludes, **kwargs):
super(Exclude, self).__init__(table)
assert all(isinstance(c, Expression) and issubclass(o, Operator)
for c, o in excludes), excludes
self._excludes = tuple(excludes)
        where = kwargs.get('where')
        if where is not None:
            assert isinstance(where, Expression)
        # Always assign the slot so the `where` property is defined even
        # when no where clause is given (__slots__ would otherwise raise
        # AttributeError).
        self._where = where
@property
def excludes(self):
return self._excludes
@property
def columns(self):
return tuple(c for c, _ in self._excludes)
@property
def operators(self):
return tuple(o for _, o in self._excludes)
@property
def where(self):
return self._where
def __str__(self):
exclude = ', '.join('%s WITH %s' % (column, operator._operator)
for column, operator in self.excludes)
where = ''
if self.where:
where = ' WHERE ' + str(self.where)
return 'EXCLUDE (%s)' % exclude + where
@property
def params(self):
p = []
for column, operator in self._excludes:
p.extend(column.params)
if self.where:
p.extend(self.where.params)
return tuple(p)
def no_table_query(func):
@wraps(func)
def wrapper(cls, *args, **kwargs):
if callable(cls.table_query):
raise NotImplementedError("On table_query")
return func(cls, *args, **kwargs)
return wrapper
class ModelSQL(ModelStorage):
"""
Define a model with storage in database.
"""
_table = None # The name of the table in database
_order = None
_order_name = None # Use to force order field when sorting on Many2One
_history = False
table_query = None
@classmethod
def __setup__(cls):
super(ModelSQL, cls).__setup__()
cls._sql_constraints = []
cls._order = [('id', 'ASC')]
cls._sql_error_messages = {}
if issubclass(cls, ModelView):
cls.__rpc__.update({
'history_revisions': RPC(),
})
cls._table = config.get('table', cls.__name__, default=cls._table)
if not cls._table:
cls._table = cls.__name__.replace('.', '_')
assert cls._table[-9:] != '__history', \
'Model _table %s cannot end with "__history"' % cls._table
@classmethod
def __table__(cls):
if callable(cls.table_query):
return cls.table_query()
else:
return Table(cls._table)
@classmethod
def __table_history__(cls):
if not cls._history:
raise ValueError('No history table')
return Table(cls._table + '__history')
@classmethod
def __table_handler__(cls, module_name=None, history=False):
TableHandler = backend.get('TableHandler')
return TableHandler(cls, module_name, history=history)
@classmethod
def __register__(cls, module_name):
cursor = Transaction().connection.cursor()
TableHandler = backend.get('TableHandler')
super(ModelSQL, cls).__register__(module_name)
if callable(cls.table_query):
return
pool = Pool()
        # Instantiate after the callable test to prevent calling table_query,
        # which may rely on other models being registered
sql_table = cls.__table__()
# create/update table in the database
table = cls.__table_handler__(module_name)
if cls._history:
history_table = cls.__table_handler__(module_name, history=True)
history_table.index_action('id', action='add')
for field_name, field in cls._fields.items():
if field_name == 'id':
continue
sql_type = field.sql_type()
if not sql_type:
continue
default = None
if field_name in cls._defaults:
def default():
default_ = cls._clean_defaults({
field_name: cls._defaults[field_name](),
})[field_name]
return field.sql_format(default_)
table.add_column(field_name, field._sql_type, default=default)
if cls._history:
history_table.add_column(field_name, field._sql_type)
if isinstance(field, (fields.Integer, fields.Float)):
# migration from tryton 2.2
table.db_default(field_name, None)
if isinstance(field, (fields.Boolean)):
table.db_default(field_name, False)
if isinstance(field, fields.Many2One):
if field.model_name in ('res.user', 'res.group'):
# XXX need to merge ir and res
ref = field.model_name.replace('.', '_')
else:
ref_model = pool.get(field.model_name)
if (issubclass(ref_model, ModelSQL)
and not callable(ref_model.table_query)):
ref = ref_model._table
# Create foreign key table if missing
if not TableHandler.table_exist(ref):
TableHandler(ref_model)
else:
ref = None
if field_name in ['create_uid', 'write_uid']:
# migration from 3.6
table.drop_fk(field_name)
elif ref:
table.add_fk(field_name, ref, field.ondelete)
table.index_action(
field_name, action=field.select and 'add' or 'remove')
required = field.required
# Do not set 'NOT NULL' for Binary field as the database column
# will be left empty if stored in the filestore or filled later by
# the set method.
if isinstance(field, fields.Binary):
required = False
table.not_null_action(
field_name, action=required and 'add' or 'remove')
for field_name, field in cls._fields.items():
if isinstance(field, fields.Many2One) \
and field.model_name == cls.__name__ \
and field.left and field.right:
left_default = cls._defaults.get(field.left, lambda: None)()
right_default = cls._defaults.get(field.right, lambda: None)()
cursor.execute(*sql_table.select(sql_table.id,
where=(Column(sql_table, field.left) == left_default)
| (Column(sql_table, field.left) == Null)
| (Column(sql_table, field.right) == right_default)
| (Column(sql_table, field.right) == Null),
limit=1))
if cursor.fetchone():
cls._rebuild_tree(field_name, None, 0)
for ident, constraint, _ in cls._sql_constraints:
table.add_constraint(ident, constraint)
if cls._history:
cls._update_history_table()
history_table = cls.__table_history__()
cursor.execute(*sql_table.select(sql_table.id, limit=1))
if cursor.fetchone():
cursor.execute(
*history_table.select(history_table.id, limit=1))
if not cursor.fetchone():
columns = [n for n, f in cls._fields.items()
if f.sql_type()]
cursor.execute(*history_table.insert(
[Column(history_table, c) for c in columns],
sql_table.select(*(Column(sql_table, c)
for c in columns))))
cursor.execute(*history_table.update(
[history_table.write_date], [None]))
@classmethod
def _update_history_table(cls):
if cls._history:
history_table = cls.__table_handler__(history=True)
for field_name, field in cls._fields.items():
if not field.sql_type():
continue
history_table.add_column(field_name, field._sql_type)
@classmethod
def _get_error_messages(cls):
res = super(ModelSQL, cls)._get_error_messages()
res += list(cls._sql_error_messages.values())
for _, _, error in cls._sql_constraints:
res.append(error)
return res
@classmethod
def __raise_integrity_error(
cls, exception, values, field_names=None, transaction=None):
pool = Pool()
TableHandler = backend.get('TableHandler')
if field_names is None:
field_names = list(cls._fields.keys())
if transaction is None:
transaction = Transaction()
for field_name in field_names:
if field_name not in cls._fields:
continue
field = cls._fields[field_name]
# Check required fields
if (field.required
and field.sql_type()
and field_name not in ('create_uid', 'create_date')):
if values.get(field_name) is None:
cls.raise_user_error('required_field',
error_args=cls._get_error_args(field_name))
if isinstance(field, fields.Many2One) and values.get(field_name):
Model = pool.get(field.model_name)
create_records = transaction.create_records.get(
field.model_name, set())
delete_records = transaction.delete_records.get(
field.model_name, set())
target_records = Model.search([
('id', '=', field.sql_format(values[field_name])),
], order=[])
if not ((target_records
or (values[field_name] in create_records))
and (values[field_name] not in delete_records)):
error_args = cls._get_error_args(field_name)
error_args['value'] = values[field_name]
cls.raise_user_error('foreign_model_missing',
error_args=error_args)
for name, _, error in cls._sql_constraints:
if TableHandler.convert_name(name) in str(exception):
cls.raise_user_error(error)
for name, error in cls._sql_error_messages.items():
if TableHandler.convert_name(name) in str(exception):
cls.raise_user_error(error)
@classmethod
def history_revisions(cls, ids):
pool = Pool()
ModelAccess = pool.get('ir.model.access')
User = pool.get('res.user')
cursor = Transaction().connection.cursor()
ModelAccess.check(cls.__name__, 'read')
table = cls.__table_history__()
user = User.__table__()
revisions = []
for sub_ids in grouped_slice(ids):
where = reduce_ids(table.id, sub_ids)
cursor.execute(*table.join(user, 'LEFT',
Coalesce(table.write_uid, table.create_uid) == user.id)
.select(
Coalesce(table.write_date, table.create_date),
table.id,
user.name,
where=where))
revisions.append(cursor.fetchall())
revisions = list(chain(*revisions))
revisions.sort(reverse=True)
# SQLite uses char for COALESCE
if revisions and isinstance(revisions[0][0], str):
strptime = datetime.datetime.strptime
format_ = '%Y-%m-%d %H:%M:%S.%f'
revisions = [(strptime(timestamp, format_), id_, name)
for timestamp, id_, name in revisions]
return revisions
@classmethod
def _insert_history(cls, ids, deleted=False):
transaction = Transaction()
cursor = transaction.connection.cursor()
if not cls._history:
return
user = transaction.user
table = cls.__table__()
history = cls.__table_history__()
columns = []
hcolumns = []
if not deleted:
fields = cls._fields
else:
fields = {
'id': cls.id,
'write_uid': cls.write_uid,
'write_date': cls.write_date,
}
for fname, field in sorted(fields.items()):
if not field.sql_type():
continue
columns.append(Column(table, fname))
hcolumns.append(Column(history, fname))
for sub_ids in grouped_slice(ids):
if not deleted:
where = reduce_ids(table.id, sub_ids)
cursor.execute(*history.insert(hcolumns,
table.select(*columns, where=where)))
else:
if transaction.database.has_multirow_insert():
cursor.execute(*history.insert(hcolumns,
[[id_, CurrentTimestamp(), user]
for id_ in sub_ids]))
else:
for id_ in sub_ids:
cursor.execute(*history.insert(hcolumns,
[[id_, CurrentTimestamp(), user]]))
@classmethod
def _restore_history(cls, ids, datetime, _before=False):
if not cls._history:
return
transaction = Transaction()
cursor = transaction.connection.cursor()
table = cls.__table__()
history = cls.__table_history__()
columns = []
hcolumns = []
fnames = sorted(n for n, f in cls._fields.items()
if f.sql_type())
for fname in fnames:
columns.append(Column(table, fname))
if fname == 'write_uid':
hcolumns.append(Literal(transaction.user))
elif fname == 'write_date':
hcolumns.append(CurrentTimestamp())
else:
hcolumns.append(Column(history, fname))
def is_deleted(values):
return all(not v for n, v in zip(fnames, values)
if n not in ['id', 'write_uid', 'write_date'])
to_delete = []
to_update = []
for id_ in ids:
column_datetime = Coalesce(history.write_date, history.create_date)
if not _before:
hwhere = (column_datetime <= datetime)
else:
hwhere = (column_datetime < datetime)
hwhere &= (history.id == id_)
horder = (column_datetime.desc, Column(history, '__id').desc)
cursor.execute(*history.select(*hcolumns,
where=hwhere, order_by=horder, limit=1))
values = cursor.fetchone()
if not values or is_deleted(values):
to_delete.append(id_)
else:
to_update.append(id_)
values = list(values)
cursor.execute(*table.update(columns, values,
where=table.id == id_))
rowcount = cursor.rowcount
if rowcount == -1 or rowcount is None:
cursor.execute(*table.select(table.id,
where=table.id == id_))
rowcount = len(cursor.fetchall())
if rowcount < 1:
cursor.execute(*table.insert(columns, [values]))
if to_delete:
for sub_ids in grouped_slice(to_delete):
where = reduce_ids(table.id, sub_ids)
cursor.execute(*table.delete(where=where))
cls._insert_history(to_delete, True)
if to_update:
cls._insert_history(to_update)
@classmethod
def restore_history(cls, ids, datetime):
'Restore record ids from history at the date time'
cls._restore_history(ids, datetime)
@classmethod
def restore_history_before(cls, ids, datetime):
'Restore record ids from history before the date time'
cls._restore_history(ids, datetime, _before=True)
@classmethod
def __check_timestamp(cls, ids):
transaction = Transaction()
cursor = transaction.connection.cursor()
table = cls.__table__()
if not transaction.timestamp:
return
for sub_ids in grouped_slice(ids):
where = Or()
for id_ in sub_ids:
try:
timestamp = transaction.timestamp.pop(
'%s,%s' % (cls.__name__, id_))
except KeyError:
continue
if timestamp is None:
continue
sql_type = fields.Char('timestamp').sql_type().base
where.append((table.id == id_)
& (Extract('EPOCH',
Coalesce(table.write_date, table.create_date)
).cast(sql_type) != timestamp))
if where:
cursor.execute(*table.select(table.id, where=where, limit=1))
if cursor.fetchone():
raise ConcurrencyException(
'Records were modified in the meanwhile')
@classmethod
@no_table_query
def create(cls, vlist):
DatabaseIntegrityError = backend.get('DatabaseIntegrityError')
transaction = Transaction()
cursor = transaction.connection.cursor()
pool = Pool()
Translation = pool.get('ir.translation')
super(ModelSQL, cls).create(vlist)
table = cls.__table__()
modified_fields = set()
defaults_cache = {} # Store already computed default values
new_ids = []
vlist = [v.copy() for v in vlist]
for values in vlist:
# Clean values
for key in ('create_uid', 'create_date',
'write_uid', 'write_date', 'id'):
if key in values:
del values[key]
modified_fields |= set(values.keys())
# Get default values
default = []
for f in cls._fields.keys():
if (f not in values
and f not in ('create_uid', 'create_date',
'write_uid', 'write_date', 'id')):
if f in defaults_cache:
values[f] = defaults_cache[f]
else:
default.append(f)
if default:
defaults = cls.default_get(default, with_rec_name=False)
defaults = cls._clean_defaults(defaults)
values.update(defaults)
defaults_cache.update(defaults)
insert_columns = [table.create_uid, table.create_date]
insert_values = [transaction.user, CurrentTimestamp()]
# Insert record
for fname, value in values.items():
field = cls._fields[fname]
if not hasattr(field, 'set'):
insert_columns.append(Column(table, fname))
insert_values.append(field.sql_format(value))
try:
if transaction.database.has_returning():
cursor.execute(*table.insert(insert_columns,
[insert_values], [table.id]))
id_new, = cursor.fetchone()
else:
id_new = transaction.database.nextid(
transaction.connection, cls._table)
if id_new:
insert_columns.append(table.id)
insert_values.append(id_new)
cursor.execute(*table.insert(insert_columns,
[insert_values]))
else:
cursor.execute(*table.insert(insert_columns,
[insert_values]))
id_new = transaction.database.lastid(cursor)
new_ids.append(id_new)
except DatabaseIntegrityError as exception:
transaction = Transaction()
with Transaction().new_transaction(), \
Transaction().set_context(_check_access=False):
cls.__raise_integrity_error(
exception, values, transaction=transaction)
raise
transaction.create_records.setdefault(cls.__name__,
set()).update(new_ids)
translation_values = {}
fields_to_set = {}
for values, new_id in zip(vlist, new_ids):
for fname, value in values.items():
field = cls._fields[fname]
if (getattr(field, 'translate', False)
and not hasattr(field, 'set')):
translation_values.setdefault(
'%s,%s' % (cls.__name__, fname), {})[new_id] = value
if hasattr(field, 'set'):
args = fields_to_set.setdefault(fname, [])
actions = iter(args)
for ids, val in zip(actions, actions):
if val == value:
ids.append(new_id)
break
else:
args.extend(([new_id], value))
if translation_values:
for name, translations in translation_values.items():
Translation.set_ids(name, 'model', Transaction().language,
list(translations.keys()), list(translations.values()))
for fname in sorted(fields_to_set, key=cls.index_set_field):
fargs = fields_to_set[fname]
field = cls._fields[fname]
field.set(cls, fname, *fargs)
cls._insert_history(new_ids)
field_names = list(cls._fields.keys())
cls._update_mptt(field_names, [new_ids] * len(field_names))
cls.__check_domain_rule(new_ids, 'create')
records = cls.browse(new_ids)
for sub_records in grouped_slice(records, cache_size()):
cls._validate(sub_records)
cls.trigger_create(records)
return records
@classmethod
def read(cls, ids, fields_names=None):
pool = Pool()
Rule = pool.get('ir.rule')
Translation = pool.get('ir.translation')
ModelAccess = pool.get('ir.model.access')
if not fields_names:
fields_names = []
for field_name in list(cls._fields.keys()):
if ModelAccess.check_relation(cls.__name__, field_name,
mode='read'):
fields_names.append(field_name)
super(ModelSQL, cls).read(ids, fields_names=fields_names)
transaction = Transaction()
cursor = Transaction().connection.cursor()
if not ids:
return []
# construct a clause for the rules :
domain = Rule.domain_get(cls.__name__, mode='read')
fields_related = {}
datetime_fields = []
for field_name in fields_names:
if field_name == '_timestamp':
continue
if '.' in field_name:
field, field_related = field_name.split('.', 1)
fields_related.setdefault(field, [])
fields_related[field].append(field_related)
else:
field = cls._fields[field_name]
if hasattr(field, 'datetime_field') and field.datetime_field:
datetime_fields.append(field.datetime_field)
result = []
table = cls.__table__()
in_max = transaction.database.IN_MAX
history_order = None
history_clause = None
history_limit = None
if (cls._history
and transaction.context.get('_datetime')
and not callable(cls.table_query)):
in_max = 1
table = cls.__table_history__()
column = Coalesce(table.write_date, table.create_date)
history_clause = (column <= Transaction().context['_datetime'])
history_order = (column.desc, Column(table, '__id').desc)
history_limit = 1
columns = []
for f in fields_names + list(fields_related.keys()) + datetime_fields:
field = cls._fields.get(f)
if field and field.sql_type():
columns.append(field.sql_column(table).as_(f))
elif f == '_timestamp' and not callable(cls.table_query):
sql_type = fields.Char('timestamp').sql_type().base
columns.append(Extract('EPOCH',
Coalesce(table.write_date, table.create_date)
).cast(sql_type).as_('_timestamp'))
if len(columns):
if 'id' not in fields_names:
columns.append(table.id.as_('id'))
tables = {None: (table, None)}
if domain:
tables, dom_exp = cls.search_domain(
domain, active_test=False, tables=tables)
from_ = convert_from(None, tables)
for sub_ids in grouped_slice(ids, in_max):
sub_ids = list(sub_ids)
red_sql = reduce_ids(table.id, sub_ids)
where = red_sql
if history_clause:
where &= history_clause
if domain:
where &= dom_exp
cursor.execute(*from_.select(*columns, where=where,
order_by=history_order, limit=history_limit))
fetchall = list(cursor_dict(cursor))
if not len(fetchall) == len({}.fromkeys(sub_ids)):
if domain:
where = red_sql
if history_clause:
where &= history_clause
where &= dom_exp
cursor.execute(*from_.select(table.id, where=where,
order_by=history_order, limit=history_limit))
rowcount = cursor.rowcount
if rowcount == -1 or rowcount is None:
rowcount = len(cursor.fetchall())
if rowcount == len({}.fromkeys(sub_ids)):
cls.raise_user_error('access_error', cls.__name__)
cls.raise_user_error('read_error', cls.__name__)
result.extend(fetchall)
else:
result = [{'id': x} for x in ids]
cachable_fields = []
for column in columns:
# Split the output name to remove SQLite type detection
fname = column.output_name.split()[0]
if fname == '_timestamp':
continue
field = cls._fields[fname]
if not hasattr(field, 'get'):
if getattr(field, 'translate', False):
translations = Translation.get_ids(
cls.__name__ + ',' + fname, 'model',
Transaction().language, ids)
for row in result:
row[fname] = translations.get(row['id']) or row[fname]
if fname != 'id':
cachable_fields.append(fname)
# all fields for which there is a get attribute
getter_fields = [f for f in
fields_names + list(fields_related.keys()) + datetime_fields
if f in cls._fields and hasattr(cls._fields[f], 'get')]
if getter_fields and cachable_fields:
cache = transaction.get_cache().setdefault(
cls.__name__, LRUDict(cache_size()))
for row in result:
if row['id'] not in cache:
cache[row['id']] = {}
for fname in cachable_fields:
cache[row['id']][fname] = row[fname]
func_fields = {}
for fname in getter_fields:
field = cls._fields[fname]
if isinstance(field, fields.Function):
key = (field.getter, getattr(field, 'datetime_field', None))
func_fields.setdefault(key, [])
func_fields[key].append(fname)
elif getattr(field, 'datetime_field', None):
for row in result:
with Transaction().set_context(
_datetime=row[field.datetime_field]):
date_result = field.get([row['id']], cls, fname,
values=[row])
row[fname] = date_result[row['id']]
else:
# get the value of that field for all records/ids
getter_result = field.get(ids, cls, fname, values=result)
for row in result:
row[fname] = getter_result[row['id']]
for key in func_fields:
field_list = func_fields[key]
fname = field_list[0]
field = cls._fields[fname]
_, datetime_field = key
if datetime_field:
for row in result:
with Transaction().set_context(
_datetime=row[datetime_field]):
date_results = field.get([row['id']], cls, field_list,
values=[row])
for fname in field_list:
date_result = date_results[fname]
row[fname] = date_result[row['id']]
else:
getter_results = field.get(ids, cls, field_list, values=result)
for fname in field_list:
getter_result = getter_results[fname]
for row in result:
row[fname] = getter_result[row['id']]
to_del = set()
fields_related2values = {}
for fname in list(fields_related.keys()) + datetime_fields:
if fname not in fields_names:
to_del.add(fname)
if fname not in cls._fields:
continue
if fname not in fields_related:
continue
fields_related2values.setdefault(fname, {})
field = cls._fields[fname]
if field._type in ('many2one', 'one2one'):
if hasattr(field, 'model_name'):
Target = pool.get(field.model_name)
else:
Target = field.get_target()
if getattr(field, 'datetime_field', None):
for row in result:
if row[fname] is None:
continue
with Transaction().set_context(
_datetime=row[field.datetime_field]):
date_target, = Target.read([row[fname]],
fields_related[fname])
target_id = date_target.pop('id')
fields_related2values[fname].setdefault(target_id, {})
fields_related2values[
fname][target_id][row['id']] = date_target
else:
for target in Target.read(
[r[fname] for r in result if r[fname]],
fields_related[fname]):
target_id = target.pop('id')
fields_related2values[fname].setdefault(target_id, {})
for row in result:
fields_related2values[
fname][target_id][row['id']] = target
elif field._type == 'reference':
for row in result:
if not row[fname]:
continue
model_name, record_id = row[fname].split(',', 1)
if not model_name:
continue
record_id = int(record_id)
if record_id < 0:
continue
Target = pool.get(model_name)
target, = Target.read([record_id], fields_related[fname])
del target['id']
fields_related2values[fname][row[fname]] = target
if to_del or fields_related or datetime_fields:
for row in result:
for fname in fields_related:
if fname not in cls._fields:
continue
field = cls._fields[fname]
for related in fields_related[fname]:
related_name = '%s.%s' % (fname, related)
value = None
if row[fname]:
if field._type in ('many2one', 'one2one'):
value = fields_related2values[fname][
row[fname]][row['id']][related]
elif field._type == 'reference':
model_name, record_id = row[fname
].split(',', 1)
if model_name:
record_id = int(record_id)
if record_id >= 0:
value = fields_related2values[fname][
row[fname]][related]
row[related_name] = value
for field in to_del:
del row[field]
return result
@classmethod
@no_table_query
def write(cls, records, values, *args):
DatabaseIntegrityError = backend.get('DatabaseIntegrityError')
transaction = Transaction()
cursor = transaction.connection.cursor()
pool = Pool()
Translation = pool.get('ir.translation')
Config = pool.get('ir.configuration')
assert not len(args) % 2
# Remove possible duplicates from all records
all_records = list(OrderedDict.fromkeys(
sum(((records, values) + args)[0:None:2], [])))
all_ids = [r.id for r in all_records]
all_field_names = set()
# Call before cursor cache cleaning
trigger_eligibles = cls.trigger_write_get_eligibles(all_records)
super(ModelSQL, cls).write(records, values, *args)
table = cls.__table__()
cls.__check_timestamp(all_ids)
cls.__check_domain_rule(all_ids, 'write', nodomain='write_error')
fields_to_set = {}
actions = iter((records, values) + args)
for records, values in zip(actions, actions):
ids = [r.id for r in records]
values = values.copy()
# Clean values
for key in ('create_uid', 'create_date',
'write_uid', 'write_date', 'id'):
if key in values:
del values[key]
columns = [table.write_uid, table.write_date]
update_values = [transaction.user, CurrentTimestamp()]
store_translation = Transaction().language == Config.get_language()
for fname, value in values.items():
field = cls._fields[fname]
if not hasattr(field, 'set'):
if (not getattr(field, 'translate', False)
or store_translation):
columns.append(Column(table, fname))
update_values.append(field.sql_format(value))
for sub_ids in grouped_slice(ids):
red_sql = reduce_ids(table.id, sub_ids)
try:
cursor.execute(*table.update(columns, update_values,
where=red_sql))
except DatabaseIntegrityError as exception:
transaction = Transaction()
with Transaction().new_transaction(), \
Transaction().set_context(_check_access=False):
cls.__raise_integrity_error(
exception, values, list(values.keys()),
transaction=transaction)
raise
for fname, value in values.items():
field = cls._fields[fname]
if (getattr(field, 'translate', False)
and not hasattr(field, 'set')):
Translation.set_ids(
'%s,%s' % (cls.__name__, fname), 'model',
transaction.language, ids, [value] * len(ids))
if hasattr(field, 'set'):
fields_to_set.setdefault(fname, []).extend((ids, value))
field_names = list(values.keys())
cls._update_mptt(field_names, [ids] * len(field_names), values)
all_field_names |= set(field_names)
for fname in sorted(fields_to_set, key=cls.index_set_field):
fargs = fields_to_set[fname]
field = cls._fields[fname]
field.set(cls, fname, *fargs)
cls._insert_history(all_ids)
cls.__check_domain_rule(all_ids, 'write')
for sub_records in grouped_slice(all_records, cache_size()):
cls._validate(sub_records, field_names=all_field_names)
cls.trigger_write(trigger_eligibles)
@classmethod
@no_table_query
def delete(cls, records):
DatabaseIntegrityError = backend.get('DatabaseIntegrityError')
transaction = Transaction()
cursor = transaction.connection.cursor()
pool = Pool()
Translation = pool.get('ir.translation')
ids = list(map(int, records))
if not ids:
return
table = cls.__table__()
if transaction.delete and transaction.delete.get(cls.__name__):
ids = ids[:]
for del_id in transaction.delete[cls.__name__]:
for i in range(ids.count(del_id)):
ids.remove(del_id)
cls.__check_timestamp(ids)
cls.__check_domain_rule(ids, 'delete')
has_translation = False
tree_ids = {}
for fname, field in cls._fields.items():
if (isinstance(field, fields.Many2One)
and field.model_name == cls.__name__
and field.left and field.right):
tree_ids[fname] = []
for sub_ids in grouped_slice(ids):
where = reduce_ids(field.sql_column(table), sub_ids)
cursor.execute(*table.select(table.id, where=where))
tree_ids[fname] += [x[0] for x in cursor.fetchall()]
if (getattr(field, 'translate', False)
and not hasattr(field, 'set')):
has_translation = True
foreign_keys_tocheck = []
foreign_keys_toupdate = []
foreign_keys_todelete = []
for _, model in pool.iterobject():
if callable(getattr(model, 'table_query', None)):
continue
if not issubclass(model, ModelStorage):
continue
for field_name, field in model._fields.items():
if (isinstance(field, fields.Many2One)
and field.model_name == cls.__name__):
if field.ondelete == 'CASCADE':
foreign_keys_todelete.append((model, field_name))
elif field.ondelete == 'SET NULL':
if field.required:
foreign_keys_tocheck.append((model, field_name))
else:
foreign_keys_toupdate.append((model, field_name))
else:
foreign_keys_tocheck.append((model, field_name))
transaction.delete.setdefault(cls.__name__, set()).update(ids)
cls.trigger_delete(records)
def get_related_records(Model, field_name, sub_ids):
if issubclass(Model, ModelSQL):
foreign_table = Model.__table__()
foreign_red_sql = reduce_ids(
Column(foreign_table, field_name), sub_ids)
cursor.execute(*foreign_table.select(foreign_table.id,
where=foreign_red_sql))
records = Model.browse([x[0] for x in cursor.fetchall()])
else:
with transaction.set_context(active_test=False):
records = Model.search([(field_name, 'in', sub_ids)])
return records
for sub_ids, sub_records in zip(
grouped_slice(ids), grouped_slice(records)):
sub_ids = list(sub_ids)
red_sql = reduce_ids(table.id, sub_ids)
transaction.delete_records.setdefault(cls.__name__,
set()).update(sub_ids)
for Model, field_name in foreign_keys_toupdate:
if (not hasattr(Model, 'search')
or not hasattr(Model, 'write')):
continue
records = get_related_records(Model, field_name, sub_ids)
if records:
Model.write(records, {
field_name: None,
})
for Model, field_name in foreign_keys_todelete:
if (not hasattr(Model, 'search')
or not hasattr(Model, 'delete')):
continue
records = get_related_records(Model, field_name, sub_ids)
if records:
Model.delete(records)
for Model, field_name in foreign_keys_tocheck:
with Transaction().set_context(_check_access=False):
if Model.search([
(field_name, 'in', sub_ids),
], order=[]):
error_args = Model._get_error_args(field_name)
cls.raise_user_error('foreign_model_exist',
error_args=error_args)
super(ModelSQL, cls).delete(list(sub_records))
try:
cursor.execute(*table.delete(where=red_sql))
except DatabaseIntegrityError as exception:
transaction = Transaction()
with Transaction().new_transaction():
cls.__raise_integrity_error(
exception, {}, transaction=transaction)
raise
if has_translation:
Translation.delete_ids(cls.__name__, 'model', ids)
cls._insert_history(ids, deleted=True)
cls._update_mptt(list(tree_ids.keys()), list(tree_ids.values()))
@classmethod
def __check_domain_rule(cls, ids, mode, nodomain=None):
pool = Pool()
Rule = pool.get('ir.rule')
table = cls.__table__()
cursor = Transaction().connection.cursor()
domain = Rule.domain_get(cls.__name__, mode=mode)
tables = {None: (table, None)}
if domain or nodomain:
if domain:
tables, dom_exp = cls.search_domain(
domain, active_test=False, tables=tables)
from_ = convert_from(None, tables)
for sub_ids in grouped_slice(ids):
sub_ids = list(set(sub_ids))
where = reduce_ids(table.id, sub_ids)
if domain:
where &= dom_exp
cursor.execute(*from_.select(table.id, where=where))
rowcount = cursor.rowcount
if rowcount == -1 or rowcount is None:
rowcount = len(cursor.fetchall())
if rowcount != len(sub_ids):
if domain:
cls.raise_user_error('access_error', cls.__name__)
else:
cls.raise_user_error(nodomain, cls.__name__)
@classmethod
def search(cls, domain, offset=0, limit=None, order=None, count=False,
query=False):
pool = Pool()
Rule = pool.get('ir.rule')
transaction = Transaction()
cursor = transaction.connection.cursor()
super(ModelSQL, cls).search(
domain, offset=offset, limit=limit, order=order, count=count)
# Get domain clauses
tables, expression = cls.search_domain(domain)
# Get order by
order_by = []
order_types = {
'DESC': Desc,
'ASC': Asc,
}
null_ordering_types = {
'NULLS FIRST': NullsFirst,
'NULLS LAST': NullsLast,
None: lambda _: _
}
if order is None or order is False:
order = cls._order
for oexpr, otype in order:
fname, _, extra_expr = oexpr.partition('.')
field = cls._fields[fname]
otype = otype.upper()
try:
otype, null_ordering = otype.split(' ', 1)
except ValueError:
null_ordering = None
Order = order_types[otype]
NullOrdering = null_ordering_types[null_ordering]
forder = field.convert_order(oexpr, tables, cls)
order_by.extend((NullOrdering(Order(o)) for o in forder))
# construct a clause for the rules :
domain = Rule.domain_get(cls.__name__, mode='read')
if domain:
tables, dom_exp = cls.search_domain(
domain, active_test=False, tables=tables)
expression &= dom_exp
main_table, _ = tables[None]
table = convert_from(None, tables)
if count:
cursor.execute(*table.select(Count(Literal('*')),
where=expression, limit=limit, offset=offset))
return cursor.fetchone()[0]
# execute the "main" query to fetch the ids we were searching for
columns = [main_table.id.as_('id')]
if (cls._history and transaction.context.get('_datetime')
and not query):
columns.append(Coalesce(
main_table.write_date,
main_table.create_date).as_('_datetime'))
columns.append(Column(main_table, '__id').as_('__id'))
if not query:
columns += [f.sql_column(main_table).as_(n)
for n, f in cls._fields.items()
if not hasattr(f, 'get')
and n != 'id'
and not getattr(f, 'translate', False)
and f.loading == 'eager']
if not callable(cls.table_query):
sql_type = fields.Char('timestamp').sql_type().base
columns += [Extract('EPOCH',
Coalesce(main_table.write_date, main_table.create_date)
).cast(sql_type).as_('_timestamp')]
select = table.select(*columns,
where=expression, order_by=order_by, limit=limit, offset=offset)
if query:
return select
cursor.execute(*select)
rows = list(cursor_dict(cursor, transaction.database.IN_MAX))
cache = transaction.get_cache()
if cls.__name__ not in cache:
cache[cls.__name__] = LRUDict(cache_size())
delete_records = transaction.delete_records.setdefault(cls.__name__,
set())
def filter_history(rows):
if not (cls._history and transaction.context.get('_datetime')):
return rows
def history_key(row):
return row['_datetime'], row['__id']
ids_history = {}
for row in rows:
key = history_key(row)
if row['id'] in ids_history:
if key < ids_history[row['id']]:
continue
ids_history[row['id']] = key
to_delete = set()
history = cls.__table_history__()
for sub_ids in grouped_slice([r['id'] for r in rows]):
where = reduce_ids(history.id, sub_ids)
cursor.execute(*history.select(
history.id.as_('id'),
history.write_date.as_('write_date'),
where=where
& (history.write_date != Null)
& (history.create_date == Null)
& (history.write_date
<= transaction.context['_datetime'])))
for deleted_id, delete_date in cursor.fetchall():
history_date, _ = ids_history[deleted_id]
if isinstance(history_date, str):
strptime = datetime.datetime.strptime
format_ = '%Y-%m-%d %H:%M:%S.%f'
history_date = strptime(history_date, format_)
if history_date <= delete_date:
to_delete.add(deleted_id)
return filter(lambda r: history_key(r) == ids_history[r['id']]
and r['id'] not in to_delete, rows)
# Can not cache the history value if we are not sure to have fetch all
# the rows for each records
if (not (cls._history and transaction.context.get('_datetime'))
or len(rows) < transaction.database.IN_MAX):
rows = list(filter_history(rows))
keys = None
for data in islice(rows, 0, cache.size_limit):
if data['id'] in delete_records:
continue
if keys is None:
keys = list(data.keys())
for k in keys[:]:
if k in ('_timestamp', '_datetime', '__id'):
keys.remove(k)
continue
field = cls._fields[k]
if not getattr(field, 'datetime_field', None):
keys.remove(k)
continue
for k in keys:
del data[k]
cache[cls.__name__].setdefault(data['id'], {}).update(data)
if len(rows) >= transaction.database.IN_MAX:
if (cls._history
and transaction.context.get('_datetime')
and not query):
columns = columns[:3]
else:
columns = columns[:1]
cursor.execute(*table.select(*columns,
where=expression, order_by=order_by,
limit=limit, offset=offset))
rows = filter_history(list(cursor_dict(cursor)))
return cls.browse([x['id'] for x in rows])
@classmethod
def search_domain(cls, domain, active_test=True, tables=None):
'''
Return SQL tables and expression
Set active_test to add it.
'''
transaction = Transaction()
domain = cls._search_domain_active(domain, active_test=active_test)
if tables is None:
tables = {}
if None not in tables:
if cls._history and transaction.context.get('_datetime'):
tables[None] = (cls.__table_history__(), None)
else:
tables[None] = (cls.__table__(), None)
def convert(domain):
if is_leaf(domain):
fname = domain[0].split('.', 1)[0]
field = cls._fields[fname]
expression = field.convert_domain(domain, tables, cls)
if not isinstance(expression, (Operator, Expression)):
return convert(expression)
return expression
elif not domain or list(domain) in (['OR'], ['AND']):
return Literal(True)
elif domain[0] == 'OR':
return Or((convert(d) for d in domain[1:]))
else:
return And((convert(d) for d in (
domain[1:] if domain[0] == 'AND' else domain)))
expression = convert(domain)
if cls._history and transaction.context.get('_datetime'):
table, _ = tables[None]
expression &= (Coalesce(table.write_date, table.create_date)
<= transaction.context['_datetime'])
return tables, expression
@classmethod
def _update_mptt(cls, field_names, list_ids, values=None):
cursor = Transaction().connection.cursor()
for field_name, ids in zip(field_names, list_ids):
field = cls._fields[field_name]
if (isinstance(field, fields.Many2One)
and field.model_name == cls.__name__
and field.left and field.right):
if (values is not None
and (field.left in values or field.right in values)):
raise Exception('ValidateError',
'You can not update fields: "%s", "%s"' %
(field.left, field.right))
# Nested creation require a rebuild
# because initial values are 0
# and thus _update_tree can not find the children
table = cls.__table__()
parent = cls.__table__()
cursor.execute(*table.join(parent,
condition=Column(table, field_name) == parent.id
).select(table.id,
where=(Column(parent, field.left) == 0)
& (Column(parent, field.right) == 0),
limit=1))
nested_create = cursor.fetchone()
if not nested_create and len(ids) < 2:
for id_ in ids:
cls._update_tree(id_, field_name,
field.left, field.right)
else:
cls._rebuild_tree(field_name, None, 0)
@classmethod
def _rebuild_tree(cls, parent, parent_id, left):
'''
Rebuild left, right value for the tree.
'''
cursor = Transaction().connection.cursor()
table = cls.__table__()
right = left + 1
cursor.execute(*table.select(table.id,
where=Column(table, parent) == parent_id))
childs = cursor.fetchall()
for child_id, in childs:
right = cls._rebuild_tree(parent, child_id, right)
field = cls._fields[parent]
if parent_id:
cursor.execute(*table.update(
[Column(table, field.left), Column(table, field.right)],
[left, right],
where=table.id == parent_id))
return right + 1
@classmethod
def _update_tree(cls, record_id, field_name, left, right):
'''
Update left, right values for the tree.
Remarks:
        - the value (right - left - 1) / 2 will not give
          the number of child nodes
'''
cursor = Transaction().connection.cursor()
table = cls.__table__()
left = Column(table, left)
right = Column(table, right)
field = Column(table, field_name)
cursor.execute(*table.select(left, right, field,
where=table.id == record_id))
fetchone = cursor.fetchone()
if not fetchone:
return
old_left, old_right, parent_id = fetchone
if old_left == old_right == 0:
cursor.execute(*table.select(Max(right),
where=field == Null))
old_left, = cursor.fetchone()
old_left += 1
old_right = old_left + 1
cursor.execute(*table.update([left, right],
[old_left, old_right],
where=table.id == record_id))
size = old_right - old_left + 1
parent_right = 1
if parent_id:
cursor.execute(*table.select(right, where=table.id == parent_id))
parent_right = cursor.fetchone()[0]
else:
cursor.execute(*table.select(Max(right), where=field == Null))
fetchone = cursor.fetchone()
if fetchone:
parent_right = fetchone[0] + 1
cursor.execute(*table.update([left], [left + size],
where=left >= parent_right))
cursor.execute(*table.update([right], [right + size],
where=right >= parent_right))
if old_left < parent_right:
left_delta = parent_right - old_left
right_delta = parent_right - old_left
left_cond = old_left
right_cond = old_right
else:
left_delta = parent_right - old_left - size
right_delta = parent_right - old_left - size
left_cond = old_left + size
right_cond = old_right + size
cursor.execute(*table.update([left, right],
[left + left_delta, right + right_delta],
where=(left >= left_cond) & (right <= right_cond)))
@classmethod
def validate(cls, records):
super(ModelSQL, cls).validate(records)
transaction = Transaction()
database = transaction.database
connection = transaction.connection
has_constraint = database.has_constraint
lock = database.lock
cursor = transaction.connection.cursor()
# Works only for a single transaction
ids = list(map(int, records))
for _, sql, error in cls._sql_constraints:
if has_constraint(sql):
continue
table = sql.table
if isinstance(sql, (Unique, Exclude)):
lock(connection, cls._table)
columns = list(sql.columns)
columns.insert(0, table.id)
in_max = transaction.database.IN_MAX // (len(columns) + 1)
for sub_ids in grouped_slice(ids, in_max):
where = reduce_ids(table.id, sub_ids)
if isinstance(sql, Exclude) and sql.where:
where &= sql.where
cursor.execute(*table.select(*columns, where=where))
where = Literal(False)
for row in cursor.fetchall():
clause = table.id != row[0]
for column, operator, value in zip(
sql.columns, sql.operators, row[1:]):
if value is None:
# NULL is always unique
clause &= Literal(False)
clause &= operator(column, value)
where |= clause
if isinstance(sql, Exclude) and sql.where:
where &= sql.where
cursor.execute(
*table.select(table.id, where=where, limit=1))
if cursor.fetchone():
cls.raise_user_error(error)
elif isinstance(sql, Check):
for sub_ids in grouped_slice(ids):
red_sql = reduce_ids(table.id, sub_ids)
cursor.execute(*table.select(table.id,
where=~sql.expression & red_sql,
limit=1))
if cursor.fetchone():
cls.raise_user_error(error)
def convert_from(table, tables):
    # Don't nest joins, as SQLite doesn't support them
right, condition = tables[None]
if table:
table = table.join(right, 'LEFT', condition)
else:
table = right
for k, sub_tables in tables.items():
if k is None:
continue
table = convert_from(table, sub_tables)
return table
|
[
"davidoff.d777@gmail.com"
] |
davidoff.d777@gmail.com
|
b71eeb68d50f96ca1b3d4b185d6a8abb13b6cd2f
|
ceecb47aff5436666847866d947cb36438ca409b
|
/linktv/build/lib/linktv/spiders/linktv.py
|
08114c7cf7247f38b0f5fdfc5fdbc5e48b7155f4
|
[] |
no_license
|
jinserk/muffin
|
87865b829a47dd64fd84734e27ea83d4fcfa0dc5
|
9aa30bfa35a8a2b1405b62d1f04d5f77194735c1
|
refs/heads/master
| 2021-01-12T14:36:06.265577
| 2017-01-12T19:07:22
| 2017-01-12T19:07:22
| 72,035,064
| 0
| 0
| null | 2016-10-26T19:00:10
| 2016-10-26T19:00:10
| null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
# -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy import Selector, Request
from linktv.items import LinkTvItem
from urllib.parse import unquote
programs = [
{ "name": "JTBC 뉴스룸",
"keyword": "jtbc 뉴스룸",
},
{ "name": "JTBC 이규연의 스포트라이트",
"keyword": "이규연의 스포트라이트",
},
{ "name": "JTBC 썰전",
"keyword": "썰전",
},
{ "name": "SBS 그것이 알고싶다",
"keyword": "그것이 알고 싶다 -",
},
]
class LinkTvSpider(Spider):
name = "linktv"
allowed_domains = ["linktv.us"]
def start_requests(self):
for program in programs:
name = program["name"]
url = "http://linktv.us/cast/search/q/1|{}|0/page/1".format(program["keyword"])
yield Request(url=url, meta={'name': name}, callback=self.parse_program)
def parse_program(self, response):
name = response.meta['name']
hxs = Selector(response)
urls = hxs.xpath('//a[@class="list-group-item"]')
for url in urls:
date = url.xpath('span[@class="pull-right text-muted small"]/em/text()').extract()
link = url.xpath("@href").extract()
url = 'http://linktv.us{}'.format(''.join(link))
yield Request(url=url, meta={'name': name, 'date':date}, callback=self.parse_link)
def parse_link(self, response):
name = response.meta['name']
date = response.meta['date']
hxs = Selector(response)
urls = hxs.xpath('//a[@class="btn btn-info btn-outline btn-block"]').xpath("@href").extract()
links = [unquote(url.split('=')[-1]) for url in urls]
item = LinkTvItem()
item['name'] = name
item['date'] = date
item['link'] = links
return item
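# The spider is run through Scrapy's CLI from the project root, e.g.:
#   scrapy crawl linktv -o episodes.json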
|
[
"jinserk.baik@gmail.com"
] |
jinserk.baik@gmail.com
|
09fbb02ab8030b7f82df484b5ad485863f4c58ab
|
da2dd7e904e3bbe9d3b8b13e6fdf3b573f463569
|
/crypto_tools/mersenne_twister_rng.py
|
bfcc92de84db98e8b0dc815ce9a2689a1bc59e4c
|
[] |
no_license
|
c60cb859/cryptopals
|
35430f87f9dbe9a8817f24aca6dd77cec4c6a01d
|
8b4202189005943d6b102c5cafa1382a2e1cf191
|
refs/heads/master
| 2021-05-26T07:41:38.545991
| 2020-02-27T11:48:38
| 2020-02-27T11:48:38
| 127,932,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
#!/usr/bin/env python3
class MersenneTwister19937:
def __init__(self, seed=5489):
(self.w, self.n, self.m, self.r) = (32, 624, 397, 31)
self.a = 0x9908B0DF
(self.u, self.d) = (11, 0xFFFFFFFF)
(self.s, self.b) = (7, 0x9D2C5680)
(self.t, self.c) = (15, 0xEFC60000)
self.l = 18
self.f = 1812433253
# masks (to apply with an '&' operator)
# ---------------------------------------
# zeroes out all bits except "the w-r highest bits"
# (i.e. with our parameters the single highest bit, since w-r=1)
self.high_mask = ((1 << self.w) - 1) - ((1 << self.r) - 1)
# zeroes out all bits excepts "the r lowest bits"
self.low_mask = (1 << self.r) - 1
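        # e.g. with w=32, r=31: high_mask = 0x80000000, low_mask = 0x7FFFFFFF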
self.state = list()
self.state.append(seed)
for i in range(1, self.n):
prev = self.state[-1]
# the "& d" is to take only the lowest 32 bits of the result
x = (self.f * (prev ^ (prev >> (self.w - 2))) + i) & self.d
self.state.append(x)
def twist(self, x):
if x % 2 == 1:
return (x >> 1) ^ self.a
return x >> 1
def __iter__(self):
return self
    def __next__(self):
        # The original `while True:` wrapper always returned on its first
        # iteration, so it is dropped here; behavior is unchanged.
        x = self.state[self.m] ^ self.twist((self.state[0] & self.high_mask) + (self.state[1] & self.low_mask))
        # tempering transform and output
        y = x ^ ((x >> self.u) & self.d)
        y = y ^ ((y << self.s) & self.b)
        y = y ^ ((y << self.t) & self.c)
        y = y ^ (y >> self.l)
        # note that it's the 'x' value
        # that we insert in the state
        self.state.pop(0)
        self.state.append(x)
        return y
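# Minimal usage sketch (added for illustration): the class is an iterator,
# so successive 32-bit outputs are drawn with next().
if __name__ == '__main__':
    mt = MersenneTwister19937(seed=1234)
    print([next(mt) for _ in range(5)])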
|
[
"theis.christensen@pentest.ngs"
] |
theis.christensen@pentest.ngs
|
73da815963b6122dd39391ee1e80d2ff67b5aa34
|
1df6bfec4e2edf134f3ae8283707309db06733de
|
/wsgi.py
|
a0c796789a151cf5aaf869d2111223942c1a2b84
|
[] |
no_license
|
stevexxs/pythia
|
6ec03c62dfd415edcc6aff561021de051a605586
|
55abe950e4c6ae76b92262f1a1ec419dcf2fdb9b
|
refs/heads/master
| 2023-06-04T14:39:27.609006
| 2019-05-03T07:11:37
| 2019-05-03T07:11:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
from frontend import create_app
app = create_app()
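# Typically served by a WSGI server, e.g. (assuming gunicorn is installed):
#   gunicorn wsgi:app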
|
[
"b.raml@gmx.at"
] |
b.raml@gmx.at
|
938d4d33647520d61fcc953ba447d54b7d39c433
|
e5262127f206d3be9e5ff8eca492e2486faba664
|
/tests/test_yggdrasil.py
|
4a1c1b1efbfc1cd6c2c41f9a5e2d994879caff15
|
[] |
no_license
|
zerophiel/yggdrasil
|
49243f05c7db4585a9dbaa4cdeaf9388224dad5c
|
92306cfb818905f1ebf22b6335499ab8ecbb9e1c
|
refs/heads/master
| 2023-02-16T01:10:32.757974
| 2021-01-05T10:24:55
| 2021-01-05T10:24:55
| 326,955,644
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
from yggdrasil import __version__
def test_version():
assert __version__ == '0.1.0'
|
[
"danuarta.wiratama@gdn-commerce.com"
] |
danuarta.wiratama@gdn-commerce.com
|
8175ba5b64c0cc6a4d580ae71756d221b1c17067
|
d2781dd08e2daff0c0917c8586d01c68fa930c7b
|
/absortium/tests/unit/test_withdrawal.py
|
3774a57ee850fcc6db18224ff363e9cdf24614ff
|
[] |
no_license
|
absortium/backend
|
0ce377f987371a3726f5a64caebfd7ed8e257f1b
|
39bc7118be34db7aefe5f80bd4225a4b22750224
|
refs/heads/master
| 2021-01-21T04:39:54.245505
| 2016-07-23T08:38:35
| 2016-07-23T08:38:35
| 55,866,077
| 0
| 1
| null | 2016-07-21T14:54:38
| 2016-04-09T20:42:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
__author__ = 'andrew.shvv@gmail.com'
from django.contrib.auth import get_user_model
from rest_framework.status import HTTP_404_NOT_FOUND
from core.utils.logging import getLogger
from absortium.tests.base import AbsoritumUnitTest
logger = getLogger(__name__)
class WithdrawalTest(AbsoritumUnitTest):
def setUp(self):
super().setUp()
self.flush_bitcoin_client_operations()
self.flush_ethereum_client_operations()
def test_precision_btc(self, *args, **kwargs):
account = self.get_account('btc')
self.make_deposit(account, "10")
self.make_withdrawal(account, "10")
self.check_account_amount(account, "0")
self.make_deposit(account, "0.1")
self.make_withdrawal(account, "0.1")
self.check_account_amount(account, "0")
def test_precision_eth(self, *args, **kwargs):
account = self.get_account('eth')
self.make_deposit(account, "10")
self.make_withdrawal(account, "10")
self.check_account_amount(account, "0")
self.make_deposit(account, "0.1")
self.make_withdrawal(account, "0.1")
self.check_account_amount(account, "0")
def test_smaller_than_min(self):
account = self.get_account('btc')
self.make_deposit(account, "1")
with self.assertRaises(AssertionError):
self.make_withdrawal(account, "0.000000001")
def test_permissions(self, *args, **kwargs):
account = self.get_account('btc')
self.make_deposit(account)
withdrawal = self.make_withdrawal(account)
# Create hacker user
User = get_user_model()
hacker = User(username="hacker")
hacker.save()
# Authenticate hacker
self.client.force_authenticate(hacker)
# Try to get withdrawal info from another account
url = '/api/withdrawals/{pk}/'.format(pk=withdrawal['pk'])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, HTTP_404_NOT_FOUND)
def test_withdrawal_without_money(self):
account = self.get_account('btc')
with self.assertRaises(AssertionError):
self.make_withdrawal(account)
def test_malformed(self, *args, **kwargs):
account = self.get_account('btc')
malformed_amount = "asdmnajsid"
with self.assertRaises(AssertionError):
self.make_withdrawal(account, amount=malformed_amount)
malformed_amount = "-1"
with self.assertRaises(AssertionError):
self.make_withdrawal(account, amount=malformed_amount)
def test_send_btc(self, *args, **kwargs):
account = self.get_account('btc')
self.make_deposit(account, "10")
self.make_withdrawal(account, "10")
self.check_account_amount(account, "0")
self.assertEqual(len(self.get_bitcoin_wallet_operations()), 1)
def test_send_eth(self, *args, **kwargs):
account = self.get_account('eth')
self.make_deposit(account, "10")
self.make_withdrawal(account, "10")
self.check_account_amount(account, "0")
self.assertEqual(len(self.get_ethereum_wallet_operations()), 1)
|
[
"andrew.shvv@gmail.com"
] |
andrew.shvv@gmail.com
|
698b888ca730050b7d005f2c4393960ce3de2414
|
7b1ffeb62370b70d5890ec7d2abc6f36f557e2d0
|
/pygame1.py
|
fe9c486327f0478b114f8d1de356c4502b8f23f5
|
[] |
no_license
|
avinash317-tech/Python-Car-Game
|
fa96eb05458d537b561fa25c47307220a3126b35
|
b2aa4fe34613037c2338601a68eb2db93357e237
|
refs/heads/main
| 2023-08-16T01:02:41.816485
| 2021-10-18T05:47:29
| 2021-10-18T05:47:29
| 418,224,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,153
|
py
|
import pygame
import time
import random
pygame.init()
#############
crash_sound = pygame.mixer.Sound("crash.wav")
#############
display_width = 800
display_height = 600
black = (0, 0, 0)
white = (255, 255, 255)
red = (200, 0, 0)
green = (0, 200, 0)
bright_red = (255, 0, 0)
bright_green = (0, 255, 0)
block_color = (53, 115, 255)
car_width = 73
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()
carImg = pygame.image.load('car2.png')
gameIcon = pygame.image.load('car2.png')
pygame.display.set_icon(gameIcon)
pause = False
# crash = True
def things_dodged(count):
font = pygame.font.SysFont("comicsansms", 25)
text = font.render("Dodged: " + str(count), True, black)
gameDisplay.blit(text, (0, 0))
def things(thingx, thingy, thingw, thingh, color):
pygame.draw.rect(gameDisplay, color, [thingx, thingy, thingw, thingh])
def car(x, y):
gameDisplay.blit(carImg, (x, y))
def text_objects(text, font):
textSurface = font.render(text, True, black)
return textSurface, textSurface.get_rect()
def crash():
####################################
pygame.mixer.Sound.play(crash_sound)
pygame.mixer.music.stop()
####################################
largeText = pygame.font.SysFont("comicsansms", 115)
TextSurf, TextRect = text_objects("You Crashed", largeText)
TextRect.center = ((display_width / 2), (display_height / 2))
gameDisplay.blit(TextSurf, TextRect)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
button("Play Again", 150, 450, 100, 50, green, bright_green, game_loop)
button("Quit", 550, 450, 100, 50, red, bright_red, quitgame)
pygame.display.update()
clock.tick(15)
def button(msg, x, y, w, h, ic, ac, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x + w > mouse[0] > x and y + h > mouse[1] > y:
pygame.draw.rect(gameDisplay, ac, (x, y, w, h))
        if click[0] == 1 and action is not None:
action()
else:
pygame.draw.rect(gameDisplay, ic, (x, y, w, h))
smallText = pygame.font.SysFont("comicsansms", 20)
textSurf, textRect = text_objects(msg, smallText)
textRect.center = ((x + (w / 2)), (y + (h / 2)))
gameDisplay.blit(textSurf, textRect)
def quitgame():
pygame.quit()
quit()
def unpause():
global pause
pygame.mixer.music.unpause()
pause = False
def paused():
############
pygame.mixer.music.pause()
#############
largeText = pygame.font.SysFont("comicsansms", 115)
TextSurf, TextRect = text_objects("Paused", largeText)
TextRect.center = ((display_width / 2), (display_height / 2))
gameDisplay.blit(TextSurf, TextRect)
while pause:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
button("Continue", 150, 450, 100, 50, green, bright_green, unpause)
button("Quit", 550, 450, 100, 50, red, bright_red, quitgame)
pygame.display.update()
clock.tick(15)
def game_intro():
intro = True
while intro:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
gameDisplay.fill(white)
largeText = pygame.font.SysFont("comicsansms", 115)
TextSurf, TextRect = text_objects("A bit Racey", largeText)
TextRect.center = ((display_width / 2), (display_height / 2))
gameDisplay.blit(TextSurf, TextRect)
button("GO!", 150, 450, 100, 50, green, bright_green, game_loop)
button("Quit", 550, 450, 100, 50, red, bright_red, quitgame)
pygame.display.update()
clock.tick(15)
def game_loop():
global pause
############
pygame.mixer.music.load('back1.mp3')
pygame.mixer.music.play(-1)
############
x = (display_width * 0.45)
y = (display_height * 0.8)
x_change = 0
thing_startx = random.randrange(0, display_width)
thing_starty = -600
thing_speed = 4
thing_width = 100
thing_height = 100
thingCount = 1
dodged = 0
gameExit = False
while not gameExit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = -5
if event.key == pygame.K_RIGHT:
x_change = 5
if event.key == pygame.K_p:
pause = True
paused()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
x_change = 0
x += x_change
gameDisplay.fill(white)
things(thing_startx, thing_starty, thing_width, thing_height, block_color)
thing_starty += thing_speed
car(x, y)
things_dodged(dodged)
if x > display_width - car_width or x < 0:
crash()
if thing_starty > display_height:
thing_starty = 0 - thing_height
thing_startx = random.randrange(0, display_width)
dodged += 1
thing_speed += 1
thing_width += (dodged * 1.2)
if y < thing_starty + thing_height:
print('y crossover')
            if thing_startx < x < thing_startx + thing_width or thing_startx < x + car_width < thing_startx + thing_width:
print('x crossover')
crash()
pygame.display.update()
clock.tick(60)
game_intro()
game_loop()
pygame.quit()
quit()
|
[
"noreply@github.com"
] |
avinash317-tech.noreply@github.com
|
7172130cab710add3a596cbcab9a3103ab2dd3ee
|
40a1ca8ddbdcd96a58703913f98b29b435a42745
|
/anagram.py
|
a805ced2f202e63172f8bb33fb011b4b390d8811
|
[] |
no_license
|
GaganDureja/Algorithm-practice
|
3eaca2cfc03fcee3671b87b5efda1f950fd36212
|
d40e08287754594d016801a093becc3f69f4bcc1
|
refs/heads/master
| 2023-05-06T11:58:35.471799
| 2021-06-01T03:49:58
| 2021-06-01T03:49:58
| 292,361,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# This problem was asked by Google.
# Given a word W and a string S, find all starting indices in S which are anagrams of W.
# For example, given that W is "ab", and S is "abxaba", return 0, 3, and 4.
def anagram(w, s):
    # a substring is an anagram of w iff its sorted letters match sorted(w);
    # checking only w and its reversal misses anagrams of words longer than two letters
    res = []
    target = sorted(w)
    for x in range(len(s) - len(w) + 1):
        if sorted(s[x:x + len(w)]) == target:
            res.append(x)
    return res
print(anagram('ab','abxaba'))
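
# An O(n) sliding-window alternative (illustrative sketch, not part of the
# original solution): maintain a running character count of the current
# window and compare it with the count of w, instead of sorting each slice.
from collections import Counter

def anagram_window(w, s):
    need, k = Counter(w), len(w)
    window = Counter(s[:k])
    res = [0] if window == need else []
    for x in range(1, len(s) - k + 1):
        window[s[x - 1]] -= 1          # drop the character leaving the window
        if window[s[x - 1]] == 0:
            del window[s[x - 1]]
        window[s[x + k - 1]] += 1      # add the character entering the window
        if window == need:
            res.append(x)
    return res

print(anagram_window('ab', 'abxaba'))  # [0, 3, 4]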
|
[
"gagandureja675@gmail.com"
] |
gagandureja675@gmail.com
|
972072b1a04087179602c473ad273ca40eae9021
|
fd39c53fe453616b0226deebc723c71089cb116f
|
/app/api/V2/user_model.py
|
11d6ea8aaedf037381f9fb50983d76d187e240e0
|
[] |
no_license
|
ogol254/myblog
|
e31921ba4980d494038b7d71b8f05988a37e39cc
|
b993da3833a369edb9bc60ba3a98fc5452a01477
|
refs/heads/master
| 2020-04-17T04:15:52.290793
| 2019-02-21T14:49:50
| 2019-02-21T14:49:50
| 166,222,061
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
from ...database_config import init_db
from .basemodel import BaseModel  # relative import, matching the package-relative import above
class UserModel(BaseModel):
"""docstring for UserModel"""
def __init__(self, name="name", email="email", password="password", username="username"):
self.name = name
self.email = email
self.password = password
self.username = username
# method to save user data
def save(self):
user = {
"name": self.name,
"username": self.username,
"email": self.email,
"password": self.password
}
con = init_db()
cur = con.cursor()
        if BaseModel().check_exist('users', 'email', self.email):
return "user already exists"
query = """ INSERT INTO users (name, username, email, password) VALUES \
( %(name)s, %(username)s, %(email)s, %(password)s) RETURNING user_id """
cur.execute(query, user)
user_id = cur.fetchone()[0]
con.commit()
con.close()
return user_id
    def logout(self, token):
        con = init_db()
        cur = con.cursor()
        # parameterised query prevents SQL injection via the token value
        query = "INSERT INTO blacklist (tokens) VALUES (%s);"
        cur.execute(query, (token,))
        con.commit()
        cur.close()
    def get_user_by_username(self, username):
        """return user from the db given a username"""
        database = init_db()
        curr = database.cursor()
        # parameterised query prevents SQL injection via the username value
        curr.execute(
            "SELECT user_id, password FROM users WHERE username = %s", (username,))
        data = curr.fetchone()
        curr.close()
        return data
|
[
"abramogol@gmail.com"
] |
abramogol@gmail.com
|
5b09ee0528a4588f11696753640e5c1ab43f86da
|
62e08ad817198ec770a2be7e230c4b39902632bf
|
/DataScienceChallenge6/supermarché/supermarche/supermarche/spiders/superSpider.py
|
f95e00033f4da7796d15c9c7d48f0ae21b6a4591
|
[] |
no_license
|
mgirardot/DataScienceChallenges
|
c13dd04a07329ec2f3405c095ad0376b6e97c7c0
|
b115b3e74b35593b789a6f1e36d262a0ad798c26
|
refs/heads/master
| 2021-01-15T08:49:23.105573
| 2017-10-09T04:20:22
| 2017-10-09T04:20:22
| 68,781,459
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,538
|
py
|
import scrapy
from scrapy import Spider
from scrapy.selector import Selector
from supermarche.items import SupermarcheItem
from scrapy.http import HtmlResponse
class SupermarcheSpider(Spider):
name="supermarche"
allowed_domains = []
#urls = ["http://www.750g.com/recettes_plats.htm?page="+str(x) for x in range(2,2186)]
start_urls = ["http://courses.carrefour.fr/drive/tous-les-rayons/fruits-et-legumes/fruits/PID0/1785471",
"http://courses.carrefour.fr/drive/tous-les-rayons/fruits-et-legumes/legumes/PID0/1785457",
"http://courses.carrefour.fr/drive/tous-les-rayons/fruits-et-legumes/fruits-secs-et-legumes-secs/PID0/1785470",
"http://courses.carrefour.fr/drive/tous-les-rayons/viandes-et-poissons/rotisserie/PID0/1890458",
"http://courses.carrefour.fr/drive/tous-les-rayons/viandes-et-poissons/boucherie/PID0/1785554",
"http://courses.carrefour.fr/drive/tous-les-rayons/viandes-et-poissons/volaille/PID0/1785551",
"http://courses.carrefour.fr/drive/tous-les-rayons/viandes-et-poissons/poissonnerie/PID0/1785563",
"http://courses.carrefour.fr/drive/tous-les-rayons/pains-et-patisserie/boulangerie/PID0/1785573",
"http://courses.carrefour.fr/drive/tous-les-rayons/pains-et-patisserie/viennoiserie/PID0/1785574",
"http://courses.carrefour.fr/drive/tous-les-rayons/pains-et-patisserie/patisserie/PID0/1785575",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/beurres-et-margarines/PID0/1785213",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/cremes-fraiches/PID0/1785188",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/oeufs/PID0/1785197",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/lait-et-boissons-lactees/PID0/1785205",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-a-la-coupe/PID0/1785178",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-d-hiver/PID0/1785187",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/camemberts-coulommiers-et-bries/PID0/1785173",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-de-caractere/PID0/1879060",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-rapes/PID0/1785174",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-enfants/PID0/1785175",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/roqueforts-et-bleus/PID0/1785176",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/chevres-et-brebis/PID0/1785177",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/gruyeres-et-comtes/PID0/1785184",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/gruyeres-et-comtes/PID0/1785184",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-a-tartiner/PID0/1785185",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/fromages/fromages-en-tranches-pour-salade-et-aperitif/PID0/1785186",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/yaourts/yaourts-nature-et-fromages-blancs/PID0/1785204",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/yaourts/yaourts-aux-fruits-et-aromatises/PID0/1785199",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/yaourts/yaourts-enfants/PID0/1785200",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/yaourts/yaourts-a-boire/PID0/1785201",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/yaourts/yaourts-sante-et-minceur/PID0/1785202",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/yaourts/yaourts-bio-et-soja/PID0/1785203",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/desserts/cremes-desserts/PID0/1785193",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/desserts/mousses-et-liegeois/PID0/1785194",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/desserts/flans-riz-et-semoule/PID0/1785195",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/desserts/desserts-patissiers/PID0/1785196",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/compotes-fraiches/PID0/1785211",
"http://courses.carrefour.fr/drive/tous-les-rayons/cremerie/jus-de-fruits-frais/PID0/1785212",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/charcuterie-a-la-coupe/PID0/1785093",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/foies-gras-rillettes-et-pates/PID0/1785092",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/jambons-blancs/PID0/1785097",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/jambons-de-volaille/PID0/1785098",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/jambons-crus-et-charcuterie-tranchee/PID0/1785099",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/lardons-et-des/PID0/1785100",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/knacks/PID0/1785101",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/saucisses-et-boudins/PID0/1785102",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/charcuterie/saucissons/PID0/1785103",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/saveurs-d-asie/PID0/2047958",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/traiteur-traditionnel/PID0/1785074",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/aperitif-dinatoire/PID0/1785080",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/crudites-et-taboules/PID0/1785081",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/snacks-croques-et-galettes/PID0/1785082",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/plats-cuisines-et-soja/PID0/1785083",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/pizza-et-quiches/PID0/1785071",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/cordons-bleus-et-nuggets/PID0/1785072",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/pates-fraiches-et-gnocchis/PID0/1785073",
"http://courses.carrefour.fr/drive/tous-les-rayons/charcuterie-traiteur/traiteur/pates-a-tartes/PID0/1785079",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/aperitifs-entrees-et-snacks/PID0/1785225",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/pizzas-et-tartes/PID0/1785237",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/plats-cuisines/PID0/1785243",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/legumes-et-frites/PID0/1785221",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/steaks-haches/PID0/1785235",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/panes-et-volailles/PID0/1785236",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/poissons/PID0/1785238",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/patisseries-surgelees/PID0/1785218",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/glaces/cremes-glacees-et-sorbets-en-bac/PID0/1785230",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/glaces/pots-de-glace/PID0/1785231",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/glaces/cones-et-batonnets/PID0/1785232",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/glaces/barres-glacees/PID0/1785233",
"http://courses.carrefour.fr/drive/tous-les-rayons/surgeles/glaces/glaces-enfant/PID0/1785234",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pour-l-aperitif/chips/PID0/1785313",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pour-l-aperitif/biscuits-aperitif/PID0/1785314",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pour-l-aperitif/cacahuetes-pistaches-/PID0/1785315",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pour-l-aperitif/olives-et-tartinables/PID0/1785316",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/soupes-et-croutons/briques/PID0/1785287",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/soupes-et-croutons/soupes-deshydratees/PID0/1785288",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/soupes-et-croutons/croutons-et-accompagnements/PID0/1785289",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/les-plats-cuisines/plats-individuels/PID0/1785306",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/les-plats-cuisines/raviolis-et-pates/PID0/1785302",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/les-plats-cuisines/couscous-et-taboules/PID0/1785303",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/les-plats-cuisines/cassoulets/PID0/1785304",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/les-plats-cuisines/cassoulets/PID0/1785304",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/les-plats-cuisines/choucroutes-petits-sales-etc-/PID0/1785305",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/mais-asperges-palmiers-/PID0/1785281",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/ratatouilles-et-legumes-cuisines/PID0/1785282",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/champignons-et-tomates/PID0/1785283",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/flageolets-et-legumes-secs/PID0/1785284",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/petits-pois-et-jardinieres/PID0/1785278",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/haricots-verts-et-legumes-verts/PID0/1785279",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-legumes/salsifis-marrons-et-autres-legumes/PID0/1785280",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-poisson/thon/PID0/1785274",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-poisson/sardines-et-maquereaux/PID0/1785275",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/conserves-de-poisson/autres-conserves-de-la-mer/PID0/1785276",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/conserves/pates-et-foies-gras/PID0/1785285",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pates-et-sauces-pour-les-pates/pates-longues-spaghettis-tagliatelles-etc-/PID0/1785292",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pates-et-sauces-pour-les-pates/pates-longues-spaghettis-tagliatelles-etc-/PID0/1785292",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pates-et-sauces-pour-les-pates/pates-courtes-coquillettes-tortis-etc-/PID0/1785293",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pates-et-sauces-pour-les-pates/pates-a-farcir-lasagnes-canellonis-etc-/PID0/1785294",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pates-et-sauces-pour-les-pates/pates-cuisson-rapide/PID0/1785295",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pates-et-sauces-pour-les-pates/sauces-pour-les-pates/PID0/1785291",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/riz-puree-ble-et-semoule/riz/PID0/1785250",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/riz-puree-ble-et-semoule/sachets-express/PID0/1785251",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/riz-puree-ble-et-semoule/purees/PID0/1785252",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/riz-puree-ble-et-semoule/ble-semoule-et-legumes-secs/PID0/1785253",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/huiles-d-olive/PID0/1785266",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/huile-tournesol-friture-/PID0/1785268",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/vinaigres/PID0/1785270",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/vinaigrettes-et-jus-de-citron/PID0/1785271",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/cornichons-olives-et-condiments/PID0/1785258",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/moutardes/PID0/1785262",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/ketchup-mayonnaises-et-sauces-froides/PID0/1785263",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/burger-bbq-samourai/PID0/1785264",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/sauces-pour-les-pates-et-le-riz/PID0/1785265",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/concentres-et-coulis-de-tomate/PID0/1785267",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/huiles-vinaigres-condiments-et-sauces/bechamels-et-sauces-a-napper/PID0/1785269",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/epices-et-aides-culinaires/sel-et-poivre/PID0/1785308",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/epices-et-aides-culinaires/epices-et-herbes/PID0/1785309",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/epices-et-aides-culinaires/bouillons-et-fonds-de-sauce/PID0/1785310",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/epices-et-aides-culinaires/fecule-et-chapelure/PID0/1785311",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/pains-de-mie/PID0/1785254",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/produits-du-monde/mediterranee/PID0/1785298",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/produits-du-monde/tex-mex-et-usa/PID0/1785299",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/produits-du-monde/asie/PID0/1785300",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-salee/produits-du-monde/halal/PID0/1785297",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/tablettes-chocolat-au-lait-et-blanc/PID0/1785375",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/tablettes-chocolat-noir/PID0/1785376",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/tablettes-chocolat-patissier/PID0/1785377",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/tablettes-chocolat-patissier/PID0/1785377",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/confiseries-chocolatees/PID0/1785378",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/bonbons/PID0/1785381",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/bonbons-et-chocolats/chewings-gums-et-confiseries-de-poche/PID0/1785383",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/cereales-enfants/PID0/1785322",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/cereales-adultes/PID0/1785323",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/mueslis-et-avoines/PID0/2203708",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/barres-cereales/PID0/1785324",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/biscuits-petit-dejeuner/PID0/1785325",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/assortiment-viennoiseries/PID0/1785326",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/biscottes-pains-grilles-et-galettes/PID0/1785327",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/pain-de-mie/PID0/1785328",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/viennoiseries/PID0/2203709",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/confitures/PID0/1785329",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/petit-dejeuner/pates-a-tartiner-et-miels/PID0/1785319",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/cafes-moulus-et-en-grains/PID0/1785348",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/cafes-dosettes/PID0/1785350",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/cafes-solubles-et-capuccinos/PID0/1785343",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/filtres-et-detartrants/PID0/1785344",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/thes/PID0/1785345",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/infusions/PID0/1785346",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/chocolats-en-poudre/PID0/1785347",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/cafes-et-thes/laits-en-poudre-et-concentres/PID0/1785349",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/biscuits-au-chocolat-ou-a-la-vanille/PID0/1785367",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/biscuits-aux-fruits/PID0/1785357",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/biscuits-petit-dejeuner/PID0/1785358",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/cookies/PID0/1785359",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/petits-beurres-et-biscuits-secs/PID0/1785360",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/assortiment-de-biscuits-et-miniardises/PID0/1785361",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/barres-de-cereales-et-chocolatees/PID0/1785362",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/barres-de-cereales-et-chocolatees/PID0/1785362",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/madeleines/PID0/1785363",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/quatre-quarts-et-pains-d-epices/PID0/2203714",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/marbre-et-brownies/PID0/2203715",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/marbre-et-brownies/PID0/2203715",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/gouters-pockets/PID0/2203716",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/biscuits-et-gateaux/biscuits-bio-et-dietetiques/PID0/1785366",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/compotes-fruits-au-sirop-et-cremes-desserts/compotes/PID0/1785333",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/compotes-fruits-au-sirop-et-cremes-desserts/fruits-au-sirop/PID0/1785334",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/compotes-fruits-au-sirop-et-cremes-desserts/cremes-dessert/PID0/1785335",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/sucres-farines-et-preparation-gateaux/sucres-et-edulcorants/PID0/1785337",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/sucres-farines-et-preparation-gateaux/farines/PID0/1785338",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/sucres-farines-et-preparation-gateaux/preparation-pour-gateaux-et-flans/PID0/1785339",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/sucres-farines-et-preparation-gateaux/aide-a-la-patisserie/PID0/1785340",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/sans-gluten/PID0/1785370",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/biscuits-galettes-et-cereales/PID0/2203721",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/boissons/PID0/2203722",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/thes-et-infusions/PID0/2203719",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/edulcorants/PID0/2203720",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/repas-minceur-et-barres/PID0/1785371",
"http://courses.carrefour.fr/drive/tous-les-rayons/epicerie-sucree/dietetique/complements-alimentaires/PID0/1785372",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/eaux/PID0/1785594",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/laits/PID0/2182158",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/jus-d-orange/PID0/1785628",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/jus-de-pamplemousse-agrumes/PID0/2182175",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/jus-multifruits/PID0/1785629",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/jus-de-pommes-et-raisins/PID0/1785624",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/ananas-et-autres-jus-de-fruits/PID0/1785625",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/formats-pockets/PID0/1785626",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/jus-de-tomates-et-legumes/PID0/1785627",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/jus-de-fruits/jus-de-fruits-frais/PID0/2182176",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/colas-et-boissons-gazeuses/PID0/1785589",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/boissons-aux-fruits-et-thes-glaces/PID0/1785630",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-rouges/bordeaux-et-sud-ouest/PID0/1785610",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-rouges/vallee-du-rhone/PID0/1785611",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-rouges/bourgogne-et-beaujolais/PID0/1785612",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-rouges/languedoc-provence-et-autres/PID0/1785613",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-rouges/languedoc-provence-et-autres/PID0/1785613",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-rouges/vins-de-pays-et-vins-de-table-rouge/PID0/2182170",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-blancs/PID0/1785599",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-roses/PID0/1785605",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/fontaines-a-vin/PID0/2182169",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/cave-a-vins/vins-etrangers/PID0/2182171",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/champagnes-et-vins-petillants/PID0/1785633",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/bieres-et-cidres/bieres-blondes-rafraichissantes/PID0/1785584",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/bieres-et-cidres/bieres-blanches-et-aromatisees/PID0/1785585",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/bieres-et-cidres/bieres-de-degustation/PID0/1785586",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/bieres-et-cidres/bieres-sans-alcool-et-panaches/PID0/1785587",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/bieres-et-cidres/cidres/PID0/1785588",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/bieres-et-cidres/futs-pression/PID0/2182166",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/whisky/PID0/1785621",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/pastis-anises/PID0/1785622",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/vin-doux-portos-amers/PID0/1785615",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/rhums/PID0/1785616",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/vodkas-gins-et-tequila/PID0/1785617",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/punch-et-cocktails/PID0/1785618",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/liqueurs-et-digestifs/PID0/1785619",
"http://courses.carrefour.fr/drive/tous-les-rayons/boissons-et-cave-a-vins/aperitifs-et-alcools/aperitifs-sans-alcool/PID0/1785620"
]
def parse(self, response):
self.log('parsing details of: %s' % response.url)
try:
hxs = Selector(response)
#collect
marques = hxs.xpath("//body/div[@id='allContainer']/div[@id='cf-main']/div[@id='Frame1']/div[@id='cf-mainWrap1']/div[@id='cf-mainWrap2']/div[@id='cf-mainWrap3']/div[@id='cf-mainWrap4']/div[@id='sub-main']/div[@class='gridTwoLeft']/div[@class='wrapper clearfix']/div[@class='content']/div[@class='content t-zone']/div[@class='table productList']/ul/li[@class='product']/div[@class='productWrap ']/div[@class='productHead']/h3/a/span/text()").extract()
quantites = hxs.xpath("//body/div[@id='allContainer']/div[@id='cf-main']/div[@id='Frame1']/div[@id='cf-mainWrap1']/div[@id='cf-mainWrap2']/div[@id='cf-mainWrap3']/div[@id='cf-mainWrap4']/div[@id='sub-main']/div[@class='gridTwoLeft']/div[@class='wrapper clearfix']/div[@class='content']/div[@class='content t-zone']/div[@class='table productList']/ul/li[@class='product']/div/div[@class='productMain clearfix']/div[@class='specs priceSpecs']/span[@class='unit']/text()").extract()
            # build one item per product; zip stops cleanly if the lists differ in length
            for marque, quantite in zip(marques, quantites):
                item = SupermarcheItem()
                item['marque'] = marque
                item['quantite'] = quantite
                yield item
except AttributeError:
self.log('No data to extract from : %s' % response.url)
|
[
"michael.girardot@seekpeak-bioinformatics.com"
] |
michael.girardot@seekpeak-bioinformatics.com
|
19008f4be538236c43a869bb96bc9f4d0daba38c
|
ced144c64a0c14fef118fe5f065c314b7086e254
|
/dmart_01/manage.py
|
f5009bc2a34d4dca431c84d675615630bc33aa7c
|
[] |
no_license
|
MaheshwaraVaijapur/dmat_sep_2019
|
b65894a8f445ba7db449a8d2e37ac55208d80859
|
2f45810afa2ea2390fe4d7c267acb2b010439f77
|
refs/heads/master
| 2020-08-01T19:40:14.455945
| 2019-09-26T13:20:27
| 2019-09-26T13:20:27
| 211,095,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dmart_01.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"maheshbca2010@gmail.com"
] |
maheshbca2010@gmail.com
|
1d842b90bbd62ad9a447dee16b344d98a669de77
|
e115b66c4424847b54a860b977ceebe1904ba056
|
/account/urls.py
|
9ddca5f881abe25c3600af336e352412be80ae3c
|
[] |
no_license
|
godwon2095/class_lion_blog
|
f569eb743bf57c1a2c09e50d48fd3e17d42cf27a
|
2659f2aaea9d542ce64d3f28530ca3c30fe82cfb
|
refs/heads/master
| 2020-04-26T18:36:16.295572
| 2019-03-06T15:46:20
| 2019-03-06T15:46:20
| 173,749,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('sign_up/', views.sign_up, name="sign_up"),
path('login/', views.login, name="login"),
path('logout/', views.logout, name="logout"),
]
|
[
"jsw2095@naver.com"
] |
jsw2095@naver.com
|
be40302caea9f370748207f19aff7057559d9750
|
b054e40b3d075608dba92900297d540fd2003388
|
/carshare/migrations/0011_remove_booking_ended.py
|
0d609ef9e8eb7e41c86ba05eb42d2e769e8de965
|
[] |
no_license
|
Plonq/vroom-car-share
|
baf90ddb216560e11e3aba0b5ee2986632dfd55c
|
972030d9c570e68b2ff11092192bba3ea18d39f1
|
refs/heads/master
| 2021-03-19T15:26:15.539797
| 2018-04-02T23:03:44
| 2018-04-02T23:03:44
| 102,409,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-23 02:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('carshare', '0010_auto_20171020_1002'),
]
operations = [
migrations.RemoveField(
model_name='booking',
name='ended',
),
]
|
[
"s3527782@student.rmit.edu.au"
] |
s3527782@student.rmit.edu.au
|
9ee8a0be78745b7d81f293379b4dcffea3619463
|
23c4ada084136d0924c4b2c7ca27df5bdc52a01c
|
/tkping/giftool.py
|
ee2b0e58f73f412c84e3fbd215ac00254e7a2a5e
|
[] |
no_license
|
feht34/python-sample
|
bc3715ca75134f179cc35682be0079fccc92e890
|
471d49ef3ef2f16ca7bf968ab3a0b944b1c2edc4
|
refs/heads/master
| 2021-01-20T06:57:21.771363
| 2015-04-21T04:14:08
| 2015-04-21T04:14:08
| 31,693,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
__author__ = 'tieh'
import tkinter as tk
GIFSTART = \
'R0lGODlhIAAgAPQAAP9rAP9rAf9xCv9yDP90EP92E/93Fv94Fv+BJv+IM/+ZT/+aUf+gW/+vdv+xeP+0fv+3hP+4hP+5hv+7if/F'\
'm//Rr//dxP/dxf/exv/v5P/27//28P/48v/7+P/+/QAAACH5BAEAAB8ALAAAAAAgACAAAAWr4CeOZGmeaKqqldO8cCzPslN9D6Dv'\
'fO//ukcBICgYj8ikMikADAGTlXQ04UWn0uruilVpddwu6gsVe60pTGSDJYdLC8CB4smiUYpdArNyp/I7AQwZKX54PgMRHSeGJ4A+'\
'CBwmjSZxP5GTd46IioyaJo86g4WfJY97faUkcQV0dltpEGxTlGYltbZUqrkfuLy+uVpDRUvFxgVNBTlAzM1BHy000tMvNrzX2CIh'\
'ADs='
GIFSTOP = \
'R0lGODlhIAAgAPIAAP9rAP+CJ/+DKv/EmQAAAAAAAAAAAAAAACH5BAEAAAQALAAAAAAgACAAAANiSLrc/jDKSau9OOsNhwhgKI6B'\
'MFACoK5suwpU4M5tENM4YE9yPu+Snq92GxJ5xmMwyQJGhEwnBJqUPqhGqwM71Da4Pi8DnBMvyDizAk1TE9g/FPNF8ZDuIRNnz+/7'\
'/4CBEgkAOw=='
def showgif():
root = tk.Tk()
    p = tk.PhotoImage(data=GIFSTART)  # display one of the embedded images defined above
l = tk.Label(image=p)
l.pack()
root.mainloop()
def b64gif():
import base64
with open(r'e:\mat\icon\iconsplace\aqua-view-details-32.gif', 'rb') as f:
sb64 = base64.b64encode(f.read()).decode()
for i in range(0, len(sb64), 100):
print(" '%s'\\" % sb64[i:i+100])
class ImgBtton(tk.Button):
def __init__(self, master=None, *args, **kw):
tk.Button.__init__(self, master, *args, **kw)
self.config(relief=tk.FLAT)
self.config(cursor='hand2')
self.config(compound='left')
self.bind('<Enter>', self.onenter)
self.bind('<Leave>', self.onleave)
self.oldbg = self.cget('bg')
def onenter(self, event):
self.config(bg='#3385FF')
def onleave(self, event):
self.config(bg=self.oldbg)
def showbutton():
root = tk.Tk()
p = tk.PhotoImage(data=GIFSTOP)
btn = ImgBtton(root, text='Start', image=p)
btn.pack()
root.mainloop()
if __name__ == '__main__':
#showgif()
b64gif()
#showbutton()
|
[
"feht@163.com"
] |
feht@163.com
|
678429b9b73a52b1c7a0a63b74d9de48672c29ec
|
f47629de7b55945c8e686a201f635b9d66523605
|
/keras-pipeline.py
|
527f018e2ebba88b34074ec269e96dfc860af9aa
|
[] |
no_license
|
skrillberg/DTSL
|
a71e7469acbc479c216612cc4189d81024ed069f
|
f8994fef1f680474df3ba5399b4e87a10398e848
|
refs/heads/master
| 2021-08-30T04:22:08.153110
| 2017-12-16T01:10:49
| 2017-12-16T01:10:49
| 109,423,456
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,284
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os.path
import datetime as dt
import keras
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import sklearn as sk
import sklearn.preprocessing as proc
from keras.utils import plot_model
import pydot
import tensorflow as tf
import graphviz
# this function expands the data so the lstm is fed short time series sequences
def expand_data(xdata,timesteps):
data_shape=(xdata.shape[0],timesteps,xdata.shape[1]) # define shape of expanded data to include repeated timesteps
x_large = np.zeros(data_shape)
for i in range(timesteps,xdata.shape[0]-1):
for j in range(0,timesteps):
x_large[i,j,:]=xdata[i-timesteps+j,:]
#x_large[i,j,:]=xdata[i-j,:] # reversed version
return x_large
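
# Shape sketch (hypothetical values): for xdata of shape (5, 3) and
# timesteps=2, expand_data returns a (5, 2, 3) array in which row i stacks
# xdata[i-2] and xdata[i-1]. Rows 0..timesteps-1 stay zero-filled, and so
# does the last row, because the loop stops at xdata.shape[0]-1.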
#cleans data by dropping features and creating year, day, hour features and creating y-labels
def clean_data(data):
drop_features=['PCP06','PCP24','SKC','GUS'] # drop dirty or non-important data columns
data_pruned=data.drop(drop_features, axis=1)
data_pruned['DateTime']=pd.to_datetime(data_pruned['DateTime']) # cast the date time column to datetime format
data_pruned=data_pruned.set_index('DateTime') #sets index as a datetime index
data_pruned['DateTime']=data_pruned.index # datetime column is also set to index, i had to do this because DateTime was removed by set_index
data_resampled=data_pruned.resample('H').mean() # resample data by the hour
data_resampled=data_resampled.fillna(method='pad') # fills empty values by filling in with previous values. this needs to be improved
data_resampled['DateTime']=data_resampled.index #creates a DateTime column from the datetime index
#add columns for year, year day, and hour
data_resampled['year'] = data_resampled['DateTime'].apply(lambda x: x.timetuple().tm_year-2014)
data_resampled['y_day'] = data_resampled['DateTime'].apply(lambda x: x.timetuple().tm_yday)
data_resampled['hour'] = data_resampled['DateTime'].apply(lambda x: x.timetuple().tm_hour)
data_resampled=data_resampled.drop('DateTime',axis=1) #drop the datetime column
#shifting data to create y labels
shifted_realtime=data_resampled[['HB_NORTH_RealTime','LZ_RAYBN_RealTime']].shift(-1,freq='24H') #shifts grid data forward 24 hours
shifted_realtime.columns=['HB_NORTH_24H','LZ_RAYBN_24H'] # names columns
#merge input data with y labels to create a full dataset
full_data=pd.merge(data_resampled,shifted_realtime,how='inner',left_index=True,right_index=True)
full_data=full_data.fillna(0) #fill nas with 0
print(full_data.columns)
full_data=full_data.drop(['EB1_MNSES_RealTime','Unnamed: 0','USAF'],axis=1)
return full_data
#function that takes a cleaned dataframe that includes y labels and outputs scaled and normalized
#data that is in the correct format for keras LSTM. Also splits test data
def preprocess_data(data, lookback):
    # stub: the scaling, expansion, and train/test split are performed inline
    # below rather than here
    raise NotImplementedError
#####################Loading and Cleaning Data ###############################
##############################################################################
#data=pd.read_csv('../merged_grid_and_weather.csv') # reads merged data
data=pd.read_csv('../all_the_data.csv') # reads merged data
full_data=clean_data(data)
new_data=full_data[['HB_NORTH_RealTime','LZ_RAYBN_RealTime','PCP01','TEMP','GasSpotPrice', 'GasPriceDailyVariation',
'GasTradedVolume', 'OilBarrelPrice', 'year', 'y_day', 'hour',
'HB_NORTH_24H', 'LZ_RAYBN_24H']]
################### Reshaping data so it is compatible with keras ################################
##################################################################################################
scale=1
# reshape data
timesteps=1; #leave this as 1 for now
lookback=1 #the number of hours in the past that the lstm looks at
time=full_data.index #create an index for time that we can use to plot things
x_train=new_data.drop(['HB_NORTH_24H','LZ_RAYBN_24H'],axis=1) # create training data
scaler=proc.StandardScaler().fit(x_train)
#x_train=proc.scale(x_train,axis=0) #scale data so it is zero mean and unit variance
x_train=scaler.transform(x_train)
#x_train=proc.normalize(x_train,axis=0) #normalize data so it is u
x_train=x_train[:24000,:]/scale # only data datapoints up to hour 24000
#TODO: save normalization and scaler so we can apply it consistently to test data
y_train=full_data[['HB_NORTH_24H','LZ_RAYBN_24H']] # create y_train data
#expand data so its dimensions are nsamples X lookback X features
newData=expand_data(x_train,lookback)
#
#x_train=x_train.reshape(x_train.shape[0]/timesteps,timesteps,x_train.shape[1])
y_train=y_train.as_matrix() # cast as a ndarray
#scale and normalize y_train
#y_train=proc.scale(np.nan_to_num(y_train),axis=0)
scaler2=proc.StandardScaler().fit(y_train)
y_train=scaler2.transform(np.nan_to_num(y_train))
#y_train=proc.normalize(np.nan_to_num(y_train),axis=0)
#set the point where samples are split for testing and training
test_split=5000
y=y_train/scale #save y_train in another variable, sorry this is confusing and not good practice
y_train=y[lookback:test_split,:] #takes a splice of y to create the ytrain data
y_test=y[test_split:24000,:] #creat ytest
# split data
x_train=newData[lookback:test_split,:,:]
x_test=newData[test_split:24000,:,:]
################## Keras Neural Network Design, Training, and Prediction ######################################################
###############################################################################################################################
# design network
input_shape=(x_train.shape[1], x_train.shape[2])
model = Sequential()
#network layers###########################
model.add(LSTM(25,return_sequences=True,input_shape=input_shape,activation='selu'))
#model.add(keras.layers.LeakyReLU())
model.add(LSTM(10))
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(Dense(10))
model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(Dense(5))
model.add(keras.layers.LeakyReLU(alpha=0.3))
#model.add(keras.layers.LeakyReLU(alpha=0.3))
model.add(Dense(2))
model.add(keras.layers.LeakyReLU(alpha=0.3))
#model.add(keras.layers.LeakyReLU(alpha=0.3))
#network compiling#########################
model.compile(loss='mae', optimizer='adam')
#fit network
history = model.fit(x_train, y_train[0::timesteps], epochs=50, batch_size=1000, validation_split=0.1,verbose=2, shuffle=False)
# plot history
#plt.plot(history.history['loss'], label='train')
#plt.plot(history.history['val_loss'], label='test')
#plot_model(model, to_file='model.png',show_shapes=True)
#predct and plot data#######################
yhat=model.predict(x_train,batch_size=x_train.shape[0])
yhat_test=model.predict(x_test,batch_size=x_test.shape[0])
plt.plot(time[lookback:test_split],y_train[:,0],time[lookback:test_split],yhat[:,0])
plt.figure()
plt.plot(time[test_split:24000],scaler2.inverse_transform(y_test*scale)[:,0],time[test_split:24000],scaler2.inverse_transform(yhat_test*scale)[:,0],time[test_split:24000],full_data['HB_NORTH_DayAhead'].as_matrix()[test_split+24:24000+24])
plt.legend()
#############plot error######################
error = (scaler2.inverse_transform(scale* yhat_test)[:,0]-scaler2.inverse_transform(scale* y_test)[:,0])
industry_pred_error = (full_data['HB_NORTH_DayAhead'].as_matrix()[test_split:24000] - full_data['HB_NORTH_RealTime'].as_matrix()[test_split:24000] )
plt.figure()
plt.hist(error,bins='auto')
plt.hist(industry_pred_error,bins='auto')
#plt.plot(industry_pred_error)
print( np.mean(np.abs(industry_pred_error)) )
print( np.mean(np.abs(error)))
with tf.Session():
print(keras.losses.mean_absolute_percentage_error(yhat_test[:,0],y_test[:,0]).eval())
print(keras.losses.mean_absolute_percentage_error(yhat_test[:,0],y_test[:,0]).eval())
percent_error = ( yhat_test[:,0] - y_test[:,0] ) / y_test[:,0]
print(np.mean(np.abs(percent_error)))
#plt.figure()
#plt.plot(error)
plt.figure()
plt.plot(percent_error)
#plt.figure()
#plt.hist(percent_error,bins='auto')
plt.show()
|
[
"brian.kilberg@gmail.com"
] |
brian.kilberg@gmail.com
|
a82788b8e5de0a6b7d3b686e334b982166d16950
|
543fd700f68d778e515cef372622091d9598ed53
|
/gym_virtual_office/envs/__init__.py
|
93c354afb1b988be1e2d0f94ede6fb60f5535649
|
[] |
no_license
|
gravesec/gym-virtual-office
|
f964dbf24ee030769de409bc62aed35f3c6d4c92
|
033c91bad5dd74ecfd681eee88dab214e8dbfded
|
refs/heads/main
| 2023-03-16T11:45:49.166798
| 2021-03-02T20:29:52
| 2021-03-02T20:29:52
| 310,744,005
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
from gym_virtual_office.envs.virtual_office_env import VirtualOfficeEnv
|
[
"graves@ualberta.ca"
] |
graves@ualberta.ca
|
2c04b37f2803ad958fabfab7ebe717d712fcdb86
|
7b8ff0f8642fcb2cfbe75cae2c19b82ee0f765de
|
/torch_code/seg_of_rectum/div_of_regtum/emb_train.py
|
99ace8cfbd042f7feddee4a18070a8dba58aeaa0
|
[] |
no_license
|
Moeo3/nevertrustanyone
|
9c8b5f6aa410a88dc547e922b2f144a095fa3fc9
|
b957b5d34d311e602c1ab8af306081ae0c4b765e
|
refs/heads/master
| 2023-02-15T20:01:17.380376
| 2021-01-10T18:15:23
| 2021-01-10T18:15:23
| 325,161,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,972
|
py
|
from dice_loss import DiceLoss
from unet import UNet
from emb_dataset import EmbDataset
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
import os
from torch.utils.data import DataLoader
import xlwt
from torch.nn import BatchNorm2d, Conv2d
from torch.nn.init import kaiming_normal_
def init_weight(model):
for m in model.modules():
if isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.fill_(0)
elif isinstance(m, Conv2d):
kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0)
def save_ckpt(ckpt_path, model_name, epoch, dict):
ckpt_path = os.path.join(ckpt_path, model_name)
epoch = str(epoch).zfill(2)
if not os.path.exists(ckpt_path):
os.mkdir(ckpt_path)
save_path = os.path.join(ckpt_path, f'epoch{epoch}.pth')
torch.save(dict, save_path)
def epoch_step(net, dataloader, opt, loss, train_phrase, device):
if train_phrase == 'train':
net.train()
train_tag = True
else:
net.eval()
train_tag = False
dice_loss_total = 0.
for step, batch in enumerate(dataloader):
features = batch['features'].to(device)
labels = batch['labels'].to(device)
if train_tag:
opt.zero_grad()
pred = net(features)
dice_loss = loss(labels, pred)
dice_loss_total = dice_loss_total + dice_loss.item()
if train_tag:
dice_loss.backward()
opt.step()
return dice_loss_total / len(dataloader)
def re_train(net, train_dataloader, val_dataloader, ckpt_path, xls_path):
wb = xlwt.Workbook()
ws = wb.add_sheet('dice_loss')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.float().to(device)
net.apply(init_weight)
opt = Adam(net.parameters(), lr=2e-4)
sch = StepLR(opt, step_size=10, gamma=0.5)
loss = DiceLoss()
max_epoch = 101
cnt = 0
stop_cnt = 10
min_dice_loss = 1.
stop_flag = False
for i in range(max_epoch):
train_dice_loss = epoch_step(net, train_dataloader, opt, loss, 'train', device)
val_dice_loss = epoch_step(net, val_dataloader, opt, loss, 'val', device)
loss_list = [train_dice_loss, val_dice_loss]
print(f'in epoch{i}: train dice loss is {train_dice_loss}, test dice loss is {val_dice_loss}')
for j in range(len(loss_list)):
ws.write(i, j, loss_list[j])
if val_dice_loss < min_dice_loss:
min_dice_loss = val_dice_loss
save_ckpt(ckpt_path, 'emb', i, net.state_dict())
cnt = 0
else:
cnt = cnt + 1
if cnt == stop_cnt:
stop_flag = True
break
sch.step()
if not stop_flag:
save_ckpt(ckpt_path, 'emb', i, net.state_dict())
wb.save(os.path.join(xls_path, 'seg_of_rectum_emb.xls'))
if __name__ == "__main__":
img_path = '/home/zhangqianru/data/seg_of_rectum/div_of_rectum/origin_img_2Dslice'
mask_path = '/home/zhangqianru/data/seg_of_rectum/div_of_rectum/seg_label_2Dslice'
model_res_path = '/home/zhangqianru/data/seg_of_rectum/div_of_rectum/model_results/mask'
ckpt_path = '/home/zhangqianru/data/seg_of_rectum/div_of_rectum/ckpt'
xls_path = '/home/zhangqianru/data/seg_of_rectum/div_of_rectum/xls'
model_set = ['unet', 'unet_3layers', 'unet_3layers_with_vgg_loss', 'unet_with_vgg_loss']
train_dataset = EmbDataset(img_path, mask_path, model_res_path, model_set, 'train')
val_dataset = EmbDataset(img_path, mask_path, model_res_path, model_set, 'val')
channels_in = len(train_dataset.model_set) + 1
net = UNet(channels_in, 1)
train_dataloader = DataLoader(train_dataset, batch_size=3, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=3, shuffle=False)
re_train(net, train_dataloader, val_dataloader, ckpt_path, xls_path)
|
[
"moeo3@hotmail.com"
] |
moeo3@hotmail.com
|
8a0763af473c434edf81585b62087042c138499c
|
418e890e4d56535830c3ac46d5de45666f4d7cc4
|
/experiments/migrations/0003_auto_20160808_1507.py
|
a8c9469f53cf89e1784f1e739deedf7c7d6eb324
|
[] |
no_license
|
alonappleboim/labdb
|
87d161451143017318588d8cdf9cbd09128b29cd
|
5a04fe57350d69176cc832964fa20ebd9248b558
|
refs/heads/master
| 2021-01-20T19:27:23.659832
| 2016-08-08T14:34:01
| 2016-08-08T14:34:01
| 64,955,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-08 12:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('experiments', '0002_auto_20160808_0004'),
]
operations = [
migrations.RemoveField(
model_name='experiment',
name='description',
),
migrations.RemoveField(
model_name='experimentmetafile',
name='description',
),
migrations.AddField(
model_name='experiment',
name='desc',
field=models.TextField(default='', max_length=2000, verbose_name='description'),
preserve_default=False,
),
migrations.AddField(
model_name='experimentmetafile',
name='desc',
field=models.TextField(blank=True, max_length=2000, verbose_name='description'),
),
migrations.AlterField(
model_name='experiment',
name='title',
field=models.CharField(max_length=120, unique=True),
),
]
|
[
"alonappleboim@gmail.com"
] |
alonappleboim@gmail.com
|
543e8cec710889161acb56067571833d8ab5fefa
|
d8b7d6f4f947a4c72b9efa996d0ecd6156ff1818
|
/apps/operation/migrations/0001_initial.py
|
7a228366dc5e7f34bcf10249c8f459cd461b6103
|
[] |
no_license
|
MjSeven/MxOnlie
|
71a0f563d5558b40085d39e806b1963ae366356f
|
3f7fd83c7adbed899a3f4fdf69b2a3900b6d0de9
|
refs/heads/master
| 2020-03-23T16:32:23.654166
| 2018-07-25T08:16:31
| 2018-07-25T08:16:31
| 141,816,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,441
|
py
|
# Generated by Django 2.0.7 on 2018-07-21 22:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CourseComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comments', models.CharField(max_length=250, verbose_name='评论')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='评论时间')),
],
options={
'verbose_name': '课程评论',
'verbose_name_plural': '课程评论',
},
),
migrations.CreateModel(
name='UserAsk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='姓名')),
('mobile', models.CharField(max_length=11, verbose_name='手机')),
('course_name', models.CharField(max_length=50, verbose_name='课程名')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '用户咨询',
'verbose_name_plural': '用户咨询',
},
),
migrations.CreateModel(
name='UserCourse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '用户课程',
'verbose_name_plural': '用户课程',
},
),
migrations.CreateModel(
name='UserFavorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fav_id', models.IntegerField(default=0)),
('fav_type', models.IntegerField(choices=[(1, '课程'), (2, '课程机构'), (3, '讲师')], default=1, verbose_name='收藏类型')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='评论时间')),
],
options={
'verbose_name': '用户收藏',
'verbose_name_plural': '用户收藏',
},
),
migrations.CreateModel(
name='UserMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.IntegerField(default=0, verbose_name='接收用户')),
('message', models.CharField(max_length=500, verbose_name='消息内容')),
('has_read', models.BooleanField(default=False, verbose_name='是否已读')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '用户消息',
'verbose_name_plural': '用户消息',
},
),
]
|
[
"yuchenmj@163.com"
] |
yuchenmj@163.com
|
1df2a2aed62fe4a0e04dc66c6288fd1f89c5ef0e
|
01d2b09cb905be1de4ad4454ff9aa3ebcd139613
|
/python/gutenberg.py
|
88c3237847d88af81abf316c0e7d2f0e159dcdb9
|
[] |
no_license
|
quickly3/fun-python
|
d468678ed16bdb8e5fd732e5fe1721fa98e8258f
|
fc2086c8e68817fb8af540330f2b095316416dc6
|
refs/heads/master
| 2020-11-30T08:27:37.455682
| 2019-12-27T03:17:15
| 2019-12-27T03:17:15
| 230,356,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# import nltk
# # print nltk.corpus.gutenberg.fileids()
# emma = nltk.corpus.gutenberg.words('austen-emma.txt')
# print len(set([w.lower() for w in emma]))
tup = (1,2)
# print type(tup)
#
list1 = list(tup)
print list1[1]
|
[
"hongbin@inceptionpad.com"
] |
hongbin@inceptionpad.com
|
d027b8a7f8bb72ba4af08fb477f9304a181f3206
|
b4821fc60d7b61d1df00159f0fa0640379b14aaa
|
/Python_Algos/store_and_products/store_and_products.py
|
497060201873ac96b7811d2c57a0a9098dfd1989
|
[] |
no_license
|
Lstedmanfalls/Algorithms
|
fefdfc1b7398e385544bedb9e899745e614593e9
|
8fa5a0c9acb1da779a8b3539a9e9cfb5792e093b
|
refs/heads/main
| 2023-08-20T05:26:40.115860
| 2021-10-25T19:07:12
| 2021-10-25T19:07:12
| 392,750,762
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
from product_class_module import Product
from store_class_module import Store
Safeway = Store("Safeway")
banana = Product("banana", 100, "fruit")
mango = Product("mango", 50, "fruit")
cheese = Product("cheese", 10, "dairy")
Safeway.add_product(banana) # Adding banana
Safeway.add_product(mango) # Adding mango
Safeway.add_product(cheese) # Adding cheese
Safeway.store_info() # Checking that products were added
banana.print_info() # Checking banana's info
mango.print_info() # Checking mango's info
cheese.print_info() # Checking cheese's info
mango.update_price(.5, True).print_info() # Checking that mango price incresed
banana.update_price(.5, False).print_info() # Checking that banana price decreased
Safeway.inflation(.5) # Checking inflation method
banana.print_info() # Checking that banana price increased
cheese.print_info() # Checking that cheese price increased
Safeway.set_clearance("fruit", .5) # Checking clearance method
banana.print_info() # Checking that banana price decreased
mango.print_info() # Checking that mango price decreased
cheese.print_info() # Checking that cheese price did not change
Safeway.sell_product(mango) # Checking mango sale
Safeway.store_info() # Checking that mango was removed
|
[
"lstedmanfalls@gmail.com"
] |
lstedmanfalls@gmail.com
|
66afb570ab66ba33b317f752ad254d54d1f581bb
|
b44874df0d6edd7eed451d798f72d3dc098b075d
|
/sndacspylib/test/basic_unit_test.py
|
404828f73338c94c68a1b2602d8647316dc22a48
|
[] |
no_license
|
grandcloud/sndacs-python
|
2d8069ac74c86983b2b421462a7641b081705302
|
c47f0367e857bb18af5df96f0d13a2ff4a60a1ab
|
refs/heads/master
| 2020-05-31T18:14:46.401348
| 2012-10-19T03:07:28
| 2012-10-19T03:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
'''
Created on Jul 12, 2012
'''
from sndacspylib.snda_cs_config import *
import sndacspylib.snda_cs.cs_rest as CSRest
import sndacspylib.snda_cs.cs_util as CSUtil
import uuid
# initialize connection
connection = CSRest.SNDAAuthConnection(Config.CSProperties['AccessKey'], Config.CSProperties['SecretKey'], True)
# initialize service
service = CSUtil.SNDA_CS(ConnectionObject = connection)
# list buckets
bucket_list = service.get_list_of_buckets()
for item in bucket_list:
print bucket_list[item]
bucket_name = str(uuid.uuid4())
# add bucket
service.add_bucket(bucket_name, 'huadong-1')
object_name = str(uuid.uuid4())
# initialize object
object = CSUtil.SNDA_Object(connection, bucket_name, object_name)
# add object
object.put_object_from_file("filepath/file")
# head object
infos = object.get_object_info()
print infos.metadata
print infos.size
print infos.last_modified
# get object
object.get_object_to_file("filepath/file.bak")
import commands
md5sum1 = commands.getoutput("md5sum filepath/file").split()[0]
md5sum2 = commands.getoutput("md5sum filepath/file.bak").split()[0]
print md5sum1
print md5sum2
# initialize bucket
bucket = CSUtil.SNDA_Bucket(connection, bucket_name)
# list object
object_list = bucket.get_list_of_keys_in_bucket("", "")
for item in object_list:
print item
# add object from string
object.put_object_from_string('I am a string.')
# delete object
object.delete_object()
# delete bucket
service.delete_bucket(bucket_name)
|
[
"jiangwenhan@snda.com"
] |
jiangwenhan@snda.com
|
03d83e89ff0074fa7995b211731b30a6d56ffff5
|
93494ae79b2de58a6e27abf6ebd973f3375e5a85
|
/spider_9939/db_handle.py
|
4ffe3d8ba6a786decfba2f87d66f1c073318a3e4
|
[] |
no_license
|
EstelleYang/spider_news
|
5d5a04fc317e02b933ce7073d640f99d2e07900b
|
59c0172cdeeb9ffb2b077cbf71603081da924250
|
refs/heads/master
| 2020-05-04T20:46:28.359781
| 2019-04-04T07:50:32
| 2019-04-04T07:50:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019-04-04 3:01 PM
# @Author : jiuyang
# @File : db_handle.py
# 连接数据库,操作数据入库等
|
[
"1635375337@qq.com"
] |
1635375337@qq.com
|
af6040330d3ea4df2b34a2bbeaed87e050733e77
|
6583b7f11175c40106fb7cc0037578abae125f42
|
/ucc/codegen/order_triples.py
|
7237463432be110765267840efc34374fa6de361
|
[] |
no_license
|
FREDY1969/tampa-bay-python-avr
|
02f913ee8373bfab4ef88902844476080b560226
|
e0311815ebf81b5e1b128f621bf1f15b4fa28289
|
refs/heads/master
| 2020-04-24T17:45:03.787951
| 2011-10-23T17:58:09
| 2011-10-23T17:58:09
| 40,279,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,707
|
py
|
# order_triples.py
import sys
import itertools
from ucc.database import crud
Debug = False
def order_children():
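    # Alternate between estimating register pressure and (re)ordering
    # parameters until a fixpoint is reached, then fix the final ordering.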
update_order_constraints()
with crud.db_transaction():
iterations = 0
re_triple_count = re_block_count = re_fun_count = 0
tp_order_count = 1 # force calc_reg_est_for_triples first time through
tl_triple_order_count = 0
total = 1 # force first run through the loop
while total:
total = 0
if tp_order_count or re_fun_count:
re_triple_count = calc_reg_est_for_triples()
total += re_triple_count
else:
re_triple_count = 0
if re_triple_count:
re_block_count = calc_reg_est_for_blocks()
total += re_block_count
else:
re_block_count = 0
if re_block_count:
re_fun_count = calc_reg_est_for_functions()
total += re_fun_count
else:
re_fun_count = 0
if re_triple_count:
tp_order_count = update_triple_parameter_orders()
total += tp_order_count
else:
tp_order_count = 0
iterations += 1
update_top_level_triple_orders()
calc_master_order()
return iterations
def update_order_constraints():
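    # Three passes: push constraints up to the roots, prune the redundant
    # links that creates, then add the transitive closure.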
with crud.db_transaction():
print("propogate_links iterations:", propogate_links(), file=sys.stderr)
delete_extranious_links()
add_transitive_links()
def propagate_links():
    r'''Propagate links upward through the hierarchy.
    Propagate both sides of every constraint up through all of the parents
    to the roots of both sides. This will create a lot of extraneous links
    (including links where the predecessor and successor are the same triple!)
    which we'll clean up later.
    Returns the number of times it looped.
    '''
total = 1 # force first run through the loop
iterations = 0
while total:
# Add links from parents of predecessors:
total = crud.execute('''
insert or ignore into triple_order_constraints
(predecessor, successor, orig_pred, orig_succ)
select tp.parent_id, tos.successor, tos.orig_pred,
tos.orig_succ
from triple_order_constraints tos
inner join triple_parameters tp
on tos.predecessor = tp.parameter_id
''')[0]
# Add links to parents of successors:
total += crud.execute('''
insert or ignore into triple_order_constraints
(predecessor, successor, orig_pred, orig_succ)
select tos.predecessor, tp.parent_id, tos.orig_pred,
tos.orig_succ
from triple_order_constraints tos
inner join triple_parameters tp
on tos.successor = tp.parameter_id
''')[0]
iterations += 1
return iterations
def delete_extraneous_links():
# When node A has both the predecessor and successor of a constraint as
# (deep) children; we don't need predecessor constraints from an outside
# node B that might be sharing those children, since it doesn't matter
# whether B is done before or after A (in either case the predecessor gets
# done before the successor: B -> A runs P in B, then S in A, and A -> B
# runs P and S in A).
#
# This doesn't apply to outside successor links. In this case, the order
# does matter (B -> A runs S in B, then P in A).
#
# Note that in this case node A will have a constraint showing A as both
# predecessor and successor. This is how we'll identify these nodes.
#
# So delete all links from predecessors to nodes that link the predecessor
# to themselves as the successor. (This also deletes nodes that have
# predecessor = successor).
crud.execute('''
delete from triple_order_constraints
where exists (select null
from triple_order_constraints tos
where triple_order_constraints.orig_pred = tos.orig_pred
and triple_order_constraints.orig_succ = tos.orig_succ
and triple_order_constraints.successor = tos.successor
and tos.predecessor = tos.successor)
''')
# Then clean up by deleting all links where both the predecessor and
# successor are the same node, and all links that aren't between siblings.
crud.execute('''
delete from triple_order_constraints
where predecessor = successor
or (not exists (
-- sibling relationship between predecessor and successor
select null
from triple_parameters ptp
inner join triple_parameters stp
on ptp.parent_id = stp.parent_id
and ptp.parameter_num != stp.parameter_num
where ptp.parameter_id = predecessor
and stp.parameter_id = successor)
and not exists (
-- predecessor and successor top-levels for same block
select null
from triples pt
inner join triples st
on pt.block_id = st.block_id
and pt.use_count = 0
and st.use_count = 0
and pt.id != st.id
where pt.id = predecessor
and st.id = successor))
''')
# And finally delete all but one of duplicate predecessor, successor links
# (this destroys orig_pred and orig_succ which aren't needed any more).
crud.execute('''
delete from triple_order_constraints
where exists
(select null
from triple_order_constraints tos
where tos.predecessor = triple_order_constraints.predecessor
and tos.successor = triple_order_constraints.successor
and (tos.orig_pred < triple_order_constraints.orig_pred or
tos.orig_pred = triple_order_constraints.orig_pred and
tos.orig_succ < triple_order_constraints.orig_succ))
''')
def add_transitive_links():
r'''Add transitive links. E.g., where A->B and B->C, add A->C.
Returns the number times it ran the SQL command.
'''
for depth in itertools.count(1):
rowcount = crud.execute('''
insert or replace into triple_order_constraints
(predecessor, successor, depth)
select tos_p.predecessor, tos_s.successor,
tos_p.depth + 1
from triple_order_constraints tos_p
inner join triple_order_constraints tos_s
on tos_p.successor = tos_s.predecessor
where tos_p.depth = ? and tos_s.depth = 1
''',
(depth,))[0]
if rowcount == 0:
return depth
def calc_reg_est_for_triples():
r'''Calc triples.register_est.
Triples must have an evaluation_order for all of their triple_parameters.
Returns the number of triples updated.
'''
total = crud.execute('''
update triples
set register_est = max(
(select max(1, count(*))
from triple_parameters tp
where tp.parent_id = triples.id)
+ ifnull((select num_extra_regs
from operator_info io
where io.operator = triples.operator), 0),
case when triples.operator = 'call_direct'
then (select sym.register_est
from symbol_table sym
where triples.symbol_id = sym.id)
else 0
end,
(select
ifnull(max(child.register_est + tp2.evaluation_order
- 1),
0)
from triple_parameters tp2
inner join triples child
on tp2.parameter_id = child.id
where tp2.parent_id = triples.id))
where triples.register_est isnull
and not exists (select null
from triple_parameters tp
where tp.parent_id = triples.id
and tp.evaluation_order isnull)
and (triples.operator != 'call_direct'
or (select sym.register_est notnull
from symbol_table sym
where triples.symbol_id = sym.id))
''')[0]
if Debug: print("update triples total", total, file=sys.stderr)
return total
def calc_reg_est_for_blocks():
r'''Calc blocks.register_est.
Blocks must have a register_est for all of their top-level triples.
Returns the number of blocks updated.
'''
# calc register_est for all blocks who have a register_est for all of
# their top-level triples.
total = crud.execute('''
update blocks
set register_est =
(select ifnull(max(t.register_est), 0)
from triples t
where t.block_id = blocks.id
and t.use_count = 0)
where blocks.register_est isnull
and not exists (select null
from triples t
where t.block_id = blocks.id
and t.use_count = 0
and t.register_est isnull)
''')[0]
if Debug: print("update blocks total", total, file=sys.stderr)
return total
def calc_reg_est_for_functions():
r'''Calc symbol_table.register_est for kind in ('function', 'task').
Functions/tasks must have at least one block and have a register_est for
all of their blocks.
Returns the number of symbols updated.
'''
total = crud.execute('''
update symbol_table
set register_est =
(select max(b.register_est)
from blocks b
where b.word_symbol_id = symbol_table.id) +
(select count(*)
from symbol_table v
where v.context = symbol_table.id
and v.kind in ('parameter', 'var'))
where symbol_table.kind in ('function', 'task')
and symbol_table.register_est isnull
and exists (select null
from blocks b
where b.word_symbol_id = symbol_table.id)
and not exists (select null
from blocks b
where b.word_symbol_id = symbol_table.id
and b.register_est isnull)
''')[0]
if Debug: print("update symbol_table total", total, file=sys.stderr)
return total
def update_triple_parameter_orders():
r'''Calculates and updates the triple_parameters.evaluation_order column.
This works on the level of the set of parameters to each triples node who
still need it. All of the triples parameters must have a register_est.
Returns the number of triple_parameters updated.
'''
# Create table to assign sequential evaluation_order numbers to
# sorted triple_parameters.
crud.execute('''
create temp table param_order (
seq_num integer not null primary key, -- assigned seq number
tp_id int not null, -- triple_parameters id
parent_id int not null -- parent triple id
)
''')
# Load temp param_order table with all sets of triple_parameters that
# are ready to order.
total = crud.execute('''
insert into param_order (tp_id, parent_id)
select tp.id, tp.parent_id
from triple_parameters tp
where tp.parent_id in
(select t.id
from triples t
where t.register_est isnull
and not exists (select null
from triple_parameters ctp
inner join triples c
on ctp.parameter_id =
c.id
where ctp.parent_id = t.id
and c.register_est isnull))
order by tp.parent_id,
max((select t.register_est * 1000
from triples t
where tp.parameter_id = t.id
),
(select
ifnull(max(t.register_est * 1000
+ tos.depth),
0)
from triple_order_constraints tos
inner join triples t
on tos.successor = t.id
where tos.predecessor = tp.parameter_id
)) desc,
tp.parameter_num
''')[0]
if Debug: print("insert param_order total", total, file=sys.stderr)
if total:
# Copy the assigned seq_nums from param_order to triple_parameters.
rowcount = crud.execute('''
update triple_parameters
set evaluation_order =
(select 1 + po.seq_num
- (select min(sibling_po.seq_num)
from param_order sibling_po
where sibling_po.parent_id =
triple_parameters
.parent_id)
from param_order po
where po.tp_id = triple_parameters.id)
where exists (select null
from param_order po
where po.tp_id = triple_parameters.id)
''')[0]
if Debug:
print("update triple_parameters total", rowcount, file=sys.stderr)
# We're done with the param_order table.
crud.execute('''
drop table param_order
''')
return total
def update_top_level_triple_orders():
r'''Calculates and updates the triples.order_in_block column.
This works on a block level for all blocks who still need it, and all of
whose top-level triples have a register_est.
Returns the number of triples updated.
'''
# Create table to assign sequential evaluation_order numbers to
# sorted top-level triples.
crud.execute('''
create temp table param_order (
seq_num integer not null primary key, -- assigned seq number
block_id int not null,
triple_id int not null
)
''')
# Load temp param_order table with all sets of top-level triples that
# are ready to order.
total = crud.execute('''
insert into param_order (block_id, triple_id)
select t.block_id, t.id
from triples t
where t.use_count = 0
and t.order_in_block isnull
and not exists
(select null
from triples sib
where sib.use_count = 0
and t.block_id = sib.block_id
and sib.register_est isnull)
order by t.block_id,
ifnull((select 0
from blocks b
where t.block_id = b.id
and b.last_triple_id = t.id
),
(select ifnull(max(tos.depth) + 1, 1)
from triple_order_constraints tos
where tos.predecessor = t.id
)) desc,
t.id
''')[0]
if Debug: print("insert param_order total", total, file=sys.stderr)
if total:
# Copy the assigned seq_nums from param_order to triples.
rowcount = crud.execute('''
update triples
set order_in_block =
(select 1 + po.seq_num
- (select min(block_po.seq_num)
from param_order block_po
where block_po.block_id =
triples.block_id)
from param_order po
where po.triple_id = triples.id)
where id in (select triple_id from param_order)
''')[0]
if Debug: print("update triples total", rowcount, file=sys.stderr)
# We're done with the param_order table.
crud.execute('''
drop table param_order
''')
return total
def calc_master_order():
with crud.db_transaction():
calc_tree_sizes()
calc_abs_offsets()
mark_ghost_links()
calc_abs_order_in_block()
calc_parent_seq_num()
def calc_tree_sizes():
r'''Calculate all triples.tree_size figures.
Tree_size is the number of triples in the tree rooted at that triple
(counting the triple itself).
'''
total = 1
while total:
total = crud.execute('''
update triples
set tree_size = (select ifnull(sum(child.tree_size), 0)
+ 1
from triple_parameters tp
inner join triples child
on tp.parameter_id = child.id
where tp.parent_id = triples.id)
where tree_size isnull
and not exists (select null
from triple_parameters tp
inner join triples child
on tp.parameter_id = child.id
where tp.parent_id = triples.id
and child.tree_size isnull)
''')[0]
def calc_abs_offsets():
r'''Calculate abs_offsets for top-level triples and triple_parameters.
The abs_offset is the offset from the start of the block to the tree rooted
at that node.
The abs_offsets stored in the triple_parameters stand for the parameter_id
tree.
'''
# first for top-level triples:
crud.execute('''
update triples
set abs_offset =
(select ifnull(sum(prior.tree_size), 0)
from triples prior
where prior.block_id = triples.block_id
and prior.use_count = 0
and prior.order_in_block < triples.order_in_block)
where use_count = 0
''')
# then for triple_parameters:
total = 1
while total:
total = crud.execute('''
update triple_parameters
set abs_offset =
(select ifnull(min(parent.abs_offset),
(select t.abs_offset
from triples t
where triple_parameters.parent_id
= t.id))
from triple_parameters parent
where parent.parameter_id =
triple_parameters.parent_id)
+ (select ifnull(sum(prior.tree_size), 0)
from triple_parameters tp
inner join triples prior
on tp.parameter_id = prior.id
where tp.parent_id = triple_parameters.parent_id
and tp.evaluation_order <
triple_parameters.evaluation_order)
where abs_offset isnull
and not exists
(select null
from triple_parameters parent
where parent.parameter_id =
triple_parameters.parent_id
and parent.abs_offset isnull)
''')[0]
def mark_ghost_links():
r'''Set triple_parameter.ghost for links to ghost triples.
Ghost triples have already been evaluated by the time this
triple_parameter is needed. So the triple is a ghost, and code is not
generated for it here.
'''
crud.execute('''
update triple_parameters
set ghost = 1
where triple_parameters.abs_offset >
(select min(tp.abs_offset)
from triple_parameters tp
where triple_parameters.parameter_id = tp.parameter_id)
''')
def calc_abs_order_in_block():
r'''Calc abs_order_in_block for triples and triple_parameters.
The abs_order_in_block in triple_parameters is for the parameter_id
triple. This is later copied to the triples in reg_alloc.py.
'''
# first for top-level triples. Note that the root node is always
# evaluated last, so we just add the tree_size to the abs_offset of the
# start of the tree:
crud.execute('''
update triples
set abs_order_in_block = abs_offset + tree_size
where use_count = 0
''')
# then for triple_parameters:
crud.execute('''
update triple_parameters
set abs_order_in_block = abs_offset +
case when ghost
then 1
else (select tree_size
from triples child
where triple_parameters.parameter_id = child.id)
end
''')
def calc_parent_seq_num():
r'''Calculate triple_parameters.parent_seq_num.
The parent_seq_num gives sequential numbers to all parents of the same
triple. The numbers are in the order that the triple_parameters will be
used in the code generation. But the numbers do not start from 1 for each
set of parents...
'''
# Create table to assign sequential numbers to sorted triple_parameters.
crud.execute('''
create temp table param_order (
seq_num integer not null primary key, -- assigned seq number
tp_id int not null
)
''')
# Load temp param_order table with all triple_parameters.
total = crud.execute('''
insert into param_order (tp_id)
select id
from triple_parameters
order by parameter_id, abs_order_in_block
''')[0]
if Debug: print("insert param_order total", total, file=sys.stderr)
# Copy the assigned seq_nums from param_order to triple_parameters.
crud.execute('''
update triple_parameters
set parent_seq_num =
(select seq_num
from param_order po
where triple_parameters.id = po.tp_id)
''')
# We're done with the param_order table.
crud.execute('''
drop table param_order
''')
# Set triple_parameters.last_parameter_use for all last parameters:
crud.execute('''
update triple_parameters
set last_parameter_use = 1
where not exists
(select null
from triple_parameters tp
where tp.parameter_id = triple_parameters.parameter_id
and tp.parent_seq_num > triple_parameters.parent_seq_num)
''')
|
[
"dangyogi@gmail.com"
] |
dangyogi@gmail.com
|
e74d1421fdda784ab35448a784c31e015b6e5490
|
7dd173951a33396aecbc370c8f5dd5747d32ab60
|
/Project2/tryLA.py
|
92abe3f0939e3a1be2d8af660aa47033580f018e
|
[] |
no_license
|
lassekva/CompPhysFYS4150
|
ee2c9abb784a1640ad0e5cff86a7a47008e6778d
|
f4febc61bdce167ef3af1590fd16ef4111b4b5ed
|
refs/heads/master
| 2020-03-27T05:27:06.870560
| 2018-11-19T12:24:35
| 2018-11-19T12:24:35
| 146,020,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
import numpy as np
from numpy import linalg as LA
w, v = LA.eig(np.diag((10, 2, 6)))
d = np.diag((1, 2, 3, 4, 5))
print(w)
|
[
"noreply@github.com"
] |
lassekva.noreply@github.com
|
efe175e6885e0ddfe3cbd66e3a647f749c4ff403
|
a78d39ecab243eedaec441a1a483b3aaa36328d1
|
/rest_api_test/test_2.py
|
b85361ce3c2d5addeabe11cccbed0f9f055089f4
|
[] |
no_license
|
SoufianLa/hackerrankps
|
b570cf9400d5c957b541a01a9793f54c4041ae54
|
0fb23fabe8271747d93545003734633d934bf772
|
refs/heads/master
| 2022-12-30T05:32:01.374515
| 2020-10-20T16:18:49
| 2020-10-20T16:18:49
| 304,647,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
import requests
sys.stdin = open('input_.txt', 'r')
sys.stdout = open('output_.txt', 'w')
#
# Complete the 'getNumDraws' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER year as parameter.
#
def getTotalByPage(url, page):
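    # Helper (not used by getNumDraws below): count the matches returned
    # on a single page of the football_matches endpoint.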
number_page = 0
url = url + "&page=" + str(page)
rsp = requests.get(url).json()
data = rsp["data"]
    #number_page += sum(x.get('team1goals') == x.get('team2goals') for x in data)
number_page += len(data)
return number_page
def getNumDraws(year):
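    # A drawn match has team1goals == team2goals; query the mock API once
    # for each scoreline 0-0 through 10-10 and sum the reported totals.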
number = 0
for j in range(11):
url = "https://jsonmock.hackerrank.com/api/football_matches?team1goals="+str(j)+"&team2goals="+str(j)+"&year="+str(year)
first_call = requests.get(url).json()
number += first_call["total"]
return number
if __name__ == '__main__':
year = int(input().strip())
result = getNumDraws(year)
print(result)
|
[
"s.lagnaoui@revotrends.com"
] |
s.lagnaoui@revotrends.com
|
67a7c2953576faefbf87733f908a58464756ee94
|
e67509d6e9c34959e915407696cf53544e1fe86d
|
/analytics/GA/forms.py
|
66fd1ed9310763edbbc3be53bad5bd73184158ac
|
[] |
no_license
|
ryuzaki07/analytics
|
3d33f8b4803cd526f8bdd1f6534ee897d8e75c13
|
d2502eeb22acc51c897c0c6b61125c6b4df8881a
|
refs/heads/main
| 2023-06-02T09:47:50.850076
| 2021-06-16T12:01:15
| 2021-06-16T12:01:15
| 377,456,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
import datetime
from django import forms
# input_formats=['%m-%d-%Y']
class DateForm(forms.Form):
start_date = forms.CharField(widget=forms.TextInput(
attrs={'placeholder': "Format: yyyy-m-d"}))
end_date = forms.CharField(widget=forms.TextInput(
attrs={'placeholder': "Format: yyyy-m-d"}))
|
[
"rahulnair.rn34@gmail.com"
] |
rahulnair.rn34@gmail.com
|
3a7b36b87c048318de11c4a405e323e93507b808
|
cfcd8deee58c343d7747d40cb4e695c837f833e2
|
/linked_list.py
|
1fae3f631768d9a01ccf53e6ce231c855a1d8acd
|
[] |
no_license
|
lkramer37/TDD-DataStructures
|
bbd85a7a794466f64823a698f93acf5713c9b32c
|
9b5398ad8c1918dc7ebd90d97f507bfda7a429fa
|
refs/heads/master
| 2022-04-13T05:02:24.334309
| 2020-03-28T21:11:35
| 2020-03-28T21:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,852
|
py
|
class Node:
def __init__(self, data, next=None):
self.data = data
self.next = next
# def __repr__(self):
# return 'Node <{}>'.format(self.data)
class LinkedList:
def __init__(self, head=None):
self.head = head
# super().__init__()
def length(self):
if self.head is None:
return 0
curr = self.head
size = 0
while curr is not None:
size = size + 1
curr = curr.next
return size
def append(self, data):
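        # Walk to the tail and link the new node; O(n) without a tail pointer.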
new_node = Node(data, None)
if self.head:
current = self.head
while current.next:
current = current.next
current.next = new_node
else:
self.head = new_node
return new_node
def insert(self, index, data):
# Adds element after specified index
i = 1
curr = self.head
while i < index - 1 and curr is not None:
curr = curr.next
i = i + 1
if curr is None:
print("Index out of bound")
else:
new_node = Node(data)
new_node.next = curr.next
curr.next = new_node
    def delete(self, data_str):
        # Removes first item with specified value
        curr = self.head
        prev = None
        while curr and curr.data != data_str:
            prev = curr
            curr = curr.next
        if curr is None:
            return  # empty list or value not found
        if prev is None:
            self.head = curr.next
        else:
            prev.next = curr.next
        curr.next = None
    def remove(self, index):
        # Removes and returns the element at the specified position
        curr = self.head
        prev = None
        x = 0
        while x < index and curr is not None:
            prev = curr
            curr = curr.next
            x += 1
        if curr is None:
            print("Index out of bound")
            return None
        # Unlink it from the list
        if prev is None:
            self.head = curr.next
        else:
            prev.next = curr.next
        curr.next = None
        return curr
def search(self, data_str):
# Return first node that matches data_str
if self.head is None:
print("List has no elements")
return
curr = self.head
while curr is not None:
if curr.data == data_str:
print(data_str + " found in list")
return True
curr = curr.next
print(data_str + " not found in list")
return False
def is_empty(self):
return self.head is None
def print_list(self):
if self.head is None:
print("List has no element")
return
else:
curr = self.head
while curr is not None:
print(curr.data, " ")
curr = curr.next
if __name__ == '__main__':
print("Main in linked_list.py")
|
[
"lkramer37@nevada.unr.edu"
] |
lkramer37@nevada.unr.edu
|
ff0c763f59407b9d3c0c063c09791c3c69e2368e
|
cd486d096d2c92751557f4a97a4ba81a9e6efebd
|
/17/addons/script.module.globalscrapers/lib/globalscrapers/sources/seriescr.py
|
f637099fe8f2d4e1f64cb40b9e4e65e81b496072
|
[] |
no_license
|
bopopescu/firestick-loader-kodi-data
|
2f8cb72b9da67854b64aa76f720bdad6d4112926
|
e4d7931d8f62c94f586786cd8580108b68d3aa40
|
refs/heads/master
| 2022-04-28T11:14:10.452251
| 2020-05-01T03:12:13
| 2020-05-01T03:12:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,157
|
py
|
# -*- coding: utf-8 -*-
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import debrid
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['seriescr.com']
self.base_link = 'http://seriescr.com'
self.search_link = '/search/%s/feed/rss2/'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
if debrid.status() is False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
r = urlparse.urljoin(self.base_link, url)
r = client.request(r)
r = client.parseDOM(r, 'item')
title = client.parseDOM(r, 'title')[0]
if hdlr in title:
r = re.findall('<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3', r[0], re.DOTALL)
for name, size, url in r:
quality, info = source_utils.get_release_quality(name, url)
try:
size = re.sub('i', '', size)
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
size = '%.2f GB' % size
info.append(size)
except:
pass
info = ' | '.join(info)
valid, host = source_utils.is_host_valid(url, hostDict)
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
'direct': False, 'debridonly': True})
return sources
except:
return sources
def resolve(self, url):
return url
|
[
"esc0rtd3w@gmail.com"
] |
esc0rtd3w@gmail.com
|
2f33f46275259d3eede73d45d21a854246a06e16
|
caa81c9b08b1f7e0828f40aed68d2f2bf570ef88
|
/chat/apps.py
|
c98bc032e94eeefa6f2b5c5805fc99cc98915fc2
|
[] |
no_license
|
yp-palF/THEMATRIX
|
796d36c5dd5ba31f9d2abc72aa4b69fa009dfb34
|
e1498e893af5684f01481cfdb73a1a32b587413a
|
refs/heads/master
| 2021-05-31T13:15:15.400600
| 2016-05-20T10:44:41
| 2016-05-20T10:44:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ChatConfig(AppConfig):
name = 'chat'
|
[
"saurav24081996@gmail.com"
] |
saurav24081996@gmail.com
|
3db8eab299daa982f64741500b0a3586eca703b6
|
6bc1be8e25c5f31dac6cafb09a09f6e8bba8003a
|
/config/settings.py
|
1bab7b13e7c3766df9df970c53e21a82582b6e62
|
[] |
no_license
|
Kaburumwenda/azure-demo
|
babc2c5383c0b0896f3fe4318d68279b891d195b
|
0787c56b8c4503e403cd1b89c5818328b4866839
|
refs/heads/main
| 2023-06-30T03:16:15.232420
| 2021-08-07T20:24:08
| 2021-08-07T20:24:08
| 393,784,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-eb@jkw7#z(=xguf$2l(^0alzu)&#-ijwada$5bp^7b%7-ef+6m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'capital',
'USER': 'root',
'PASSWORD': 'cummings@2021',
'HOST': '52.151.192.131',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"yuskaburu@gmail.com"
] |
yuskaburu@gmail.com
|
7f2c8daad868cf8914c36ee7c6ae01d7827cd9c5
|
7168a7451c0c28de2e25ee444656fc7e959268ae
|
/faker_seed_db.py
|
3af4ec93275ae79f7b60079d056f243dcbee59e7
|
[] |
no_license
|
carlosbelen/HW_wk6_dy3_Create-Employee-API
|
70a726dc22cfc4333fb8952e4bbf0380dc4c528a
|
1bae7d795aab4f3c5cc920a5e70da11d59d961c5
|
refs/heads/master
| 2023-01-12T10:14:23.093854
| 2020-11-05T06:29:53
| 2020-11-05T06:29:53
| 310,208,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from faker import Faker
# Creation of faker profile helper function
def getProfile():
fake = Faker()
return fake.profile()
# Gather Data and place inside of database
import os
from flask_employee_api.models import Employee
from flask_employee_api import db
def seedData():
for seed_num in range(10):
data = getProfile()
        employee = Employee(data['name'], data['sex'], data['address'],
                            data['ssn'], data['mail'])
db.session.add(employee)
db.session.commit()
seedData()
|
[
"carlosbelen2004@hotmail.com"
] |
carlosbelen2004@hotmail.com
|
c5c3103aec323910fe8b642e66cc48c188cab8ff
|
ee9e93c8b84cc8f4467e501f4c45900058818b23
|
/melbwireless/oldsite/templatetags/oldsite.py
|
a1c9051b979ba13e81a468a2c0928bb348ddca81
|
[] |
no_license
|
tysonclugg/Melbourne-Wireless
|
a433552a58ce94d040c4e69282681dc79ba8c713
|
91a196a85c24f3ad83d79f94291bd238ff2a662b
|
refs/heads/master
| 2021-01-25T06:00:43.508617
| 2014-07-11T12:02:15
| 2014-07-11T12:02:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def oldurl(context):
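    # Rebuild the legacy-site URL for the current request, falling back to
    # the plain request path when the resolved view has no old_url_format.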
request = context['request']
resolver_match = request.resolver_match
try:
path = resolver_match.func.old_url_format.format(**resolver_match.kwargs)
except AttributeError:
path = request.path
return 'http://www.melbournewireless.org.au{0}'.format(path)
|
[
"tclugg@www.melbournewireless.org.au"
] |
tclugg@www.melbournewireless.org.au
|
5f21e049f53c4df8a97983ff285708d760a80dc5
|
a24b3316e29d1f45de8dd3ddc414bf87b5fdf489
|
/Lion.py
|
7effeba92ec04d7b7d93abfcae47d9406aa64adc
|
[] |
no_license
|
yamuo/python_sample
|
fd854cf414074d7e1914ef34ea41c8153e176d74
|
bbefbc7da456dd00649f88fc293211f6d28338bd
|
refs/heads/master
| 2020-03-23T01:48:05.265745
| 2018-07-16T04:59:09
| 2018-07-16T04:59:09
| 140,938,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
class Lion:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
lion = Lion("Dilbert")
print(lion)
|
[
"s-yamashita@MacBook-2.local"
] |
s-yamashita@MacBook-2.local
|
6cd8b0dc089aaae77c9808304e6170342021b2c0
|
f5244ad34315c4fa6373da1aefde1057a116dd48
|
/biblioteca/urls.py
|
00f221e30484ba12836bc9435c97647d09e84aa3
|
[] |
no_license
|
Douglas1688/biblioteca
|
52af997cbb64aa343e896507ef4fd4228ea833c6
|
20636d3362675d435fa5cc956dcb3f5da9f25f5d
|
refs/heads/master
| 2023-07-07T01:26:07.642647
| 2021-08-13T01:34:58
| 2021-08-13T01:34:58
| 394,445,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
"""biblioteca URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# from django.contrib.auth.views import LoginView,LogoutView
from django.contrib.auth.decorators import login_required
from apps.libro.views import Inicio
from apps.usuario.views import Login, logoutUsuario
urlpatterns = [
path('admin/', admin.site.urls),
path('libro/',include(('apps.libro.urls','libro'))),
path('',login_required(Inicio.as_view()),name='index'),
path('accounts/login/',Login.as_view(),name='login'),
path('logout/',login_required(logoutUsuario),name='logout'),
# path('logout/',LogoutView.as_view(),name='logout'),
# path('crear_autor/',include(('apps.libro.urls','crear_autor'))),
]
|
[
"douglas.vasquezp@hotmail.com"
] |
douglas.vasquezp@hotmail.com
|
b5cbb3671e765470ae75b2b8f8ea2f446253aef2
|
fe6c8c865fbbf7307945fa961a658244d33667ea
|
/Basics/Subclasses1.py
|
a94d2bf2018728abca885f4a9c6d6ff3d2338d59
|
[] |
no_license
|
DianaBelePublicRepos/PythonBasics
|
56b0df6242194c73ac003364ce49e3404434351c
|
38aee369a8f21a6952d3e041b828e6dfeb694d78
|
refs/heads/master
| 2021-02-07T01:21:37.777839
| 2020-05-08T21:52:26
| 2020-05-08T21:52:26
| 243,967,056
| 0
| 0
| null | 2020-03-27T16:33:47
| 2020-02-29T12:43:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
import datetime as dt
class Member:
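    # Base membership record: the join date is set on creation and the
    # expiry date is derived from the (overridable) expiry_days attribute.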
expiry_days = 365
def __init__(self, firstname, lastname):
self.firstname = firstname
self.lastname = lastname
self.date_joined = dt.date.today()
        self.expiry_date = dt.date.today() + dt.timedelta(days=self.expiry_days)  # use the subclass's expiry_days
self.secret_code = ' '
def showexpiry(self):
return f"{self.firstname} {self.lastname} expires on {self.expiry_date}"
class Admin(Member):
#Admin accounts don't expire for 100 years
expiry_days = 365.2422 * 100
#Subclass parameters
def __init__(self, firstname, lastname, secret_code):
super().__init__(firstname, lastname)
#Assign the remaining params to this object
self.secret_code = secret_code
#Subclass for Users
class User(Member):
pass
Ann = Admin("Annie", "Angst", "PRESTO")
print(Ann.firstname, Ann.lastname, Ann.expiry_date, Ann.secret_code)
Uli = User('Uli', 'Ungula')
print(Uli.firstname, Uli.lastname, Uli.expiry_date, Uli.secret_code)
|
[
"dianabeleemea@gmail.com"
] |
dianabeleemea@gmail.com
|
b8edac9b6181c5ca77aae404ac0283232bea11bd
|
39d86711fffe0de8cc8797a3aaed2edcbb034741
|
/Case/test002.py
|
aa9a265bf5110539cc436538a54be7f2deafbaeb
|
[] |
no_license
|
explorer369/appium
|
aadaec94906270c62181b63da897004d246379fb
|
a3866ac4424c5214bd62bb90ff69424a0667d347
|
refs/heads/master
| 2021-01-21T18:33:07.351013
| 2017-05-24T14:53:56
| 2017-05-24T14:53:56
| 92,018,962
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 3,133
|
py
|
import logging,os,sys,time,unittest,xlrd
reload(sys)
sys.setdefaultencoding('GBK')
def open_excel(file= 'file.xls'):
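    # Open an Excel workbook with xlrd; prints the error and implicitly
    # returns None if the file cannot be read.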
try:
data = xlrd.open_workbook(file)
return data
except Exception,e:
print str(e)
# Fetch rows from an Excel sheet by index. Args: file: path to the Excel file; colnameindex: row index of the header row; by_index: sheet index
def excel_table_byindex(file= 'file.xls',colnameindex=0,by_index=0):
data = open_excel(file)
table = data.sheets()[by_index]
    nrows = table.nrows # number of rows
    ncols = table.ncols # number of columns
    colnames = table.row_values(colnameindex) # values of the header row
list =[]
for rownum in range(1,nrows):
row = table.row_values(rownum)
if row:
app = {}
for i in range(len(colnames)):
app[colnames[i]] = row[i]
list.append(app)
return list
class AndroidTest(unittest.TestCase):
    func = getattr(__import__('find'),'find_name') # func() is equivalent to calling find_name from find.py
def setUp(self):
SettingDevice.Setting_device(self)
def tearDown(self):
        self.driver.available_ime_engines() # restore the input method (IME)
self.driver.close_app()
self.driver.quit()
def test12309click(self):
time.sleep(5)
listdata = excel_table_byindex('data.xls',0)
if(len(listdata) <= 0 ):
assert 0,u"Excel数据库异常"
for i in range(0,int(len(listdata))):
print 'Excel中共有:%s 行数据'%(len(listdata))
time.sleep(6)
self.func("id","com.wxws.myticket:id/tv_ticket_cjkx",get_element='text') #点击城际快线
self.func("id","com.wxws.myticket:id/etBecity")
self.driver.find_element_by_id('com.wxws.myticket:id/etSearch').send_keys(listdata[i]['username'])
self.func('class_names','android.widget.TextView',1)
time.sleep(1)
# self.func("id","com.wxws.myticket:id/tv_cancel")
self.func("id","com.wxws.myticket:id/etEncity")
self.driver.find_element_by_id('com.wxws.myticket:id/etSearch').send_keys(listdata[i]['password'])
time.sleep(2)
self.func('class_names','android.widget.TextView',1)
self.func("id","com.wxws.myticket:id/btnQuery")
self.func('class_names','android.widget.LinearLayout',1)
time.sleep(1)
# self.func("xpath","//android.widget.TextView[@text='退票说明']")
# time.sleep(1)
# self.func("id","com.wxws.myticket:id/imgLeft")
time.sleep(1)
self.func("class_name","android.widget.Button")
time.sleep(1)
self.func("id","com.wxws.myticket:id/layout_Picker")
time.sleep(1)
self.func('class_names','android.widget.LinearLayout',1)
            # show price details
            #self.func("id","com.wxws.myticket:id/rl_desc_price")
            # submit the order
            # self.func("id","com.wxws.myticket:id/btnPay")
            # pay now (locator text kept in Chinese to match the app UI)
            # self.func("name"," 立即支付 ")
|
[
"noreply@github.com"
] |
explorer369.noreply@github.com
|
4947b1b86a1df54dd6417cb1e3f5dd88fea11dbf
|
51ec068b7a41dd1a184bc39ca0596300f3d1a910
|
/spiders/halftime.py
|
e3803ac91c630fbc9d68a1b4c09ab10562279e7c
|
[] |
no_license
|
wbglaeser/odds-checker
|
3549a51370e3f8736d3e53fb5d8f42ffe84aa382
|
c3299b63fb34283c2eeeb973296c689338137827
|
refs/heads/master
| 2021-01-24T21:42:32.554940
| 2018-04-06T11:44:06
| 2018-04-06T11:44:06
| 123,275,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,564
|
py
|
######################################
### BUILT LOGGER ###
######################################
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - [{}] - %(levelname)s : %(message)s'.format(__name__))
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
######################################
### Import Modules ###
######################################
import time
from bs4 import BeautifulSoup
import datetime
import numpy as np
from random import uniform
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException, NoSuchElementException, ElementNotInteractableException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
# Import items class
from backend.items import Halftime
# Modules for proxy
from browsermobproxy import Server
######################################
### Main function ###
######################################
# Define Class
class HalftimeBrowser():
def __init__(self,country):
self.country = country
# Set browser profile
def set_profile(self,add_on):
profile = webdriver.FirefoxProfile()
profile._install_extension(add_on)
return profile
# INITIALISE DRIVER
def init_driver(self,profile):
# Start browser
driver = webdriver.Firefox(firefox_profile=profile)
WebDriverWait(driver, 5)
return driver
# LOAD WEBSITE
def open_url(self,driver):
url_stem = "https://www.oddschecker.com/football/"
if self.country == "germany":
driver.get(url_stem + "germany/bundesliga")
elif self.country == "england":
driver.get(url_stem + "english/premier-league")
elif self.country == "spain":
driver.get(url_stem + "spain/la-liga-primera")
elif self.country == "italy":
driver.get(url_stem + "italy/serie-a")
elif self.country == "france":
driver.get(url_stem + "france/league-1")
# GET RID OF ADD THAT POPS UP
self.remove_popup(driver, 0)
# LOG
logger.info('Starting Page Opened.')
# RETRIEVE ITEMS
def retrieve_teams(self, driver, database, timestamp):
# Retrieve Page Type
button_selector, page_type = self.retrieve_page_type(driver.page_source)
# Set starting window
start_window = driver.window_handles[0]
count = 1
# Loop through Different Games
for game in self.wait_pl_pres(driver, 'tr.match-on '):
# Move to Second Page
self.move_to_game_page(game, driver, button_selector)
self.remove_popup(driver, 1)
#
self.move_to_odds_page(driver, page_type)
driver.close()
driver.switch_to_window(start_window)
#
# Retrieve Halftime Button if existent
# options_count, code = self.retrieve_options_index(driver)
# if code == 1:
# game_info = self.extract_game_info(driver,timestamp)
# # ------> HALFTIME ODDS
# self.wait(driver,'//*[@id="table-tabs-row"]/ul/li[{}]/a'.format(options_count)).click()
# # Collect odds for the different providers and pass them on to the database
# self.provider_odds(driver,database,game_info)
# print('Game {} successfully fetched'.format(count))
# else:
# print('There is no halftime/fulltime odds for game {} yet'.format(count))
# driver.close()
#
# count = count + 1
# else:
# print('Element not found')
######################################
### Action Function ###
######################################
def move_to_game_page(self, game, driver, button_selector):
""" This function opens the next page and adjusts the window switch. """
try:
all_odds_button = self.wait_pres(game, button_selector.replace(' ', '.'))
# Move on
all_odds_button.send_keys(Keys.SHIFT, Keys.ALT, Keys.ENTER)
logger.info('Moved On to Second Page.')
except TimeoutException as ex:
logger.error(ex)
# Obtain and switch to new window handle
WebDriverWait(driver, 10).until(EC.number_of_windows_to_be(2))
game_window = driver.window_handles[1]
driver.switch_to_window(game_window)
logger.info('Window Handle is switched.')
def move_to_odds_page(self, driver, page_type):
logger.info('Moving on to Odds Page.')
if page_type == 'NEW':
self.wait_pres(driver, 'div.market-dd.select-coupon-wrap > '
'div.selected-coupon > '
'div.market-item.selected.beta-caption1').click()
additional_options = self.wait_pl_vis(driver, 'div.market-lists > '
'ul.market-list.beta-3col-table > li >'
' a.market-item.beta-caption1')
for item in additional_options:
option = item.get_property('title')
print(option)
if option == 'Half Time/Full Time':
item.click()
logger.info('Moved on to Odds Page.')
break
# elif page_type == 'OLD':
else:
logger.info('Not Moving on to Odds Page.')
# Extract team information
def extract_game_info(self, driver, timestamp):
# Messy splitting of team names.
teams = self.wait_pres(driver, '//*[@id="betting-odds"]/div/section/div/header/h1').text
home_team = teams.split(' v ')[0].replace(" ", "")
time.sleep(0.5 * uniform(0, 1))
container = teams.split(' v ')[1]
away_team = container.split(' Winner')[0].replace(" ", "")
time.sleep(0.5 * uniform(0, 1))
# Messy splitting of date & time
datetime = self.wait_pres(driver, '//*[@id="betting-odds"]/div/section/div/div/div/div/span').text
day = datetime.split(' / ')[0]
clock = datetime.split(' / ')[1]
# Build identifier variable
day_ = day.split(' ')[1][:2]
month = day.split(' ')[2][:3]
h_team = home_team[:5]
a_team = away_team[:5]
identifier = h_team + '_' + a_team + '_' + day_ + '_' + month
# save the info in list
game_info = [home_team, away_team, day, clock, identifier, timestamp]
return game_info
# Extract the data by provider
def provider_odds(self, driver, database, game_info):
# Set up index for the different providers
provider_indices = np.arange(2, 31)
provider_indices = np.delete(provider_indices, 25)
# Loop through each of the odds providers
for i in provider_indices:
# Quick nap
time.sleep(0.5 * uniform(0, 1))
# Only include providers that have odds
if self.wait_pres(driver, '//*[@id="t1"]/tr[1]/td[{}]'.format(i)).get_attribute('data-odig') != "0":
item = Halftime()
# Assign Game Info to Database Field
game_info_features = ['home_team', 'away_team', 'date', 'time','identifier', 'accessed', 'provider']
for index, feature_id in enumerate(game_info_features):
if index != 6:
item[feature_id] = game_info[index]
else:
item[feature_id] = self.wait(driver,
'//*[@id="oddsTableContainer"]/table/thead/tr[4]/td[{}]/aside/a'.format(
i)).get_attribute('title')
# Assign Odds to Database Field
game_odds_identifier = ['home_home', 'away_away', 'draw_home', 'draw_draw', 'draw_away', 'home_draw',
'away_draw', 'away_home', 'home_away']
for index, odd_id in game_odds_identifier:
item[odd_id] = self.wait(driver, '//*[@id="t1"]/tr[{}]/td[{}]'.format(index+1,i)).get_attribute(
'data-odig')
database.process_item(item)
######################################
### Auxiliary Function ###
######################################
# This break function runs through the toolbar to check whether halftime/fulltime odds exists/ and find their index
def retrieve_options_index(self, driver):
code = 0
for index, option in enumerate(self.wait_pl_pres(driver,'//*[@id="table-tabs-row"]/ul/li')):
if option.text == "Half Time/Full Time":
code = 1
options_count = index + 1 # ??
break
return options_count, code
# Check whether game is in play
def check_in_play(self, odds):
button = odds.get_attribute('class')
not_in_play = "button beta-callout btn-1-small"
if button == not_in_play:
code = 1
else:
code = 0
return code
# Wait for element
def wait_vis(self, driver, css_selector):
""" This function returns an element identified by via the given xpath. """
return WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, css_selector)))
# Wait for element
def wait_click(self, driver, css_selector):
""" This function returns an element identified by via the given xpath. """
return WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, css_selector)))
# Wait for element
def wait_pres(self, driver, css_selector):
""" This function returns an element identified by via the given xpath. """
return WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, css_selector)))
# wait for handle
def wait_pl_vis(self, driver, css_selector):
""" This function assigns all elements identified by via the given xpath to a list. """
return WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, css_selector)))
# wait for handle
def wait_pl_pres(self, driver, css_selector):
""" This function assigns all elements identified by via the given xpath to a list. """
return WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, css_selector)))
def retrieve_page_type(self, driver_html):
""" This function checks for the page type and retrieves the button class name."""
try:
if 'class="button beta-callout btn-1-small"' in driver_html:
page_type = 'OLD'
button_selector = 'a.button beta-callout btn-1-small'
elif 'class="beta-callout"' in driver_html:
page_type = 'NEW'
button_selector = 'a.beta-callout'
logger.info('Page Type: {}'.format(page_type))
except TimeoutException:
logger.error('No Button Found.')
return button_selector, page_type
def remove_popup(self, driver, button_type):
""" This function removes popup adds. """
# Identify type of Popup
if button_type == 0:
buttons = self.wait_pl_pres(driver, 'div.content-wrapper >'
' span.inside-close-button.choose-uk')
elif button_type == 1:
buttons = self.wait_pl_pres(driver,'div#promo-modal.modal-dialog.active.offers-2 > '
'div.modal-dialog-inner > div.content-wrapper > '
'span.inside-close-button')
time.sleep(2) # Otherwise the click does not work...
# Remove Popup
for button in buttons:
try:
button.click()
                logger.info('Popup Ad Removed.')
except BaseException as ex:
logger.error(ex)
pass
del buttons
|
[
"w.glaeser@lse.ac.uk"
] |
w.glaeser@lse.ac.uk
|
0a3714dda87472ba3477cbe0437c0752168772be
|
f6a55d65f72512b6d75a0ea6f2296b808b7e7157
|
/firstWEB/urls.py
|
d22d87f85e8dd6d436fe89e36b62afd4c7adb23f
|
[] |
no_license
|
leeo2020/django-demo
|
1312abb5122d2ec68f1ea676ffc8f871013093e6
|
d253c0ea6f6c15ff89d11e755e1123ca0b5465c0
|
refs/heads/master
| 2023-04-20T07:55:26.497403
| 2021-05-13T10:15:57
| 2021-05-13T10:15:57
| 366,958,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('calc/', views.calc),
path('rst/', views.rst)
]
|
[
"3145875098@qq.com"
] |
3145875098@qq.com
|
1205967ca2fdd7378f1b0a3c2d76aefc99b71035
|
a5578f52de105b1d7dda2cc94b0dbfc7ddc7c91f
|
/src/manage.py
|
7db8da291e0fab6d2288f2e6631198e4333b3ad1
|
[] |
no_license
|
segimanzanares/acs-django
|
4842004882d82debc6d8370c353a40c34cc4fcb7
|
893a585616af06e21960cb09c222b79992f4dbeb
|
refs/heads/master
| 2020-03-27T14:26:21.229015
| 2018-08-30T19:37:28
| 2018-08-30T19:37:28
| 146,662,496
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'acs.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"segifredo.manzanares@copyleft.com.mx"
] |
segifredo.manzanares@copyleft.com.mx
|
5857b51e19b6f474acf8a96eb7ed7d80c8d0d712
|
ef91b74131b8791800d2365982edbfaf515ef54a
|
/day2/list_special.py
|
9d3c2702dcf822503aa42d3cbb47c7dce942b0f9
|
[] |
no_license
|
akatkar/python-training-examples
|
ec749f58698fc1cfa458246ce11069f94d25027e
|
8afa2e347c079a84740f4559e78c1be55eed5226
|
refs/heads/master
| 2020-05-16T03:32:22.613770
| 2019-04-22T09:15:14
| 2019-04-22T09:25:17
| 182,726,662
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
list1 = [1,2,3,4,5]
list2 = [6,7,8,9,10]
for i in range(len(list1)):
a = list1[i] + list2[i]
print(a)
for a, b in zip(list1,list2):
print(a+b)
|
[
"alikatkar@gmail.com"
] |
alikatkar@gmail.com
|
34bff3a0e0d3bff94503e438ba33e80cc26d37af
|
aee140df90baac9ce843d1468606672742731d8e
|
/accounts/views/signup.py
|
98d0d12a195bee4a548f1c1c031d8284b25da06c
|
[] |
no_license
|
stevartz/event-calendar
|
06c56f389c529400c3dcb157a8b003401bbf393e
|
d2d337da0bddac5af4c07cd5a8b6de04d6a2df94
|
refs/heads/main
| 2023-08-19T02:17:44.353101
| 2021-09-23T15:36:06
| 2021-09-23T15:36:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from django.views.generic import View
from django.shortcuts import render, redirect
from accounts.forms import SignUpForm
class SignUpView(View):
""" User registration view """
template_name = 'accounts/signup.html'
form_class = SignUpForm
def get(self, request, *args, **kwargs):
forms = self.form_class()
context = {
'form': forms
}
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
forms = self.form_class(request.POST)
if forms.is_valid():
forms.save()
return redirect('accounts:signin')
context = {
'form': forms
}
return render(request, self.template_name, context)
|
[
"sajib1066@gmail.com"
] |
sajib1066@gmail.com
|
f39212fbdf011e70d20c0e51c1c82fab0f25700c
|
cd2aaf0097f2e244aa4a22c9da7133dd0e2f2fb8
|
/Saylani/python-code-master/17Sep2017/storingdata/storing6.py
|
cd00788649b22429c68f8d458a45ff91c0b1f3e3
|
[] |
no_license
|
EnggQasim/SSUET-2017-Module-I
|
349ea6e9b0554fa8c55899622bf0ee97fd19b685
|
cd41ab8e768616ca56ddaa1d7662283f653674f9
|
refs/heads/master
| 2020-03-25T10:36:38.330710
| 2018-09-30T13:17:38
| 2018-09-30T13:17:38
| 143,698,684
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
import json
stu = {}
filename = 'numbers2.json'
with open(filename) as f_obj:
stu = json.load(f_obj)
print(stu)
print(stu["name"])
|
[
"m.qasim077@gmail.com"
] |
m.qasim077@gmail.com
|
ab02463eb2fc0360da5da67ea204eb492611ded2
|
844d948e39d58018ad88dfc55a01b05a5b0b6a78
|
/backend/RapidRevision/views.py
|
e20d81acb34dedf7a1448824774e14311b2c3fe7
|
[] |
no_license
|
a-exploit/RapidRevison
|
77b7012cf68d6b6b39f3046d7ca1515459013aa0
|
bddf9a349907bb1625f9786c0e706819ae846e86
|
refs/heads/master
| 2023-01-06T10:19:39.917871
| 2020-09-06T19:48:55
| 2020-09-06T19:48:55
| 211,389,944
| 0
| 1
| null | 2023-01-04T11:38:57
| 2019-09-27T19:39:14
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
import requests
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics, mixins, status, viewsets
from youtube_transcript_api import YouTubeTranscriptApi
from rest_framework.decorators import api_view
# class AddMembers(APIView):
# def post(request,id):
# list2=[]
# list2=YouTubeTranscriptApi.get_transcript("E8lWqYvdCjQ")
# with open('/home/Untitled_2.txt', 'w+') as f:
# for item in list2:
# f.write("%s\n" % item.get("text"))
# print(request.POST)
# return Response( status=status.HTTP_200_OK)
@api_view(['GET', 'POST'])
def transcript(request):
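    # Expects a POST body containing {"id": "<YouTube video id>"}.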
    if request.method == 'POST':
        subs = ''
        # fetch the caption track for the given video id
        list2 = YouTubeTranscriptApi.get_transcript(request.data.get('id'))
        for item in list2:
            subs = subs + "\n" + item.get("text")
        return Response(subs, status=status.HTTP_200_OK)
@api_view(['GET', 'POST'])
def summary(request):
    if request.method == 'POST':
        subs = ''
        list2 = YouTubeTranscriptApi.get_transcript(request.data.get('id'))
        for item in list2:
            subs = subs + "\n" + item.get("text")
        # summarize the transcript with the DeepAI summarization endpoint
        r = requests.post(
            "https://api.deepai.org/api/summarization",
            data={
                'text': subs,
            },
            headers={'api-key': 'bf8f2858-977c-4111-9331-48a15f4201bd'}
        )
        summary = r.json().get('output')
        return Response(summary, status=status.HTTP_200_OK)
@api_view(['GET', 'POST'])
def keywords(request):
    if request.method == 'POST':
        subs = ''
        list2 = YouTubeTranscriptApi.get_transcript(request.data.get('id'))
        for item in list2:
            subs = subs + "\n" + item.get("text")
        # tag the transcript with the DeepAI text-tagging endpoint
        r = requests.post(
            "https://api.deepai.org/api/text-tagging",
            data={
                'text': subs,
            },
            headers={'api-key': 'bf8f2858-977c-4111-9331-48a15f4201bd'}
        )
        keywords = r.json().get('output')
        return Response(keywords, status=status.HTTP_200_OK)
|
[
"choudharyritik3@gmail.com"
] |
choudharyritik3@gmail.com
|
cbe572794cf3d14a5d9ee5e9c76677166b3ef83a
|
45199b72fdcddb9e24a132d961d0698d7603ad9f
|
/Day2810_HW_SoundcloudPatterns/components/player_bar.py
|
a90c04a97716d31ae2e2533f00c61f53fa3b3a8d
|
[] |
no_license
|
2gisprojectT/terehov-soundcloud
|
21db3aac98699b139abd07125e2ebdabc08ac130
|
16704541b98f0e017e3b6930efc1aa15f1e50184
|
refs/heads/master
| 2021-01-23T17:31:04.549841
| 2014-11-14T09:52:29
| 2014-11-14T09:52:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from Day2810_HW_SoundcloudPatterns.components.base_component import BaseComponent
class PlayerBar(BaseComponent):
_selectors = {
"title": "playbackTitle__link"
}
def title(self):
title = self.element.find_element_by_class_name(self._selectors["title"])
return title.text
|
[
"alexey.terekhov@bk.ru"
] |
alexey.terekhov@bk.ru
|
1a5b707d2641c2e6f5cf7187340d9d81e08873ed
|
acbe6bd6cefaf8b12070d7258dab30e4f7fcebed
|
/lib/libdbr/dateinfo.py
|
22020e47cbc869a9b5bd6362b55c8f991e4efb49
|
[
"MIT"
] |
permissive
|
RogueScholar/debreate
|
02c98c5a78d33041798410f0e3b99e80fda65d00
|
dfe9bcac7333a53082b3a2ae169806cf604d59f6
|
refs/heads/master
| 2023-06-07T11:49:03.821969
| 2023-04-28T02:14:25
| 2023-04-28T02:14:25
| 253,707,766
| 0
| 0
|
MIT
| 2023-05-28T15:24:17
| 2020-04-07T06:34:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,135
|
py
|
# ****************************************************
# * Copyright © 2023 - Jordan Irwin (AntumDeluge) *
# ****************************************************
# * This software is licensed under the MIT license. *
# * See: LICENSE.txt for details. *
# ****************************************************
## Date & time formatting.
#
# @module libdbr.dateinfo
from datetime import datetime
from time import strftime
## Formatting methods for dates & times
#
# Formats:
#  DEFAULT (none), CL (changelog), LOG (logger), STAMP (timestamp)
class dtfmt:
DEFAULT = 0
CL = 1
LOG = 2
STAMP = 3
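  # e.g. getDate(dtfmt.CL) -> "Sun, 01 Jan 2023", getTime(dtfmt.LOG) -> "12:00:00.000",
  # getDate(dtfmt.STAMP) -> "20230101_120000000"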
## Prepends a zero to single-digit numbers
#
# TODO: use standard Python methods to pad with zeros
#
# @param number
# Integer to be modified.
# @return
# String representation of digit.
def digitToString(number):
if number < 10:
return "0{}".format(number)
return str(number)
## Retrieves the current year.
#
# @param fmt
# dtfmt to use.
# @param tostring
# If true, convert returned value to string.
# @return
# Integer or string representation of year.
def getYear(fmt=dtfmt.DEFAULT, tostring=True):
year = strftime("%Y")
if not tostring:
year = int(year)
return year
## Retrieves the current month.
#
# @param tostring
# If true, convert returned value to string.
# @return
# Integer or string representation of month.
def getMonth(tostring=True):
month = strftime("%m")
if not tostring:
month = int(month)
return month
## Retrieves the current day of the month.
#
# @param tostring
# If true, convert returned value to string.
# @return
# Integer or string representation of day.
def getDay(tostring=True):
day = strftime("%d")
if not tostring:
day = int(day)
return day
## Retrieves today's date.
#
# @param fmt
# dtfmt to use.
# @return
# String representation of date.
def getDate(fmt=dtfmt.DEFAULT):
yr = getYear()
if fmt == dtfmt.CL:
# format: Wkdy, DD Mon YYYY
return "{} {}".format(strftime("%a, %d %b"), yr)
if fmt == dtfmt.STAMP:
# format YYYYMMDD_HHMMSSmmm
return "{}_{}".format(strftime("%Y%m%d"), getTime(fmt))
# format: YYYY-MM-DD
return "{}-{}".format(yr, strftime("%m-%d"))
## Retrieves current time.
#
# @param fmt
# dtfmt to use.
# @return
# String representation of time.
def getTime(fmt=dtfmt.DEFAULT):
ms = None
current_time = None
if fmt in (dtfmt.LOG, dtfmt.STAMP,):
ms = datetime.now().strftime("%f")[:3]
if fmt == dtfmt.STAMP:
# format: HHMMSSmmm
current_time = "{}{}".format(strftime("%H%M%S"), ms)
else:
# format: HH:MM:SS.mmm
current_time = "{}.{}".format(strftime("%T"), ms)
else:
# format: HH:MM:SS
current_time = strftime("%H:%M:%S")
return current_time
## Retrieves current time zone.
#
# @param fmt
# dtfmt to use.
# @return
# String representation of timezone.
def getTimeZone(fmt=dtfmt.DEFAULT):
return strftime("%z")
## Retrieves a date string formatted for a Debian changelog.
def getDebianizedDate():
return "{} {} {}".format(getDate(dtfmt.CL), getTime(dtfmt.CL), getTimeZone(dtfmt.CL))
|
[
"antumdeluge@gmail.com"
] |
antumdeluge@gmail.com
|
9af2a24dc7f424453169bb1a738a452996857e21
|
be8ea690e2516e9e66788eed28f6c2f48c03b5b2
|
/code/wikiParser/cleanContext.py
|
846a1c854b0224fcc22b7883812458053062fffc
|
[] |
no_license
|
ZhouDavid/UnderGraduProject
|
28feb808cab9cd3867217c0207371ed1b1aea322
|
075645d30cb0d1ac5680ed015a379810178ccb59
|
refs/heads/master
| 2021-01-22T18:10:40.552534
| 2017-06-22T10:20:50
| 2017-06-22T10:20:50
| 85,063,754
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#coding:utf-8
def cleanContext(contextFileName,minContextNum):
newContexts=[]
contexts = open(contextFileName,'rb').readlines()
tmp=''
for c in contexts:
c = c.decode('utf-8').strip()
if len(tmp)<minContextNum:
tmp+=c
else:
newContexts.append(tmp)
tmp=c
    # flush the final accumulated chunk so the tail of the file is not dropped
    if tmp:
        newContexts.append(tmp)
return newContexts
if __name__ == '__main__':
contexts = cleanContext('E:\\Graduation-Project\\code\\wikiParser\\test',200)
contexts = [(c+'\n').encode('utf-8') for c in contexts]
open('test2','wb').writelines(contexts)
|
[
"Jianyu Zhou"
] |
Jianyu Zhou
|
dcee3efa896d28f24f72b0dc92e85b26353f6ca8
|
97b146cf569430818d37f3e376b10e456c106526
|
/server/TCP_MultiThreaded_HTML_Server.py
|
406bce88f5321ae3bbd2df6e858e686e5b7e01d2
|
[] |
no_license
|
RedOneLima/simple-HTTP-server
|
76b202817294c3e07b7bd199e3531125c0763e76
|
1003c606fa8c56c80d5122258a432390acc479f2
|
refs/heads/master
| 2021-01-11T18:50:09.625512
| 2017-01-21T16:30:27
| 2017-01-21T16:30:27
| 79,636,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
import threading
import SocketServer
import datetime
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
server_name = 'Simple Python Server 2.7'#name sent as server name
while 1:
try:
in_data = self.request.recv(1024)#listen/recv
                data = in_data.split('\n')#separate the incoming data from a single string into a list
                request_header = str(data[0]).split('/')#separate the request from the file name
                request = request_header[0]#save the request verb (e.g. GET)
                file_name= request_header[1]#save file name
http_version = data[2]#not used
user_agent = data[3]#not used
except Exception:#when connection is closed
print 'Client {} on {} closed'.format(self.client_address, threading.current_thread().name)
break
else:
if str(request).upper() == 'GET':
request_code = '200 OK'
try:
file = open(file_name,'r')
request_file = file.read()
except IOError:
request_code = '404 Not Found'
else:
request_code = '400 Bad Request'
print 'From {} on {}: \n{}'.format(self.client_address,threading.current_thread().name, in_data)
if request_code == '200 OK':
response =('\n'+request_code+'\nDate: '+str(datetime.datetime.now())+'\nServer: '+server_name+'\n\n'+str(request_file)+'\r\n\r\n\r\n\r\n')
print 'To {} on {}: {}'.format(self.client_address, threading.current_thread().name,'\n'+request_code+'\nDate: '+str(datetime.datetime.now())+'\nServer: '+server_name+'\n\n')
self.request.sendall(response)
else:
response = ('\n'+request_code+'\nDate: '+str(datetime.datetime.now())+'\nServer: '+server_name+'\n\n')
print 'To {} on {}: \n{}'.format(self.client_address,threading.current_thread().name, response)
self.request.sendall(response)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = 'cs3700.msudenver.edu',5120
server = ThreadedTCPServer((HOST,PORT), ThreadedTCPRequestHandler)#create a threaded TCP server
server_thread = threading.Thread(target=server.serve_forever)#run the threaded server until terminated
server_thread.start()
print "Server loop running in thread:", server_thread.name
|
[
"khewitt08@live.com"
] |
khewitt08@live.com
|
a25dfc1f724f24ab65af4e9aff7d73130e692da4
|
9a10c0e704867c38b5eb79e6a4718378538d4d3a
|
/django实现类博客和BBS系统/bin/EdmureBlog/web/views/home.py
|
c429387da7a058150d42618e007c6c76fcdf5f24
|
[] |
no_license
|
shisanjun/django
|
811788319d779530aea7302ec7e1a434ded05b93
|
27019a08657ce30517c7e8d4684ece62bf3e154b
|
refs/heads/master
| 2022-03-11T05:41:55.581388
| 2022-02-22T07:15:59
| 2022-02-22T07:15:59
| 120,069,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,153
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.shortcuts import render,HttpResponse
from django.shortcuts import redirect
from repository import models
from utils.authication import login
from utils import pagination
from django.urls import reverse
import json
from django.db.models import Max,Count
from collections import Counter
def index(request):
"""
    Blog home page: displays all articles
:param request:
:return:
"""
type=request.GET.get("type","0")
if type=="0":
article_lists = models.Article.objects.all().order_by("-nid")
else:
article_lists = models.Article.objects.filter(article_type=type).order_by("-nid")
current_page = request.GET.get('p', 1)
current_page = int(current_page)
val = request.COOKIES.get('per_page_count',10)
val = int(val)
page_obj = pagination.Page(current_page=current_page,data_count=len(article_lists),per_page_count=val)
data = article_lists[page_obj.start:page_obj.end]
page_str = page_obj.page_str(reverse("index"))
article_type=models.Article.type_choices
read_count_objs=models.Article.objects.values("nid","read_count","title").annotate(read_count_max=Max("read_count")).order_by("-read_count_max")[:7]
commnet_count_objs=models.Article.objects.values("nid","comment_count","title").annotate(comment_count_max=Max("comment_count")).order_by("-comment_count_max")[:7]
return render(request, 'index.html', {'article_lists': data,
"page_str":page_str,
"article_type":article_type,
"read_count_objs":read_count_objs,
"commnet_count_objs":commnet_count_objs,
})
def month_group():
    # group articles by (year, month)
article_objs2=models.Article.objects.annotate(num_comment=Count("nid")).filter(create_time__isnull=False).order_by("-num_comment")
year_month_list=[(p.create_time.year,p.create_time.month) for p in article_objs2]
year_month_dict=Counter(year_month_list)
date_list=[(key[0],key[1],year_month_dict[key]) for key in year_month_dict]
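    # e.g. [(2021, 5, 3), (2021, 4, 7)] means 3 articles in 2021-05 and 7 in 2021-04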
date_list.sort(reverse=True)
return date_list
def menu(site):
blog_home = models.Blog.objects.filter(site=site).select_related('user').first()
fans_count=models.UserFans.objects.filter(user_id=blog_home.user.nid).count()
relate_fans_count=models.UserFans.objects.filter(follower_id=blog_home.user.nid).count()
category_objs=models.Category.objects.filter(blog_id=blog_home.nid)
tag_objs=models.Tag.objects.filter(blog_id=blog_home.nid)
article_objs=models.Article.objects.filter(blog_id=blog_home.nid).order_by("-nid")
month_objs=month_group
return {
"blog_home":blog_home,
"fans_count":fans_count,
"relate_fans_count":relate_fans_count,
"category_objs":category_objs,
"tag_objs":tag_objs,
"article_objs":article_objs,
"month_objs":month_objs,
"site":site
}
@login
def home(request,site):
"""
博主个人首页
:param request:
:param site: 博主的网站后缀如:http://xxx.com/wupeiqi.html
:return:
"""
render_objs=menu(site)
blog_home=render_objs.get("blog_home")
article_objs=models.Article.objects.filter(blog_id=blog_home.nid).order_by("-nid")
render_objs["article_objs"]=article_objs
return render(request, 'home.html',render_objs )
def filter(request, site, condition, val):
"""
    Filtered article list (by tag, category, or date)
:param request:
:param site:
:param condition:
:param val:
:return:
"""
user_home = models.Blog.objects.filter(site=site).select_related('user').first()
if not user_home:
return redirect('/')
template_name = "home_summary_list.html"
if condition == 'tag':
# print("tag")
template_name = "home_summary_list.html"
article_list = models.Article.objects.filter(tags__nid=val, blog=user_home).all()
# print(article_list)
elif condition == 'category':
template_name = "home_summary_list.html"
article_list = models.Article.objects.filter(category__nid=val, blog=user_home).all()
elif condition == 'date':
template_name = "home_summary_list.html"
article_list = models.Article.objects.filter(blog=user_home).extra(
where=['date_format(create_time,"%%Y%%m")=%s'], params=[val, ]).all()
else:
article_list = []
menu_dict=menu(site)
menu_dict["article_list"]=article_list
return render(request, template_name,menu_dict)
def detail(request, site, nid):
"""
    Article detail page
:param request:
:param site:
:param nid:
:return:
"""
render_dict=menu(site)
blog_home=render_dict.get("blog_home")
article_obj=models.Article.objects.filter(blog_id=blog_home.nid,nid=nid).first()
    # increment the read count by 1
article_obj.read_count=int(article_obj.read_count)+1
article_obj.save()
comment_objs=models.Comment.objects.filter(article_id=nid)
render_dict["article_obj"]=article_obj
render_dict["comment_objs"]=comment_objs
return render(request, 'home_detail.html',render_dict)
def up_article(request):
res={"status":False,"data":None}
if request.method=="GET":
username=request.GET.get("site")
article_id=request.GET.get("nid")
user_obj=models.UserInfo.objects.filter(username=username).first()
article_obj=models.Article.objects.filter(nid=article_id).first()
updown_obj=models.UpDown.objects.filter(article_id=article_id,user_id=user_obj.nid).first()
        # no vote record exists yet
if updown_obj is None:
models.UpDown.objects.create(article_id=article_id,user_id=user_obj.nid,up=True)
            # increment the article's up-vote count
article_obj.up_count+=1
article_obj.save()
res["status"]=True
        # a vote record already exists
else:
            # is it currently an up-vote or a down-vote?
            if not updown_obj.up:  # currently a down-vote
                # flip the down-vote into an up-vote
                updown_obj.up=True
updown_obj.save()
article_obj.up_count+=1
article_obj.down_count-=1
article_obj.save()
res["status"]=True
return HttpResponse(json.dumps(res))
def down_article(request):
res={"status":False,"data":None}
if request.method=="GET":
username=request.GET.get("site")
article_id=request.GET.get("nid")
user_obj=models.UserInfo.objects.filter(username=username).first()
article_obj=models.Article.objects.filter(nid=article_id).first()
updown_obj=models.UpDown.objects.filter(article_id=article_id,user_id=user_obj.nid).first()
        # no vote record exists yet
if updown_obj is None:
models.UpDown.objects.create(article_id=article_id,user_id=user_obj.nid,up=False)
            # increment the article's down-vote count
article_obj.down_count+=1
article_obj.save()
res["status"]=True
        # a vote record already exists
else:
            # is it currently an up-vote or a down-vote?
            if updown_obj.up: # currently an up-vote
                # flip the up-vote into a down-vote
updown_obj.up=False
updown_obj.save()
article_obj.down_count+=1
article_obj.up_count-=1
article_obj.save()
res["status"]=True
return HttpResponse(json.dumps(res))
def replay_article(request):
"""
    Post a comment on an article
:param request:
:return:
"""
# print(request.POST)
ret={"status":False,"data":None,"error":None}
if request.method=="POST":
article_id=request.POST.get("article_id")
content=request.POST.get("content")
username=request.POST.get("username")
user_obj=models.UserInfo.objects.filter(username=username).first()
try:
comment_obj=models.Comment.objects.create(content=content,article_id=article_id,user_id=user_obj.nid)
article_obj=models.Article.objects.filter(nid=article_id).first()
            # increment the comment count by 1
article_obj.comment_count+=1
article_obj.save()
ret["status"]=True
except:
ret["status"]=False
ret["error"]="创建失败"
return HttpResponse(json.dumps(ret))
def fans_add(request):
"""
    Follow a blogger (become a fan)
:param request:
:return:
"""
ret={"status":False,"data":None,"error":None}
if request.method=="GET":
site=request.GET.get("site")
username=request.GET.get("username")
if site==username:
ret["error"]="用户不能添加自己为粉丝"
else:
site_obj=models.UserInfo.objects.filter(username=site).first()
fan_user_obj=models.UserInfo.objects.filter(username=username).first()
try:
fan_obj=models.UserFans.objects.create(user_id=site_obj.nid,follower_id=fan_user_obj.nid)
ret["status"]=True
ret["data"]="关注成功,您已是[%s]的粉丝"%site
except:
ret["error"]="已是[%s]的粉丝" %site
return HttpResponse(json.dumps(ret))
def fans_cancel(request):
"""
    Unfollow a blogger
:param request:
:return:
"""
ret={"status":False,"data":None,"error":None}
if request.method=="GET":
site=request.GET.get("site")
username=request.GET.get("username")
if site==username:
ret["error"]="用户不能添加或者取消自己为粉丝"
else:
site_obj=models.UserInfo.objects.filter(username=site).first()
fan_user_obj=models.UserInfo.objects.filter(username=username).first()
fan_obj=models.UserFans.objects.filter(user_id=site_obj.nid,follower_id=fan_user_obj.nid)
if fan_obj is not None:
fan_obj.delete()
ret["status"]=True
ret["data"]="已取消为[%s]的粉丝"%site
return HttpResponse(json.dumps(ret))
def show_article(request,article_id):
"""
    View an article (redirects to its blog URL)
:param request:
:param article_id:
:return:
"""
if request.method=="GET":
article_obj=models.Article.objects.filter(nid=article_id).first()
return redirect("/%s/%s.html" %(article_obj.blog.site,article_id))
def replay_otheruser(request):
"""
    Reply to another user's comment
:param request:
:return:
"""
ret={"status":False,"data":None,"error":None}
if request.method=="POST":
comment_id=request.POST.get("comment_id")
replay_comment=request.POST.get("replay_comment")
article_id=request.POST.get("article_id")
username=request.POST.get("username")
user_obj=models.UserInfo.objects.filter(username=username).first()
comment_user=request.POST.get("comment_user")
comment_obj=models.Comment.objects.create(
reply_id=comment_id,
content=replay_comment,
article_id=article_id,
user_id=user_obj.nid
)
if comment_obj is not None:
ret["status"]=True
return HttpResponse(json.dumps(ret))
|
[
"lixiang_0510@126.com"
] |
lixiang_0510@126.com
|
65c468ee4af139e0b2068f1aa77294ffbc4aab21
|
bc2918dd2e5192cca4cd053a13beedd72bca6cc2
|
/accounts/models.py
|
0a1128937a8bbe8b1354bc5dbfe5189046f9e8b1
|
[] |
no_license
|
hussainjhaveri/testrepo
|
02c7eac8e2f185eae96730beb1ada13ed2879716
|
baba7f5782ff6268b491732ed482343b08ae805d
|
refs/heads/master
| 2023-01-08T14:59:25.474759
| 2020-11-13T01:39:20
| 2020-11-13T01:39:20
| 312,435,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
from django.contrib.auth.models import User
from django.db import models
types = [('dog','dog'),('cat','cat'),('bird','bird')]
class Pets(models.Model):
    name = models.CharField(max_length=100, null=False)
    species = models.CharField(max_length=10, choices=types)
    age = models.IntegerField()
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
|
[
"jhaverihussain@gmail.com"
] |
jhaverihussain@gmail.com
|
2a0ca8f479f28e49d1b41af55a3c769c0c9453d6
|
a39a9e6033d0148a37f25b7d7b5af4b1ec4b8c5f
|
/speech_recognition/pytorch/train.py
|
69ac1a270be0fc7297161ca18c1aa53b791a726c
|
[] |
no_license
|
arnav-s/BlockSparse
|
180470e779eb259ec5a862a3ddd75b2d4d38f84c
|
50225daaf46803a0a684f9b17c940ef4841e90fa
|
refs/heads/master
| 2020-05-22T00:07:59.795846
| 2019-05-11T18:04:13
| 2019-05-11T18:04:13
| 186,165,600
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,935
|
py
|
import argparse
import errno
import json
import os
import time
import sys
import numpy as np
import random
from collections import OrderedDict
import torch
from torch.autograd import Variable
from warpctc_pytorch import CTCLoss
import torch.nn.functional as F
### Import Data Utils ###
sys.path.append('../')
from data.bucketing_sampler import BucketingSampler, SpectrogramDatasetWithLength
from data.data_loader import AudioDataLoader, SpectrogramDataset
from decoder import GreedyDecoder
from model import DeepSpeech, supported_rnns
import params
from eval_model import eval_model
###########################################################
#  Command line arguments, handled by params except seed  #
###########################################################
parser = argparse.ArgumentParser(description='DeepSpeech training')
parser.add_argument('--checkpoint', dest='checkpoint', action='store_true', help='Enables checkpoint saving of model')
parser.add_argument('--save_folder', default='models/', help='Location to save epoch models')
parser.add_argument('--model_path', default='models/deepspeech_final.pth.tar',
help='Location to save best validation model')
parser.add_argument('--continue_from', default='', help='Continue from checkpoint model')
parser.add_argument('--seed', default=0xdeadbeef, type=int, help='Random Seed')
parser.add_argument('--acc', default=23.0, type=float, help='Target WER')
parser.add_argument('--start_epoch', default=-1, type=int, help='Number of epochs at which to start from')
def to_np(x):
return x.data.cpu().numpy()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def get_initial_slope(x,block_size):
    # target 85% sparsity: 85th percentile of all weight magnitudes
    q = np.percentile(x,85)
    # slope heuristic from the paper; tune to match your pruning schedule
    theta = q*100*2/(2*(0.2*params.epochs-2)+3*(0.4*params.epochs-0.2*params.epochs))
    return theta*pow(block_size,0.25)
def get_threshold(model, pruning_perc):
all_weights = []
for p in model.parameters():
if len(p.data.size()) != 1:
all_weights += list(p.cpu().data.abs().numpy().flatten())
threshold = np.percentile(np.array(all_weights), pruning_perc)
return threshold
def get_blocks(x,size):
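    # Split a 2-D array into non-overlapping (size x size) tiles,
    # e.g. an 8x8 matrix with size=4 yields four 4x4 blocks.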
l=[]
r,c = x.shape
for i in range(0,r,size):
for j in range(0,c,size):
l.append(x[i:i+size,j:j+size])
return np.array(l)
def weight_prune(arr,threshold,size):
    '''
    Block-sparse pruning (arXiv: 1606.09274): zero out every
    (size x size) block of `arr` whose block maximum does not
    exceed `threshold` in absolute value.
    '''
    blk_arr = get_blocks(arr,size)
    # signed maximum of each block
    y = [np.max(blk_arr[i]) for i in range(len(blk_arr))]
    # boolean mask: True keeps the block, False prunes it
    pruned_inds = np.abs(y) > threshold
    r,c = arr.shape
    ctr = 0
    for i in range(0,r,size):
        for j in range(0,c,size):
            arr[i:i+size,j:j+size] = arr[i:i+size,j:j+size]*pruned_inds[ctr]
            ctr+=1
def main():
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if params.rnn_type == 'gru' and params.rnn_act_type != 'tanh':
print("ERROR: GRU does not currently support activations other than tanh")
sys.exit()
if params.rnn_type == 'rnn' and params.rnn_act_type != 'relu':
print("ERROR: We should be using ReLU RNNs")
sys.exit()
print("=======================================================")
for arg in vars(args):
print("***%s = %s " % (arg.ljust(25), getattr(args, arg)))
print("=======================================================")
save_folder = args.save_folder
loss_results, cer_results, wer_results = torch.Tensor(params.epochs), torch.Tensor(params.epochs), torch.Tensor(params.epochs)
best_wer = None
try:
os.makedirs(save_folder)
except OSError as e:
if e.errno == errno.EEXIST:
print('Directory already exists.')
else:
raise
criterion = CTCLoss()
with open(params.labels_path) as label_file:
labels = str(''.join(json.load(label_file)))
audio_conf = dict(sample_rate=params.sample_rate,
window_size=params.window_size,
window_stride=params.window_stride,
window=params.window,
noise_dir=params.noise_dir,
noise_prob=params.noise_prob,
noise_levels=(params.noise_min, params.noise_max))
train_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=params.train_manifest, labels=labels,
normalize=True, augment=params.augment)
test_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=params.val_manifest, labels=labels,
normalize=True, augment=False)
train_loader = AudioDataLoader(train_dataset, batch_size=params.batch_size,
num_workers=1)
test_loader = AudioDataLoader(test_dataset, batch_size=params.batch_size,
num_workers=1)
rnn_type = params.rnn_type.lower()
assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
model = DeepSpeech(rnn_hidden_size = params.hidden_size,
nb_layers = params.hidden_layers,
labels = labels,
rnn_type = supported_rnns[rnn_type],
audio_conf = audio_conf,
bidirectional = False,
rnn_activation = params.rnn_act_type,
bias = params.bias)
parameters = model.parameters()
optimizer = torch.optim.SGD(parameters, lr=params.lr,
momentum=params.momentum, nesterov=True,
weight_decay = params.l2)
decoder = GreedyDecoder(labels)
if args.continue_from:
print("Loading checkpoint model %s" % args.continue_from)
package = torch.load(args.continue_from)
model.load_state_dict(package['state_dict'])
optimizer.load_state_dict(package['optim_dict'])
start_epoch = int(package.get('epoch', 1)) - 1 # Python index start at 0 for training
start_iter = package.get('iteration', None)
if start_iter is None:
start_epoch += 1 # Assume that we saved a model after an epoch finished, so start at the next epoch.
start_iter = 0
else:
start_iter += 1
avg_loss = int(package.get('avg_loss', 0))
if args.start_epoch != -1:
start_epoch = args.start_epoch
loss_results[:start_epoch], cer_results[:start_epoch], wer_results[:start_epoch] = package['loss_results'][:start_epoch], package[ 'cer_results'][:start_epoch], package['wer_results'][:start_epoch]
print(loss_results)
epoch = start_epoch
else:
avg_loss = 0
start_epoch = 0
start_iter = 0
avg_training_loss = 0
if params.cuda:
model = torch.nn.DataParallel(model).cuda()
print(model)
print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
ctc_time = AverageMeter()
#can affect performance (tuning needed)
threshold = 1
curr_iter = 0
all_weights = []
for p in model.parameters():
if len(p.data.size()) != 1:
all_weights += list(p.cpu().data.abs().numpy().flatten())
#second param is block size
slope = get_initial_slope(np.array(all_weights),8*8)
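    # `threshold` is multiplied by `slope` every 100 iterations in the training
    # loop below, so the pruning threshold grows geometrically over time.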
wanted_weights = []
for k in model.state_dict().keys():
if 'rnn' in k:
wanted_weights.append(k)
for epoch in range(start_epoch, params.epochs):
model.train()
end = time.time()
for i, (data) in enumerate(train_loader, start=start_iter):
curr_iter+=1
if i == len(train_loader):
break
if curr_iter%100==0:
threshold = threshold*slope
inputs, targets, input_percentages, target_sizes = data
# measure data loading time
data_time.update(time.time() - end)
inputs = Variable(inputs, requires_grad=False)
target_sizes = Variable(target_sizes, requires_grad=False)
targets = Variable(targets, requires_grad=False)
if params.cuda:
inputs = inputs.cuda()
out = model(inputs)
out = out.transpose(0, 1) # TxNxH
seq_length = out.size(0)
sizes = Variable(input_percentages.mul_(int(seq_length)).int(), requires_grad=False)
ctc_start_time = time.time()
loss = criterion(out, targets, sizes, target_sizes)
ctc_time.update(time.time() - ctc_start_time)
loss = loss / inputs.size(0) # average the loss by minibatch
loss_sum = loss.data.sum()
inf = float("inf")
if loss_sum == inf or loss_sum == -inf:
print("WARNING: received an inf loss, setting loss value to 0")
loss_value = 0
else:
loss_value = loss.data[0]
avg_loss += loss_value
losses.update(loss_value, inputs.size(0))
# compute gradient
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm(model.parameters(), params.max_norm)
# SGD step
optimizer.step()
if params.cuda:
torch.cuda.synchronize()
new_state_dict = model.state_dict().copy()
            if (epoch >= 0.2 * params.epochs) and (epoch <= 0.4 * params.epochs):
for w in wanted_weights:
weight_prune(new_state_dict[w],threshold,8)
model.load_state_dict(new_state_dict,strict=False)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'CTC Time {ctc_time.val:.3f} ({ctc_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
(epoch + 1), (i + 1), len(train_loader), batch_time=batch_time,
data_time=data_time, ctc_time=ctc_time, loss=losses))
del loss
del out
#Add function to keep zeroed out terms
avg_loss /= len(train_loader)
print('Training Summary Epoch: [{0}]\t'
'Average Loss {loss:.3f}\t'
.format( epoch + 1, loss=avg_loss, ))
start_iter = 0 # Reset start iteration for next epoch
total_cer, total_wer = 0, 0
model.eval()
wer, cer = eval_model( model, test_loader, decoder)
loss_results[epoch] = avg_loss
wer_results[epoch] = wer
cer_results[epoch] = cer
print('Validation Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(
epoch + 1, wer=wer, cer=cer))
if args.checkpoint:
file_path = '%s/deepspeech_%d.pth.tar' % (save_folder, epoch + 1)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results),
file_path)
# anneal lr
optim_state = optimizer.state_dict()
optim_state['param_groups'][0]['lr'] = optim_state['param_groups'][0]['lr'] / params.learning_anneal
optimizer.load_state_dict(optim_state)
print('Learning rate annealed to: {lr:.6f}'.format(lr=optim_state['param_groups'][0]['lr']))
if best_wer is None or best_wer > wer:
print("Found better validated model, saving to %s" % args.model_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results)
, args.model_path)
best_wer = wer
avg_loss = 0
#If set to exit at a given accuracy, exit
if params.exit_at_acc and (best_wer <= args.acc):
break
print("=======================================================")
print("***Best WER = ", best_wer)
for arg in vars(args):
print("***%s = %s " % (arg.ljust(25), getattr(args, arg)))
print("=======================================================")
if __name__ == '__main__':
main()
|
[
"sharma55@wisc.edu"
] |
sharma55@wisc.edu
|
158fdcbd64d47adb841046556b8f916b283a7524
|
40029281c27f748dfdb7e2b2b52186f239caeed8
|
/metadataanalysis_client/models/translate_text_response.py
|
82c5cc1c1bbfbc6dc48df5d6e64e664881f99cac
|
[] |
no_license
|
daletcoreil/metadataanalysis-client-python-sdk
|
d30001c2f91c94f6aca3627735280e641839e4a1
|
9b4b495fe3f5f1d5c7d28e126def9c311c5518e5
|
refs/heads/master
| 2021-06-20T15:54:36.426663
| 2021-05-04T06:41:55
| 2021-05-04T06:41:55
| 207,790,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,868
|
py
|
# coding: utf-8
"""
Dalet Metadata Analysis API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 2.1.0
Contact: cortexsupport@dalet.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from metadataanalysis_client.configuration import Configuration
class TranslateTextResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'detected_source_language': 'str',
'text': 'str'
}
attribute_map = {
'detected_source_language': 'detectedSourceLanguage',
'text': 'text'
}
def __init__(self, detected_source_language=None, text=None, local_vars_configuration=None): # noqa: E501
"""TranslateTextResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._detected_source_language = None
self._text = None
self.discriminator = None
if detected_source_language is not None:
self.detected_source_language = detected_source_language
self.text = text
@property
def detected_source_language(self):
"""Gets the detected_source_language of this TranslateTextResponse. # noqa: E501
The source language that was detected by the API in case it was not specified in the request. # noqa: E501
:return: The detected_source_language of this TranslateTextResponse. # noqa: E501
:rtype: str
"""
return self._detected_source_language
@detected_source_language.setter
def detected_source_language(self, detected_source_language):
"""Sets the detected_source_language of this TranslateTextResponse.
The source language that was detected by the API in case it was not specified in the request. # noqa: E501
:param detected_source_language: The detected_source_language of this TranslateTextResponse. # noqa: E501
:type: str
"""
self._detected_source_language = detected_source_language
@property
def text(self):
"""Gets the text of this TranslateTextResponse. # noqa: E501
Translated text. # noqa: E501
:return: The text of this TranslateTextResponse. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this TranslateTextResponse.
Translated text. # noqa: E501
:param text: The text of this TranslateTextResponse. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and text is None: # noqa: E501
raise ValueError("Invalid value for `text`, must not be `None`") # noqa: E501
self._text = text
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TranslateTextResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TranslateTextResponse):
return True
return self.to_dict() != other.to_dict()
|
[
"daletcoreil@gmail.com"
] |
daletcoreil@gmail.com
|
584a6b3e4bd12b8b30b38ac831abcad5b37fc763
|
bd8bc7abe0f774f84d8275c43b2b8c223d757865
|
/153_FindMinimumInRotatedSortedArray/findMin.py
|
03031fab7b0e95317febbbd14ece87fb911503d3
|
[
"MIT"
] |
permissive
|
excaliburnan/SolutionsOnLeetcodeForZZW
|
bde33ab9aebe9c80d9f16f9a62df72d269c5e187
|
64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7
|
refs/heads/master
| 2023-04-07T03:00:06.315574
| 2021-04-21T02:12:39
| 2021-04-21T02:12:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# Solution 1: binary search (my approach)
# Runtime: 32 ms, faster than 98.51% of Python3 submissions
# Memory: 13.2 MB, less than 44.85% of Python3 submissions
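# e.g. findMin([4, 5, 6, 7, 0, 1, 2]) -> 0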
class Solution:
def findMin(self, nums: List[int]) -> int:
left, right = 0, len(nums) - 1
if left == right or nums[left] < nums[right]:
return nums[left]
while left < right:
if right - left == 1:
return min(nums[left], nums[right])
mid = (left + right) // 2
if nums[left] < nums[mid] and nums[right] < nums[mid]:
left = mid + 1
else:
right = mid
return nums[left]
# Solution 2: compare against nums[right] < nums[mid] instead of using nums[left]
class Solution:
def findMin(self, nums: List[int]) -> int:
left, right = 0, len(nums) - 1
while left < right:
mid = left + (right - left) // 2
            # We deliberately avoid nums[left] < nums[mid]:
            # for an input like [2, 1] that test would stop at 2, so we compare against right, not left
if nums[right] < nums[mid]:
left = mid + 1
else:
right = mid
return nums[left]
|
[
"noreply@github.com"
] |
excaliburnan.noreply@github.com
|
0566b5c4f01c740c616a63dbd55f70956d87d809
|
6771baa15cec59d12430396ce4db207ce908105d
|
/Class1/src/information.py
|
ffbf4e76fc34d2b21e5f359fd0a8d0d33560ebef
|
[] |
no_license
|
ChandanaKotta/TA-ImageAnalysis-IIITB
|
6bda5cc7e8cb81118f266d44b9d03b0ec65161f1
|
74670bded3241fab2ca221449bed441ff141a58a
|
refs/heads/master
| 2021-05-12T10:47:42.196999
| 2018-09-08T22:16:21
| 2018-09-08T22:16:21
| 117,361,382
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
# play with bits
# the idea of this code is to illustrate the importance of bits in capturing information.
# The bits to the left (in each pixel comprised of say 8 bits) capture the maximum amount of information.
import cv2
import numpy as np
mona_lisa = cv2.imread("../images/mona.jpg")
height, width, channels = mona_lisa.shape
# the position of the bit that shall be manipulated.
bit_number = 0
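# With np.unpackbits' default big-endian bit order, index 0 is the most significant bit.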
cv2.imshow("mona",mona_lisa)
cv2.waitKey(0)
fused = np.zeros((height,width,3))
for i in range(height):
for j in range(width):
b,g,r = mona_lisa[i,j]
# convert from uint8 to bits, replace each pixel's nth bit with 0. Repeat for all three channels.
b = np.unpackbits(b)
b[bit_number] = 0
g = np.unpackbits(g)
g[bit_number] = 0
r = np.unpackbits(r)
r[bit_number] = 0
mona_lisa[i,j,0] = np.packbits(b)
mona_lisa[i,j,1] = np.packbits(g)
mona_lisa[i,j,2] = np.packbits(r)
cv2.imshow("altered",mona_lisa)
real_mona = cv2.imread("../images/mona.jpg")
cv2.imshow("real",real_mona)
cv2.waitKey(0)
|
[
"chandanakotta@gmail.com"
] |
chandanakotta@gmail.com
|
d459870071e09d60bcb4aa723e0973b623318343
|
449175eb373ebc622221552e43b46c9378adb618
|
/grader/files_mai/6210545629_task1.py
|
5e86412ce6df9538532eeedebe470a0a0bfb0bd7
|
[] |
no_license
|
NutthanichN/grading-helper
|
911d39211e070eafc9ffee6978f9270a0be38016
|
971c605effb4f59e9e22a32503337b3e671f120c
|
refs/heads/master
| 2022-12-12T22:46:15.062462
| 2020-09-08T06:50:12
| 2020-09-08T06:50:12
| 293,724,530
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,668
|
py
|
def check_valid_input(s):
"""
*** DOCTESTS IS HERE ***
>>> check_valid_input("rock")
True
>>> check_valid_input("paper")
True
>>> check_valid_input("scissors")
True
>>> check_valid_input("snape")
False
"""
"""*** CODE IS HERE ***"""
if s == "rock":
return True
elif s == "paper":
return True
elif s == "scissors":
return True
else:
return False
def convert_to_num(s):
"""
*** DOCTESTS IS HERE ***
>>> convert_to_num("rock")
0
>>> convert_to_num("paper")
1
>>> convert_to_num("scissors")
2
>>> convert_to_num("snape")
Error: should not reach this if input is a valid one
"""
"""*** CODE IS HERE ***"""
if s == "rock":
return 0
elif s == "paper":
return 1
elif s == "scissors":
return 2
else:
print("Error: should not reach this if input is a valid one")
def convert_to_string(n):
"""
*** DOCTESTS IS HERE ***
>>> convert_to_string(0)
'rock'
>>> convert_to_string(1)
'paper'
>>> convert_to_string(2)
'scissors'
>>> convert_to_string(1150)
Error: should not reach this if input is a valid one
"""
"""*** CODE IS HERE ***"""
if n == 0:
return "rock"
elif n == 1:
return "paper"
elif n == 2:
return "scissors"
else:
print("Error: should not reach this if input is a valid one")
def game_decision(player_choice_num, computer_choice_num):
"""
*** DOCTESTS IS HERE ***
>>> game_decision(0,0)
Both ties!
>>> game_decision(0,1)
Computer wins!
>>> game_decision(0,2)
Player wins!
>>> game_decision(1,0)
Player wins!
>>> game_decision(1,1)
Both ties!
>>> game_decision(1,2)
Computer wins!
>>> game_decision(2,0)
Computer wins!
>>> game_decision(2,1)
Player wins!
>>> game_decision(2,2)
Both ties!
"""
"""*** CODE IS HERE ***"""
if player_choice_num == computer_choice_num:
print("Both ties!")
elif ((player_choice_num + 1) % 3) == computer_choice_num:
print("Computer wins!")
else:
print("Player wins!")
# apply the rules of rock-paper-scissors
# rock-0 ; paper-1 ; scissors-2
# instead of this ugly if-elif-else nested block
"""
if player_choice_num == 0:
if computer_choice_num == 0:
print("Both ties!")
elif computer_choice_num == 1:
print("Computer wins!")
else:
print("Player wins!")
elif player_choice_num == 1:
if computer_choice_num == 1:
print("Both ties!")
elif computer_choice_num == 2:
print("Computer wins!")
else:
print("Player wins!")
else:
if computer_choice_num == 2:
print("Both ties!")
elif computer_choice_num == 0:
print("Computer wins!")
else:
print("Player wins!")
"""
def main() -> None:
# get an input from a player and validate
valid = False
    while not valid:
player_choice = input("Enter your choice: ")
valid = check_valid_input(player_choice)
        if not valid:
print("Invalid choice. Enter again.")
# random a response from a computer and print out player and computer choices
import random
computer_choice_num = random.randint(0, 2)
computer_choice = convert_to_string(computer_choice_num)
player_choice_num = convert_to_num(player_choice)
print("Players chooses ", player_choice)
print("Computer chooses ", computer_choice)
# do this
game_decision(player_choice_num, computer_choice_num)
if __name__ == "__main__":
main()
|
[
"monaprom134@gmail.com"
] |
monaprom134@gmail.com
|
46c3a80401427317e414ee45d8f5efe5cd1b83d7
|
1ea7432ec4aac84bf767f9afdb91dc74f08000a1
|
/radio_locator/locator/views.py
|
418dc2111dc98d693c53a80a714b265ae3aa50a6
|
[] |
no_license
|
skevy/Radio-Locator
|
4c9cd4952edb1a256c56e6ecae0e9e3b9e1905b5
|
1001bb3525377426d8ddc1629022c35ad265bf81
|
refs/heads/master
| 2020-11-26T19:36:17.371851
| 2011-03-30T23:11:12
| 2011-03-30T23:11:12
| 1,489,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
import json
from django.http import HttpResponse, HttpResponseNotFound
from django.contrib.gis.geos import Point
from radio_locator.locator.models import Station
def all_stations(request):
if "lat" not in request.GET or "lng" not in request.GET:
return HttpResponseNotFound()
loc = Point(float(request.GET['lng']), float(request.GET['lat']))
stations = {}
stations['high'] = []
stations['medium'] = []
high_stations = Station.objects.filter(local_range__contains=loc).distance(loc).order_by('distance')
medium_stations = Station.objects.filter(distant_range__contains=loc).exclude(pk__in=[s.pk for s in high_stations]).distance(loc).order_by('-frequency')
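    # 'high' = stations whose local (strong-signal) range covers the point;
    # 'medium' = stations only within their distant range.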
for s in high_stations:
station = s.serialize()
station.update(distance=s.distance.m)
stations['high'].append(station)
for s in medium_stations:
station = s.serialize()
station.update(distance=s.distance.m)
stations['medium'].append(station)
return HttpResponse(json.dumps(stations), mimetype="application/json")
|
[
"adam.skevy@mac.com"
] |
adam.skevy@mac.com
|
96d55f8c49b1198534b1c8054dc9d7895a5fca2a
|
1a29735113eeb8061527c9e785fb3e16abe10449
|
/lib/pymod/pymod/test/command/reload.py
|
3f874458ab5cf4f6a0255d419c26515b89816da9
|
[] |
no_license
|
tjfulle/Modulecmd.py
|
db3fb96db63e42666056e8086f433a779f5bfc86
|
42e3d34b76a53f4ff557e96ba2af3cb83b963ad2
|
refs/heads/master
| 2023-02-21T10:16:49.408099
| 2021-11-18T06:29:59
| 2021-11-18T06:29:59
| 141,306,544
| 0
| 0
| null | 2019-05-09T04:51:09
| 2018-07-17T15:09:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,455
|
py
|
import pytest
import pymod.mc
import pymod.environ
from pymod.main import PymodCommand
@pytest.fixture()
def modules_path(tmpdir, namespace, modulecmds):
m = modulecmds
one = tmpdir.mkdir("1")
one.join("a.py").write(m.setenv("a"))
one.join("b.py").write(m.setenv("b") + m.load("c"))
one.join("c.py").write(m.setenv("c") + m.load("d"))
one.join("d.py").write(m.setenv("d"))
ns = namespace()
ns.path = one.strpath
return ns
@pytest.mark.unit
def test_command_reload_1(modules_path, mock_modulepath):
load = PymodCommand("load")
reload = PymodCommand("reload")
mock_modulepath(modules_path.path)
load("a")
assert pymod.environ.get("a") == "a"
reload("a")
assert pymod.environ.get("a") == "a"
# Reference count should not change
a = pymod.modulepath.get("a")
assert a.refcount == 1
@pytest.mark.unit
def test_command_reload_2(modules_path, mock_modulepath):
load = PymodCommand("load")
reload = PymodCommand("reload")
mock_modulepath(modules_path.path)
load("a")
load("b")
assert pymod.environ.get("a") == "a"
assert pymod.environ.get("b") == "b"
assert pymod.environ.get("c") == "c"
assert pymod.environ.get("d") == "d"
reload("a")
assert pymod.environ.get("a") == "a"
# Reference count should not change
a = pymod.modulepath.get("a")
b = pymod.modulepath.get("b")
assert a.refcount == 1
assert b.refcount == 1
|
[
"tjfulle@sandia.gov"
] |
tjfulle@sandia.gov
|
56cad70066f044e0659afde47b14bf62cd2aa4fe
|
5ea6abae27053fec4963a850b05a08ad2e6e7ff2
|
/Program_Tuple_3.py
|
863db410b74622654c5fdbb235fa0511aa14763a
|
[] |
no_license
|
prathusha-kunka/miniproject
|
a35814d6f93c00a18a5dd393b3e0a1751ecfba81
|
35f83d9e762d0bcf5b6fd6ab69068adc9a0fa1bd
|
refs/heads/main
| 2023-06-24T16:32:01.986588
| 2021-07-10T15:23:32
| 2021-07-10T15:23:32
| 384,718,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
# Python code to convert a list of (key, value) tuples into a dictionary
def Convert(tup, di):
for a, b in tup:
di.setdefault(a, []).append(b)
return di
# Driver Code
tups = [("Prathusha", 10), ("Rathnakar", 12), ("Pavan", 14),
("Srikanth", 20), ("Sunita", 25), ("Pavitra", 30)]
dictionary = {}
print (Convert(tups, dictionary))
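# -> {'Prathusha': [10], 'Rathnakar': [12], 'Pavan': [14], 'Srikanth': [20], 'Sunita': [25], 'Pavitra': [30]}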
|
[
"noreply@github.com"
] |
prathusha-kunka.noreply@github.com
|
8da24a5596bf65aee4468dea4c35a93036848704
|
c4d913a3811bc83e7a3838c0efdf3424b73b1ac4
|
/imagedownloader/requester/migrations/0007_auto__add_field_area_hourly_longitude__del_field_satellite_longitude.py
|
b2b6277b8f4df35b8f832db0906b576dbb751c91
|
[] |
no_license
|
tomasdelvechio/solar_radiation_model
|
d967b50b645fec3215339ed1785ffa4423b395a4
|
c306c4e4311fd887e9985401b8e2731bbf372583
|
refs/heads/master
| 2021-01-18T01:46:30.739622
| 2014-04-14T00:08:30
| 2014-04-14T00:08:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,495
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Area.hourly_longitude'
db.add_column('requester_area', 'hourly_longitude',
self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=5, decimal_places=2),
keep_default=False)
# Deleting field 'Satellite.longitude'
db.delete_column('requester_satellite', 'longitude')
def backwards(self, orm):
# Deleting field 'Area.hourly_longitude'
db.delete_column('requester_area', 'hourly_longitude')
# Adding field 'Satellite.longitude'
db.add_column('requester_satellite', 'longitude',
self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=5, decimal_places=2),
keep_default=False)
models = {
'requester.account': {
'Meta': {'object_name': 'Account'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.TextField', [], {})
},
'requester.area': {
'Meta': {'object_name': 'Area'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'east_longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'hourly_longitude': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '5', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'north_latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'south_latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'west_longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
},
'requester.automaticdownload': {
'Meta': {'object_name': 'AutomaticDownload'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.Area']"}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['requester.Channel']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email_server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.EmailAccount']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_simultaneous_request': ('django.db.models.fields.IntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'root_path': ('django.db.models.fields.TextField', [], {}),
'time_range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.UTCTimeRange']"}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
'requester.channel': {
'Meta': {'object_name': 'Channel'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_file': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'satellite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.Satellite']"})
},
'requester.emailaccount': {
'Meta': {'object_name': 'EmailAccount', '_ormbases': ['requester.Account']},
'account_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['requester.Account']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.TextField', [], {}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'username': ('django.db.models.fields.EmailField', [], {'max_length': '75'})
},
'requester.file': {
'Meta': {'object_name': 'File'},
'begin_download': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downloaded': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'end_download': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'localname': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.Order']"}),
'remotename': ('django.db.models.fields.TextField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'requester.ftpserveraccount': {
'Meta': {'object_name': 'FTPServerAccount', '_ormbases': ['requester.ServerAccount']},
'hostname': ('django.db.models.fields.TextField', [], {}),
'serveraccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['requester.ServerAccount']", 'unique': 'True', 'primary_key': 'True'})
},
'requester.goesrequest': {
'Meta': {'object_name': 'GOESRequest', '_ormbases': ['requester.Request']},
'request_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['requester.Request']", 'unique': 'True', 'primary_key': 'True'})
},
'requester.order': {
'Meta': {'object_name': 'Order'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downloaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'empty_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identification': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'request': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['requester.Request']", 'unique': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.FTPServerAccount']", 'null': 'True'})
},
'requester.request': {
'Meta': {'object_name': 'Request'},
'automatic_download': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.AutomaticDownload']"}),
'begin': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'requester.satellite': {
'Meta': {'object_name': 'Satellite'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identification': ('django.db.models.fields.TextField', [], {}),
'in_file': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'request_server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requester.WebServerAccount']"})
},
'requester.serveraccount': {
'Meta': {'object_name': 'ServerAccount', '_ormbases': ['requester.Account']},
'account_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['requester.Account']", 'unique': 'True', 'primary_key': 'True'}),
'username': ('django.db.models.fields.TextField', [], {})
},
'requester.utctimerange': {
'Meta': {'object_name': 'UTCTimeRange'},
'begin': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'requester.webserveraccount': {
'Meta': {'object_name': 'WebServerAccount', '_ormbases': ['requester.ServerAccount']},
'serveraccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['requester.ServerAccount']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['requester']
|
[
"eloy.colell@gmail.com"
] |
eloy.colell@gmail.com
|
62c0d10762ebddcf27c793df08917dc21ab905fa
|
b3493b4708c2aa7acced06d21ed308d210787543
|
/burga/iteracion_en _rango/rango4.py
|
acd2c0b5ab40638960e765a2a428c409cb17fbb0
|
[] |
no_license
|
arianaburga/trab07.burga.bravo
|
0ca0ae5a324f262163c713dc166ba807f9c48484
|
0955d374f5de3fb782b2d9ed65985ff88e018b25
|
refs/heads/master
| 2020-09-14T05:47:14.752471
| 2019-11-24T18:17:07
| 2019-11-24T18:17:07
| 223,038,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
# Print the numbers from -8 through 30
for i in range(-8, 31):
    print(i)
# end of the range iteration
print("End of loop")
|
[
"aburgam@unprg.edu.pe"
] |
aburgam@unprg.edu.pe
|
e154e27e5c18326a8003714696ff81fb7c15c329
|
71c736fdcd2860dd59feb03aa05f0a8cb4bde585
|
/randomforest_clf.py
|
72fb573eb9f2089d09314228bfe50fd0b21a5681
|
[] |
no_license
|
burhanbilen/RandomForestClassifier-ile-Veri-Siniflandirma
|
e77cc5bf2eee714c9ad5b1d2b4063d4630b0c435
|
4df3a4e38646111e38815f8fc1459710b814eb90
|
refs/heads/main
| 2022-12-31T10:54:31.655500
| 2020-10-12T11:58:54
| 2020-10-12T11:58:54
| 303,376,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
df = pd.read_csv('krediVeriseti.csv', delimiter = ';')
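# The dataset uses Turkish column names and values; the replacements below mean:
#   evDurumu (housing status): "evsahibi" = homeowner -> 1, "kiraci" = tenant -> 0
#   telefonDurumu (phone status): "var" = has one -> 1, "yok" = none -> 0
#   KrediDurumu (credit decision, the target): "krediver" = approve -> 1, "verme" = deny -> 0
# (further down, y_tahmin means "y prediction")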
df["evDurumu"].replace({"evsahibi": 1, "kiraci": 0}, inplace=True)
df["telefonDurumu"].replace({"var": 1, "yok": 0}, inplace=True)
df["KrediDurumu"].replace({"krediver": 1, "verme": 0}, inplace=True)
print(df.head())
X = np.array(df.iloc[:,:5])
y = np.array(df.iloc[:,5:]).reshape(len(df["KrediDurumu"]),)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state = 42)
#print(X_train.shape, X_test.shape)
#print(y_train.shape, y_test.shape)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
clf = RandomForestClassifier(max_depth = 5, n_estimators = 20)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print(score)
y_tahmin = clf.predict(X_test)
print(confusion_matrix(y_test, y_tahmin))
|
[
"noreply@github.com"
] |
burhanbilen.noreply@github.com
|
291ba780cc47412365fdc0e1e22d4319fa79c276
|
5bc40f29628b2c2f4b9d3fba3f4ad0f70935d325
|
/python_local/xflib.py
|
60f9bc033db1b8a2b9896e5fb266d2f6ae62db34
|
[] |
no_license
|
asousa/thesis_figures
|
697bb88819ba22c1b669f2dea1b4fb2d1f38b66e
|
b3ad9f353b1636820b7af2c99efe4eed24b5bcf4
|
refs/heads/master
| 2022-12-01T11:04:07.436896
| 2020-08-09T07:43:53
| 2020-08-09T07:43:53
| 109,327,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,034
|
py
|
import ctypes as ct
import datetime
import numpy as np
class xflib(object):
''' A wrapper class for the xform-double coordinate transformation library.
(The fortran one Forrest used in the raytracer)
'''
def __init__(self, lib_path='libxformd.so'):
self.D2R = 3.141592653589793238462643/180.
self.R2D = 180./3.141592653589793238462643
# data types
self.i2 = ct.c_int*2
self.d3 = ct.c_double*3
# load shared library
ct.cdll.LoadLibrary(lib_path)
self.xf = ct.CDLL(lib_path)
# methods
self.geo2sm_l = self.xf.geo_to_sm_d_
self.sm2geo_l = self.xf.sm_to_geo_d_
self.geo2mag_l= self.xf.geo_to_mag_d_
self.mag2geo_l= self.xf.mag_to_geo_d_
self.s2c_l = self.xf.pol_to_cart_d_
self.c2s_l = self.xf.cart_to_pol_d_
self.gse2sm_l = self.xf.gse_to_sm_d_
self.sm2gse_l = self.xf.sm_to_gse_d_
def s2c(self, x_in):
''' spherical to cartesian (degrees)
x_in: rad, lat, lon
x_out: x, y, z
'''
# print x_in
lat_in = ct.c_double(x_in[1]*self.D2R)
lon_in = ct.c_double(x_in[2]*self.D2R)
rad_in = ct.c_double(x_in[0])
cx_out = self.d3()
self.s2c_l(ct.byref(lat_in), ct.byref(lon_in), ct.byref(rad_in), cx_out)
return [x for x in cx_out]
def c2s(self, x_in):
''' cartesian to spherical (degrees)
x_in: x, y, z
x_out: rad, lat, lon
'''
cx_in = self.d3(*x_in)
lat = ct.c_double()
lon = ct.c_double()
rad = ct.c_double()
self.c2s_l(cx_in, ct.byref(lat), ct.byref(lon), ct.byref(rad))
return [rad.value, lat.value*self.R2D, lon.value*self.R2D]
def geo2sm(self, x_in, time_in):
''' Geographic (cartesian) to Solar Magnetic
'''
# Construct yearday:
yearday = int(1000*time_in.year + time_in.timetuple().tm_yday)
milliseconds_day = int((time_in.second + time_in.minute*60 + time_in.hour*60*60)*1e3 + time_in.microsecond*1e-3)
ct_in = self.i2()
ct_in[0] = yearday
ct_in[1] = milliseconds_day
# print yearday
# print milliseconds_day
cx_in = self.d3(*x_in)
cx_out = self.d3()
self.geo2sm_l(ct_in, cx_in, cx_out)
return [x for x in cx_out]
def sm2geo(self, x_in, time_in):
''' Solar Magnetic to Geographic (cartesian) '''
# Construct yearday:
yearday = int(1000*time_in.year + time_in.timetuple().tm_yday)
milliseconds_day = int((time_in.second + time_in.minute*60 + time_in.hour*60*60)*1e3 + time_in.microsecond*1e-3)
ct_in = self.i2()
ct_in[0] = yearday
ct_in[1] = milliseconds_day
# print yearday
# print milliseconds_day
cx_in = self.d3(*x_in)
cx_out = self.d3()
self.sm2geo_l(ct_in, cx_in, cx_out)
return [x for x in cx_out]
def geo2mag(self, x_in, time_in):
''' Geographic (cartesian) to magnetic dipole (cartesian) '''
yearday = int(1000*time_in.year + time_in.timetuple().tm_yday)
milliseconds_day = int((time_in.second + time_in.minute*60 + time_in.hour*60*60)*1e3 + time_in.microsecond*1e-3)
ct_in = self.i2()
ct_in[0] = yearday
ct_in[1] = milliseconds_day
cx_in = self.d3(*x_in)
cx_out = self.d3()
self.geo2mag_l(ct_in, cx_in, cx_out)
return [x for x in cx_out]
def mag2geo(self, x_in, time_in):
''' Magnetic dipole (cartesian) to geographic (cartesian) '''
yearday = int(1000*time_in.year + time_in.timetuple().tm_yday)
milliseconds_day = int((time_in.second + time_in.minute*60 + time_in.hour*60*60)*1e3 + time_in.microsecond*1e-3)
ct_in = self.i2()
ct_in[0] = yearday
ct_in[1] = milliseconds_day
cx_in = self.d3(*x_in)
cx_out = self.d3()
self.mag2geo_l(ct_in, cx_in, cx_out)
return [x for x in cx_out]
def rllgeo2rllmag(self, x_in, time_in):
''' Geographic (r, lat, lon) to Geomagnetic (r, lat, lon) '''
xtmp = self.s2c(x_in)
xtmp = self.geo2mag(xtmp, time_in)
return self.c2s(xtmp)
def rllgeo2sm(self, x_in, time_in):
''' geographic (radius, lat, lon) to Solar Magnetic (cartesian) '''
xtmp = self.s2c(x_in)
return self.geo2sm(xtmp, time_in)
def sm2rllgeo(self, x_in, time_in):
''' Solar Magnetic (cartesian) geographic (radius, lat, lon) '''
xtmp = self.sm2geo(x_in, time_in)
return self.c2s(xtmp)
def rllmag2sm(self, x_in, time_in):
''' magnetic dipole (radius, lat, lon) to Solar Magnetic (cartesian) '''
xtmp = self.s2c(x_in)
xtmp = self.mag2geo(xtmp, time_in)
return self.geo2sm(xtmp, time_in)
def sm2rllmag(self, x_in, time_in):
''' Solar Magnetic (cartesian) to magnetic dipole (radius, lat, lon) '''
xtmp = self.sm2geo(x_in, time_in)
xtmp = self.geo2mag(xtmp, time_in)
return self.c2s(xtmp)
def mag2sm(self, x_in, time_in):
''' magnetic dipole (cartesian) to Solar Magnetic (cartesian) '''
xtmp = self.mag2geo(x_in, time_in)
return self.geo2sm(xtmp, time_in)
def sm2mag(self, x_in, time_in):
''' Solar Magnetic (cartesian) to magnetic dipole (cartesian) '''
xtmp = self.sm2geo(x_in, time_in)
return self.geo2mag(xtmp, time_in)
def transform_data_sph2car(self, lat, lon, d_in):
D2R = np.pi/180.
M = np.zeros([3,3])
d_out = np.zeros(3)
theta = D2R*(90. - lat)
phi = D2R*lon
st = np.sin(theta)
sp = np.sin(phi)
ct = np.cos(theta)
cp = np.cos(phi)
M[0,0] = st*cp; M[0,1] = ct*cp; M[0,2] = -sp;
M[1,0] = st*sp; M[1,1] = ct*sp; M[1,2] = cp;
M[2,0] = ct; M[2,1] = -st; M[2,2] = 0;
d_out = np.dot(M, d_in)
return d_out
def gse2sm(self, x_in, time_in):
yearday = int(1000*time_in.year + time_in.timetuple().tm_yday)
milliseconds_day = int((time_in.second + time_in.minute*60 + time_in.hour*60*60)*1e3 + time_in.microsecond*1e-3)
ct_in = self.i2()
ct_in[0] = yearday
ct_in[1] = milliseconds_day
cx_in = self.d3(*x_in)
cx_out = self.d3()
self.gse2sm_l(ct_in, cx_in, cx_out)
return [x for x in cx_out]
def sm2gse(self, x_in, time_in):
yearday = int(1000*time_in.year + time_in.timetuple().tm_yday)
milliseconds_day = int((time_in.second + time_in.minute*60 + time_in.hour*60*60)*1e3 + time_in.microsecond*1e-3)
ct_in = self.i2()
ct_in[0] = yearday
ct_in[1] = milliseconds_day
cx_in = self.d3(*x_in)
cx_out = self.d3()
self.sm2gse_l(ct_in, cx_in, cx_out)
return [x for x in cx_out]
def lon2MLT(self, itime, lon):
# // Input: itime, lon in geomagnetic dipole coords.
# // Output: MLT in fractional hours
# // Ref: "Magnetic Coordinate Systems", Laundal and Richmond
# // Space Science Review 2016, DOI 10.1007/s11214-016-0275-y
        ut_hr = itime.hour + itime.minute/60.0  # fractional hours (UT); seconds ignored as in the C original
        A1 = [1, 51.48, 0]  # location of Greenwich in geographic (r, lat, lon), for UT reference
        # s2c/geo2mag/c2s return new lists and do not mutate their input,
        # so the results must be reassigned rather than discarded
        A1 = self.s2c(A1)
        A1 = self.geo2mag(A1, itime)
        A1 = self.c2s(A1)
        return np.mod(ut_hr + (lon - A1[2])/15.0, 24)
def MLT2lon(self, itime, mlt):
# // Input: itime, mlt in fractional hours
# // Output: longitude in geomagnetic coordinates
# // Ref: "Magnetic Coordinate Systems", Laundal and Richmond
# // Space Science Review 2016, DOI 10.1007/s11214-016-0275-y
        ut_hr = itime.hour + itime.minute/60.0  # fractional hours (UT); seconds ignored as in the C original
        A1 = [1, 51.48, 0]  # location of Greenwich in geographic (r, lat, lon), for UT reference
        A1 = self.s2c(A1)
        A1 = self.geo2mag(A1, itime)
        A1 = self.c2s(A1)
        return 15.*(mlt - ut_hr) + A1[2]
# xf = xflib(lib_path='/shared/users/asousa/WIPP/3dWIPP/python/libxformd.so')
# x_in = [1.,45,17]
# time_in = datetime.datetime(2001, 1, 1, 0, 0, 00);
# print x_in
# x_in = xf.s2c(x_in)
# print x_in
# # x_in = xf.c2s(x_in)
# # print x_in
# x_in = xf.geo2sm(x_in, time_in)
# print x_in
# x_in = xf.sm2geo(x_in, time_in)
# print x_in
# x_in = xf.geo2mag(x_in, time_in)
# print x_in
# x_in = xf.mag2geo(x_in, time_in)
# print x_in
|
[
"asousa@stanford.edu"
] |
asousa@stanford.edu
|
7c71dce2587c67fcd1f2b8dac1459501c7454aa7
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/list/duration/Schema+Instance/NISTXML-SV-IV-list-duration-enumeration-1-4.py
|
06b2f14b172e8529e7d41ea4ce7da0de48fe9cf1
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
from output.models.nist_data.list_pkg.duration.schema_instance.nistschema_sv_iv_list_duration_enumeration_1_xsd.nistschema_sv_iv_list_duration_enumeration_1 import NistschemaSvIvListDurationEnumeration1
from output.models.nist_data.list_pkg.duration.schema_instance.nistschema_sv_iv_list_duration_enumeration_1_xsd.nistschema_sv_iv_list_duration_enumeration_1 import NistschemaSvIvListDurationEnumeration1Type
obj = NistschemaSvIvListDurationEnumeration1(
value=NistschemaSvIvListDurationEnumeration1Type.P2018_Y03_M29_DT16_H33_M43_S_P2028_Y05_M22_DT23_H02_M30_S_P2001_Y08_M05_DT10_H25_M39_S_P1978_Y11_M04_DT20_H31_M10_S_P1999_Y04_M08_DT19_H26_M37_S_P2017_Y08_M24_DT18_H51_M57_S_P1987_Y05_M29_DT14_H30_M09_S_P1983_Y03_M09_DT03_H26_M56_S_P1985_Y06_M11_DT00_H54_M42_S
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
73df98dca78d75db99f30a6d10a2d21b1fbb876b
|
e170cea70c4e92f9d741a23553376dfd72669482
|
/ProjectCheckPoint9_Outliers/ProjectCheckPoint9_Outliers.py
|
b9e4e824e4d08f5e94dde739145b545237dd23f1
|
[] |
no_license
|
demikaiser/AutoInsuranceClaimPrediction
|
9adc00ed57c993586ba004d0c82abde66e24a177
|
0e03dbe1a6664d9d29e3250a44489b69da1d0601
|
refs/heads/master
| 2020-04-17T03:59:12.315195
| 2019-01-17T10:52:56
| 2019-01-17T10:52:56
| 166,208,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,713
|
py
|
# EXPERIMENT FOR JUSTIN'S BIG IMPROVEMENT
# Default Libraries
import math
import inspect
import itertools
# Data Science Libraries
import numpy as np
import pandas as pd
import sklearn_pandas
# sklearn Library (https://scikit-learn.org)
import sklearn.model_selection
import sklearn.linear_model
import sklearn.neighbors
import sklearn.metrics
import sklearn.tree
import sklearn.ensemble
import sklearn.naive_bayes
import sklearn.neural_network
import sklearn.preprocessing      # used below via sklearn.preprocessing.StandardScaler
import sklearn.feature_selection  # used below via sklearn.feature_selection.RFE
from sklearn.externals import joblib
import xgboost
from imblearn.over_sampling import SMOTE, ADASYN
import sklearn.decomposition
class DataScienceModeler:
c_X = None
c_Y = None
c_x_train = None
c_x_test = None
c_y_train = None
c_y_test = None
c_x_train_SMOTE = None
c_y_train_SMOTE = None
c_x_train_ADASYN = None
c_y_train_ADASYN = None
r_X = None
r_Y = None
r_x_train = None
r_x_test = None
r_y_train = None
r_y_test = None
o_X = None
o_Y = None
o_x_train = None
o_x_test = None
o_y_train = None
o_y_test = None
TESTSET_X = None
COMPETITIONSET_X = None
############################################################################
# Data Pre-Processing #
############################################################################
def __init__(self):
log_file_name = "log-" + str(pd.Timestamp.now())
log_file_name = log_file_name.replace(":", "")
self.log_file_handler = open("logs//" + log_file_name, "w")
def __del__(self):
self.log_file_handler.close()
    def load_trainingset(self, shuffle, PARAMETER=0):  # default keeps older experiments (which omit PARAMETER) working
self.log("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@:" + str(PARAMETER))
print("<==== ====", inspect.stack()[0][3], "==== ====>")
df = pd.read_csv("trainingset.csv")
continuous_features = ["1", "2", "6", "8", "10"]
categorical_features = ["3", "4", "5", "7", "9", "11", "12",
"13", "14", "15", "16", "17", "18"]
df['Claimed'] = np.where(df['ClaimAmount'] > 0, 1, 0)
df['Outlier'] = np.where(df['ClaimAmount'] > PARAMETER, 1, 0)
col_list = list(df.columns)
col_list.remove('rowIndex')
col_list.remove('Claimed')
col_list.remove('ClaimAmount')
col_list.remove('Outlier')
col_list.insert(0, 'Outlier')
col_list.insert(0, 'ClaimAmount')
col_list.insert(0, 'Claimed')
col_list.insert(0, 'rowIndex')
df = df[col_list]
df = pd.get_dummies(
df, columns=["feature" + n for n in categorical_features],
dtype=np.int64
)
transform_mapper = sklearn_pandas.DataFrameMapper([
('rowIndex', None),
('Claimed', None),
('ClaimAmount', None),
('Outlier', None),
], default=sklearn.preprocessing.StandardScaler())
standardized = transform_mapper.fit_transform(df.copy())
df = pd.DataFrame(standardized, columns=df.columns)
print("0. Prepare the Final Data Sets (Classification)")
self.c_X = df.drop(['rowIndex', 'Claimed', 'ClaimAmount', 'Outlier'], axis=1)
self.c_Y = df.Claimed
# <Polynomial Features>
# poly = sklearn.preprocessing.PolynomialFeatures(2, include_bias=True)
# self.c_X = poly.fit_transform(self.c_X)
# <Power Transformer>
# power = sklearn.preprocessing.PowerTransformer()
# power.fit(self.c_X)
# self.c_X = power.transform(self.c_X)
# <Quantile Transform>
# self.c_X = sklearn.preprocessing.quantile_transform(self.c_X, axis=0, n_quantiles=1000,
# output_distribution='normal', ignore_implicit_zeros=False,
# subsample=100000, random_state=None, copy=False)
self.c_x_train, self.c_x_test, self.c_y_train, self.c_y_test = sklearn.model_selection\
.train_test_split(self.c_X, self.c_Y, test_size=0.30, shuffle=shuffle)
# self.c_x_train = self.c_x_train.values
# self.c_x_test = self.c_x_test.values
# self.c_y_train = self.c_y_train.values
# self.c_y_test = self.c_y_test.values
#
# print("0. SMOTE")
# self.c_x_train_SMOTE, self.c_y_train_SMOTE = SMOTE().fit_resample(self.c_x_train, self.c_y_train)
#
# print("0. ADASYN")
# self.c_x_train_ADASYN, self.c_y_train_ADASYN = ADASYN().fit_resample(self.c_x_train, self.c_y_train)
print("0. Prepare the Final Data Sets (Regression)")
self.r_X = df.drop(['rowIndex', 'Claimed', 'ClaimAmount', 'Outlier'], axis=1)
self.r_Y = df.ClaimAmount
self.r_x_train, self.r_x_test, self.r_y_train, self.r_y_test = sklearn.model_selection\
.train_test_split(self.r_X, self.r_Y, test_size=0.30, shuffle=shuffle)
print("0. Prepare the Final Data Sets (Outlier)")
self.o_X = df.drop(['rowIndex', 'Claimed', 'ClaimAmount', 'Outlier'], axis=1)
self.o_Y = df.Outlier
self.o_x_train, self.o_x_test, self.o_y_train, self.o_y_test = sklearn.model_selection\
.train_test_split(self.o_X, self.o_Y, test_size=0.30, shuffle=shuffle)
print("0. Aggressive Regression")
df_aggressive_regression = df[: int(0.7 * df.shape[0])]
df_aggressive_regression = df_aggressive_regression[df_aggressive_regression['ClaimAmount'] > 0]
print(df_aggressive_regression.shape)
OUTLIER_CUTOFF = 4647
df_aggressive_regression = df_aggressive_regression[df_aggressive_regression['ClaimAmount'] < OUTLIER_CUTOFF]
self.r_x_train_aggressive = df_aggressive_regression.drop(['rowIndex', 'Claimed', 'ClaimAmount'], axis=1)
self.r_y_train_aggressive = df_aggressive_regression.ClaimAmount
# print(df_aggressive_regression.shape)
def load_testset(self, shuffle):
print("<==== ====", inspect.stack()[0][3], "==== ====>")
df = pd.read_csv("competitionset.csv")
continuous_features = ["1", "2", "6", "8", "10"]
categorical_features = ["3", "4", "5", "7", "9", "11", "12",
"13", "14", "15", "16", "17", "18"]
col_list = list(df.columns)
col_list.remove('rowIndex')
col_list.insert(0, 'rowIndex')
df = df[col_list]
df = pd.get_dummies(
df, columns=["feature" + n for n in categorical_features],
dtype=np.int64
)
transform_mapper = sklearn_pandas.DataFrameMapper([
('rowIndex', None),
], default=sklearn.preprocessing.StandardScaler())
standardized = transform_mapper.fit_transform(df.copy())
df = pd.DataFrame(standardized, columns=df.columns)
print("0. Prepare the Final Data Sets (Regression)")
self.TESTSET_X = df.drop(['rowIndex'], axis=1)
def load_competitionset(self, shuffle):
pass
def select_features_regression(self, num_features):
print("<==== ====", inspect.stack()[0][3], "==== ====>")
feature_select_model = sklearn.tree.DecisionTreeRegressor()
trans = sklearn.feature_selection.RFE(feature_select_model, n_features_to_select=num_features)
self.r_x_train = trans.fit_transform(self.r_x_train, self.r_y_train)
self.r_x_test = trans.fit_transform(self.r_x_test, self.r_y_test)
def select_features_classification(self, num_features):
print("<==== ====", inspect.stack()[0][3], "==== ====>")
feature_select_model = sklearn.tree.DecisionTreeClassifier()
trans = sklearn.feature_selection.RFE(feature_select_model, n_features_to_select=num_features)
self.c_x_train = trans.fit_transform(self.c_x_train, self.c_y_train)
self.c_x_test = trans.fit_transform(self.c_x_test, self.c_y_test)
############################################################################
# Utilities #
############################################################################
def print_regression_performance_metrics(self, y_test, y_pred):
label_prediction_difference = np.subtract(y_test, y_pred)
MAE = np.mean(np.absolute(label_prediction_difference))
self.log("MAE: " + str(MAE))
return MAE
def print_classification_performance_metrics(self, y_test, y_pred):
confusion_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred)
self.log("Confusion Matrix:")
self.log(str(confusion_matrix))
# tn, fp, fn, tp = sklearn.metrics.confusion_matrix(y_test, y_pred).ravel()
# self.log("TN:", tn, "FP", fp, "FN", fn, "TP", tp)
f1_score = sklearn.metrics.f1_score(y_test, y_pred)
self.log("F1 Performance Score: %.6f%%" % (f1_score * 100))
return f1_score
############################################################################
# Model Execution #
############################################################################
def experiment00(self):
self.load_trainingset(False)
#### #### #### #### Classification Model #### #### #### ####
model_classification = xgboost.XGBClassifier(learning_rate=0.05, n_estimators=1000,
max_depth=30, min_child_weight=1, gamma=0.1,
subsample=0.8, colsample_bytree=0.8,
objective='binary:logistic', nthread=4,
booster='gbtree', scale_pos_weight=20,
seed=27, reg_lambda=1, reg_alpha=.005)
model_classification = model_classification.fit(self.c_x_train, self.c_y_train)
#### #### #### #### Regression Model #### #### #### ####
# model_regression = xgboost.XGBRegressor(objective='reg:linear', colsample_bytree=0.85,
# eta=0.01, max_depth=9, alpha=10, n_estimators=1,
# booster='gbtree', min_child_weight=0, gamma=0,
# subsample=0.8, reg_alpha=100, max_delta_step=1)
#
# model_regression = model_regression.fit(self.r_x_train, self.r_y_train)
#### #### #### #### Prediction Process #### #### #### ####
TAU = 0.701
y_pred_prob = model_classification.predict_proba(self.c_x_test)
y_pred_prob = pd.DataFrame(y_pred_prob)[1]
y_pred_classification = \
y_pred_prob.apply(
lambda x: 1
if x > TAU else 0
).values
# y_pred_regression = model_regression.predict(self.r_x_test)
# y_pred_regression = y_pred_classification * y_pred_regression
self.print_classification_performance_metrics(self.c_y_test, y_pred_classification)
# self.print_regression_performance_metrics(self.r_y_test, y_pred_regression)
# EXP
y_pred_classification = model_classification.predict(self.c_x_test)
self.print_classification_performance_metrics(self.c_y_test, y_pred_classification)
def experiment01(self, PARAMETER_TO_EXPLORE):
self.load_trainingset(False)
#### #### #### #### Classification Model #### #### #### ####
# 0.47
model_classification = xgboost.XGBClassifier(learning_rate=0.1, n_estimators=100,
max_depth=30, min_child_weight=1, gamma=0.1,
subsample=0.8, colsample_bytree=0.8, colsample_bylevel=1,
objective='binary:logistic', nthread=4, n_jobs=2,
booster='gbtree', scale_pos_weight=20,
seed=0, reg_lambda=1, reg_alpha=0.1)
# model_classification = xgboost.XGBClassifier(learning_rate=0.0001, n_estimators=100,
# max_depth=1000, min_child_weight=1, gamma=0.1,
# subsample=0.8, colsample_bytree=0.8, colsample_bylevel=1,
# objective='binary:logistic', nthread=4, n_jobs=2,
# booster='gbtree', scale_pos_weight=20,
# seed=0, reg_lambda=1, reg_alpha=0.1,
# grow_policy='depthwise', max_leaves=1000,
# )
def xgb_f1(y, t):
t = t.get_label()
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y]
return 'f1', sklearn.metrics.f1_score(t, y_bin)
model_classification = model_classification.fit(self.c_x_train, self.c_y_train,
eval_metric=xgb_f1,
eval_set=[(self.c_x_test, self.c_y_test)],
verbose=True)
#### #### #### #### Regression Model #### #### #### ####
model_regression = xgboost.XGBRegressor(learning_rate=0.01, n_estimators=1,
objective='reg:linear', colsample_bytree=0.85,
max_depth=100, alpha=10,
booster='gbtree', min_child_weight=0, gamma=0,
subsample=0.8, reg_alpha=100, max_delta_step=1)
model_regression = model_regression.fit(self.r_x_train_aggressive, self.r_y_train_aggressive,
eval_metric='mae',
eval_set=[(self.r_x_test, self.r_y_test)],
verbose=True)
#### #### #### #### Prediction Process #### #### #### ####
TAU_LIST = np.arange(0.001, 0.999, 0.05)
tau_best = -1
f1_score_best = -1
        confusion_matrix_best = ""
mae_best = -1
y_pred_prob = model_classification.predict_proba(self.c_x_test)
y_pred_prob = pd.DataFrame(y_pred_prob)[1]
for TAU in TAU_LIST:
self.log("---- ---- ---- ---- TAU:" + str(TAU) + " ---- ---- ---- ----")
y_pred_classification = \
y_pred_prob.apply(
lambda x: 1
if x > TAU else 0
).values
y_pred_regression = model_regression.predict(self.r_x_test)
y_pred_final = y_pred_classification * y_pred_regression
f1_current = self.print_classification_performance_metrics(self.c_y_test, y_pred_classification)
mae_current = self.print_regression_performance_metrics(self.r_y_test, y_pred_final)
if f1_current > f1_score_best:
f1_score_best = f1_current
tau_best = TAU
                confusion_matrix_best = str(sklearn.metrics.confusion_matrix(self.c_y_test, y_pred_classification))
mae_best = mae_current
self.log("<==== ==== ==== ==== REFRENCE - BEST METRICS ==== ==== ==== ====>")
self.log("Best Tau: " + str(tau_best))
self.log("Best F1 Score: " + str(f1_score_best))
self.log(confusion_matrix_bext)
self.log("Best MAE: " + str(mae_best))
self.log("<<<< <<<< <<<< <<<< REFRENCE - TRAINING METRICS >>>> >>>> >>>> >>>>")
c_y_pred_reference = model_classification.predict(self.c_x_train)
self.print_classification_performance_metrics(self.c_y_train, c_y_pred_reference)
self.log(str(model_classification))
r_y_pred_reference = model_regression.predict(self.r_x_train)
# r_y_pred_reference = pd.DataFrame(r_y_pred_reference)[0]
# r_y_pred_reference = \
# r_y_pred_reference.apply(
# lambda x: 0.0000000001
# if x > 0 else 0
# ).values
final_y_pred_reference = c_y_pred_reference * r_y_pred_reference
self.print_regression_performance_metrics(self.r_y_train, final_y_pred_reference)
self.log("<<<< <<<< <<<< <<<< REFRENCE - ALL 0s MAE >>>> >>>> >>>> >>>>")
y_pred_classification = \
y_pred_prob.apply(
lambda x: 1
if x > tau_best else 0
).values
y_pred_regression = model_regression.predict(self.r_x_test)
y_pred_regression = pd.DataFrame(y_pred_regression)[0]
y_pred_regression = \
y_pred_regression.apply(
lambda x: 0.0000000001
if x > 0 else 0
).values
y_pred_final = y_pred_classification * y_pred_regression
self.print_regression_performance_metrics(self.r_y_test, y_pred_final)
def experiment02(self):
self.load_trainingset(False)
model_classification_1 = xgboost.XGBClassifier(learning_rate=0.1, n_estimators=100,
max_depth=30, min_child_weight=1, gamma=0.1,
subsample=0.8, colsample_bytree=0.8, colsample_bylevel=1,
objective='binary:logistic', nthread=4, n_jobs=2,
booster='gbtree', scale_pos_weight=20,
seed=0, reg_lambda=1, reg_alpha=0.1)
def xgb_f1(y, t):
t = t.get_label()
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y]
return 'f1', sklearn.metrics.f1_score(t, y_bin)
model_classification_1.fit(self.c_x_train, self.c_y_train,
eval_metric=xgb_f1,
eval_set=[(self.c_x_test, self.c_y_test)],
verbose=True)
y_pred_classification_1 = model_classification_1.predict(self.c_x_test)
# model_classification_2 = sklearn.tree.ExtraTreeClassifier(
# class_weight=None, criterion='gini', max_depth=None,
# max_features='auto', max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, random_state=None,
# splitter='random')
model_classification_2 = sklearn.naive_bayes.BernoulliNB(
alpha=0.01, binarize=0.0, class_prior=None, fit_prior=True)
# model_classification_2 = sklearn.ensemble.RandomForestClassifier(
# bootstrap=True, class_weight=None, criterion='gini',
# max_depth=None, max_features='auto', max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=None,
# oob_score=False, random_state=2, verbose=0, warm_start=False)
# model_classification_2 = sklearn.ensemble.BaggingClassifier(
# base_estimator=None, n_estimators=100, max_samples=1.0, max_features=1.0,
# bootstrap=True, bootstrap_features=True, oob_score=False, warm_start=False,
# n_jobs=None, random_state=None, verbose=3)
# model_classification_2 = sklearn.neural_network.MLPClassifier(
# activation='tanh', alpha=0.01, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(70, 70, 70), learning_rate='constant',
# learning_rate_init=0.001, max_iter=300, momentum=0.9,
# n_iter_no_change=10, nesterovs_momentum=True, power_t=0.5,
# random_state=None, shuffle=True, solver='adam', tol=0.0001,
# validation_fraction=0.1, verbose=3, warm_start=False)
# model_classification_2 = sklearn.svm.SVC(
# probability=True, verbose=True
# )
# model_classification_2 = sklearn.neighbors.KNeighborsClassifier(
# n_neighbors=5
# )
model_classification_2.fit(self.c_x_train, self.c_y_train)
y_pred_classification_2 = model_classification_2.predict_proba(self.c_x_test)
y_pred_classification_2 = pd.DataFrame(y_pred_classification_2)[0]
y_pred_classification_2 = \
y_pred_classification_2.apply(
lambda x: 1
if x > 0.1 else 0
).values
#### #### #### #### Prediction Process #### #### #### ####
# TAU_LIST = np.arange(0.001, 0.999, 0.01)
#
# tau_best = -1
# f1_score_best = -1
        # confusion_matrix_best = ""
# mae_best = -1
#
# y_pred_prob = model_classification_2.predict_proba(self.c_x_test)
# y_pred_prob = pd.DataFrame(y_pred_prob)[1]
#
# for TAU in TAU_LIST:
# self.log("---- ---- ---- ---- TAU:" + str(TAU) + " ---- ---- ---- ----")
# y_pred_classification_2 = \
# y_pred_prob.apply(
# lambda x: 1
# if x > TAU else 0
# ).values
#
# y_pred_final = np.add(y_pred_classification_1, y_pred_classification_2)
# y_pred_final = pd.DataFrame(y_pred_final)[0]
# y_pred_final = \
# y_pred_final.apply(
# lambda x: 1
# if x > 0 else 0
# ).values
#
# f1_current = self.print_classification_performance_metrics(self.c_y_test, y_pred_final)
#
# if f1_current > f1_score_best:
# f1_score_best = f1_current
# tau_best = TAU
        #     confusion_matrix_best = str(sklearn.metrics.confusion_matrix(self.c_y_test, y_pred_final))
#
# self.log("<==== ==== ==== ==== REFRENCE - BEST METRICS (C_TOTAL) ==== ==== ==== ====>")
# self.log("Best Tau: " + str(tau_best))
# self.log("Best F1 Score: " + str(f1_score_best))
# self.log(confusion_matrix_bext)
#
# self.log("<==== ==== ==== ==== REFRENCE - BEST METRICS (C1) ==== ==== ==== ====>")
# self.print_classification_performance_metrics(self.c_y_test, y_pred_classification_1)
model_final = sklearn.ensemble.VotingClassifier(
estimators=[('xgboost', model_classification_1), ('nb', model_classification_2)],
voting='soft',
weights=[1.8, 1],
n_jobs=None, flatten_transform=None)
model_final.fit(self.c_x_train, self.c_y_train)
y_pred = model_final.predict(self.c_x_test)
self.print_classification_performance_metrics(self.c_y_test, y_pred)
def experiment03(self):
self.load_trainingset(False)
self.load_testset(False)
model_classification_1 = xgboost.XGBClassifier(learning_rate=0.1, n_estimators=100,
max_depth=30, min_child_weight=1, gamma=0.1,
subsample=0.8, colsample_bytree=0.8, colsample_bylevel=1,
objective='binary:logistic', nthread=4, n_jobs=2,
booster='gbtree', scale_pos_weight=20,
seed=0, reg_lambda=1, reg_alpha=0.1)
model_classification_1.fit(self.c_X, self.c_Y)
model_classification_2 = sklearn.naive_bayes.BernoulliNB(
alpha=0.01, binarize=0.0, class_prior=None, fit_prior=True)
model_classification_2.fit(self.c_X, self.c_Y)
model_final = sklearn.ensemble.VotingClassifier(
estimators=[('xgboost', model_classification_1), ('nb', model_classification_2)],
voting='soft',
weights=[1.8, 1],
n_jobs=None, flatten_transform=None)
model_final.fit(self.c_X, self.c_Y)
y_pred_final = model_final.predict(self.TESTSET_X)
#### #### #### #### EXPERIMENT!!!!!!! #### #### #### ####
y_pred_final = pd.DataFrame(y_pred_final)[0]
y_pred_final = \
y_pred_final.apply(
lambda x: 0.000001
if x != 0.0 else 0
).values
# Make the submission file.
submission = pd.DataFrame(y_pred_final, columns=['ClaimAmount'])
submission.to_csv("submission.csv", index=True, index_label='rowIndex')
# Print out success message.
print("COMPLETE: submission.csv created!")
def experiment04(self):
for value in range(10, 3000, 10):
self.load_trainingset(False, value)
model_classification_1 = xgboost.XGBClassifier(learning_rate=0.1, n_estimators=100,
max_depth=30, min_child_weight=1, gamma=0.1,
subsample=0.8, colsample_bytree=0.8, colsample_bylevel=1,
objective='binary:logistic', nthread=4, n_jobs=2,
booster='gbtree', scale_pos_weight=20,
seed=0, reg_lambda=1, reg_alpha=0.1)
model_classification_1.fit(self.o_x_train, self.o_y_train)
y_pred_final = model_classification_1.predict(self.o_x_test)
            self.print_classification_performance_metrics(self.o_y_test, y_pred_final)  # compare against the outlier labels, not the claim labels
def train(self):
# Load the training set.
self.load_trainingset(False)
#### #### #### #### Classification Model #### #### #### ####
model_classification = xgboost.XGBClassifier(learning_rate=0.1, n_estimators=100,
max_depth=30, min_child_weight=1, gamma=0.1,
subsample=0.8, colsample_bytree=0.8, colsample_bylevel=1,
objective='binary:logistic', nthread=4, n_jobs=2,
booster='gbtree', scale_pos_weight=20,
seed=0, reg_lambda=1, reg_alpha=0.1)
model_classification = model_classification.fit(self.c_X, self.c_Y)
#### #### #### #### Regression Model #### #### #### ####
model_regression = xgboost.XGBRegressor(objective='reg:linear', colsample_bytree=0.85,
eta=0.01, max_depth=9, alpha=10, n_estimators=1,
booster='gbtree', min_child_weight=0, gamma=0,
subsample=0.8, reg_alpha=100, max_delta_step=1)
model_regression = model_regression.fit(self.r_X, self.r_Y)
#### #### #### #### Save Models #### #### #### ####
joblib.dump(model_classification, 'model_classification')
joblib.dump(model_regression, 'model_regression')
def assess(self):
# Load the test set.
self.load_testset(False)
#### #### #### #### Prediction Process #### #### #### ####
model_classification = joblib.load('model_classification')
model_regression = joblib.load('model_regression')
#### #### #### #### Prediction Process #### #### #### ####
TAU = 0.701
y_pred_prob = model_classification.predict_proba(self.TESTSET_X)
y_pred_prob = pd.DataFrame(y_pred_prob)[1]
y_pred_classification = \
y_pred_prob.apply(
lambda x: 1
if x > TAU else 0
).values
y_pred_regression = model_regression.predict(self.TESTSET_X)
y_pred_final = y_pred_classification * y_pred_regression
#### #### #### #### EXPERIMENT!!!!!!! #### #### #### ####
y_pred_final = pd.DataFrame(y_pred_final)[0]
y_pred_final = \
y_pred_final.apply(
lambda x: 0.000001
if x != 0.0 else 0
).values
# Make the submission file.
submission = pd.DataFrame(y_pred_final, columns=['ClaimAmount'])
submission.to_csv("submission.csv", index=True, index_label='rowIndex')
# Print out success message.
print("COMPLETE: submission.csv created!")
def log(self, message):
self.log_file_handler.write(message + "\n")
print(message)
################################################################################
# Main #
################################################################################
if __name__ == "__main__":
# DataScienceModeler().experiment01(0.1)
# DataScienceModeler().experiment02()
# DataScienceModeler().experiment03()
DataScienceModeler().experiment04()
# DataScienceModeler().train()
# DataScienceModeler().assess()
|
[
"demikaiser13@gmail.com"
] |
demikaiser13@gmail.com
|
d5390cd3fc4c7e11dd95d9a7a65f7771fc6c78ab
|
ff14c66f799511240c7c60b31f596506a71cf908
|
/0409/0409/meow/views.py
|
52c1b60cd5ffe3394f3932df211e93b38b673b1e
|
[] |
no_license
|
tp6m35p4/JerryHW
|
bc13c73c5bbea5007c3d261db6f53196e5421aa8
|
27ae1e152f52a3b080f44d6cc96a18b64212a15f
|
refs/heads/master
| 2021-04-06T08:19:01.396037
| 2018-05-09T05:19:38
| 2018-05-09T05:19:38
| 125,303,452
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse("Hello I'm Here.")
|
[
"tp6m35p4@gmail.com"
] |
tp6m35p4@gmail.com
|
fd49ae7016a0ac3a6fdc35521bd8a09a22ee5974
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/right_eye/case_and_part/man_or_hand/ask_bad_number_by_large_child/want_hand_at_right_case/other_child.py
|
db2e3c9028494251d2f2119fdfeb68e0bc8bb6e7
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
#! /usr/bin/env python
def fact(str_arg):
company(str_arg)
print('few_man')
def company(str_arg):
print(str_arg)
if __name__ == '__main__':
fact('last_child_and_child')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
0c0da40ba3c1e233fa9350f6e73e4cf4b791d500
|
704e6f1297c6f9377f804639abec16b954459304
|
/左神/02/z_n_06_旋转正方形矩阵.py
|
8862ce3d3f227c483d4e6b35828fcd503fc1b4a1
|
[] |
no_license
|
Pysuper/LetCODE
|
a6d30a3f13445fee903d2823bc6835c1398a4362
|
a42098599bac4188eccb447de146434bc236a70a
|
refs/heads/master
| 2021-05-25T23:21:55.515406
| 2021-04-19T10:40:56
| 2021-04-19T10:40:56
| 253,962,304
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/4/15 22:11
# @Author : Zheng Xingtao
# @File : z_n_06_旋转正方形矩阵.py
"""
旋转正方形矩阵
【题目】 给定一个整型正方形矩阵matrix,请把该矩阵调整成顺时针旋转90度的样子。
【要求】 额外空间复杂度为O(1)。
"""
|
[
"1821764535@qq.com"
] |
1821764535@qq.com
|
5182128bfddbdf8b4bed4d80cd569be3a13fe406
|
eabba17ce7e4aa5c05b19c3e6c3655c8bbad64c7
|
/src/optimizer/flash.py
|
dd5f6fe873be55fce8435560356622933b8c2828
|
[] |
no_license
|
FahmidMorshed/proper-learning-SATD
|
d1fc819667ffd7107ac7194d8e6c1e98fd3e5df4
|
09c6cae621cb706d887066576e6f8264b24a094a
|
refs/heads/master
| 2020-04-14T17:56:32.300784
| 2019-12-12T01:05:01
| 2019-12-12T01:05:01
| 164,000,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,086
|
py
|
import random
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.tree import DecisionTreeRegressor
from optimizer.tuner import DT_TUNER
import numpy as np
BUDGET = 10
POOL_SIZE = 10000
INIT_POOL_SIZE = 10
def tune_dt(x_train, y_train, project_name):
tuner = DT_TUNER()
sss = StratifiedShuffleSplit(n_splits=1, test_size=.2, random_state=0)
for train_index, tune_index in sss.split(x_train, y_train):
x_train_flash, x_tune_flash = x_train[train_index], x_train[tune_index]
y_train_flash, y_tune_flash = y_train.iloc[train_index], y_train.iloc[tune_index]
best_conf = tune_with_flash(tuner, x_train_flash, y_train_flash, x_tune_flash, y_tune_flash, project_name,
random_seed=1)
return best_conf
def tune_with_flash(tuner, x_train, y_train, x_tune, y_tune, project_name, random_seed=0):
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
random.seed(random_seed)
print("DEFAULT F1: " + str(measure_fitness(tuner, x_train, y_train, x_tune, y_tune, tuner.default_config)))
this_budget = BUDGET
# Make initial population
param_search_space = tuner.generate_param_pools(POOL_SIZE)
# Evaluate initial pool
evaluted_configs = random.sample(param_search_space, INIT_POOL_SIZE)
#param_search_space = list(set(param_search_space) - (set(evaluted_configs)))
f_scores = [measure_fitness(tuner, x_train, y_train, x_tune, y_tune, configs) for configs in evaluted_configs]
# Filtering NaN case
evaluted_configs, f_scores = filter_no_info(project_name, evaluted_configs, f_scores)
print(project_name + " | F Score of init pool: " + str(f_scores))
# hold best values
ids = np.argsort(f_scores)[::-1][:1]
best_f = f_scores[ids[0]]
best_config = evaluted_configs[ids[0]]
# converting str value to int for CART to work
evaluted_configs = [tuner.transform_to_numeric(x) for x in evaluted_configs]
param_search_space = [tuner.transform_to_numeric(x) for x in param_search_space]
    # number of evaluations ("eval" would shadow the builtin, so use n_evals)
    n_evals = 0
while this_budget > 0:
cart_model = DecisionTreeRegressor(random_state=0)
cart_model.fit(evaluted_configs, f_scores)
next_config_id = acquisition_fn(param_search_space, cart_model)
next_config = param_search_space.pop(next_config_id)
next_config_normal = tuner.reverse_transform_from_numeric(next_config)
next_f = measure_fitness(tuner, x_train, y_train, x_tune, y_tune, next_config_normal)
if np.isnan(next_f) or next_f == 0:
continue
f_scores.append(next_f)
evaluted_configs.append(next_config)
if isBetter(next_f, best_f):
best_config = next_config_normal
best_f = next_f
this_budget += 1
print(project_name + " | new F: " + str(best_f) + " budget " + str(this_budget))
this_budget -= 1
        n_evals += 1
    print(project_name + " | Eval: " + str(n_evals))
return best_config
def acquisition_fn(search_space, cart_model):
    # Return the index of the configuration with the highest predicted score.
    predicted = cart_model.predict(search_space)
    ids = np.argsort(predicted)[::-1][:1]
    return ids[0]
def isBetter(new, old):
return old < new
def measure_fitness(tuner, x_train, y_train, x_tune, y_tune, configs):
clf = tuner.get_clf(configs)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_tune)
cmat = confusion_matrix(y_tune, y_pred)
return calc_f(cmat)
def calc_f(cmat):
# Precision
# ---------
prec = cmat[1, 1] / (cmat[1, 1] + cmat[0, 1])
# Recall
# ------
recall = cmat[1, 1] / (cmat[1, 1] + cmat[1, 0])
# F1 Score
# --------
f1 = 2 * (prec * recall) / (prec + recall)
return f1
def filter_no_info(label, evaluated_configs, fscores):
    # Rebuild the lists instead of deleting while iterating, which would
    # shift indices and skip elements.
    kept = [(c, s) for c, s in zip(evaluated_configs, fscores)
            if not (np.isnan(s) or s == 0)]
    if kept:
        evaluated_configs, fscores = (list(x) for x in zip(*kept))
    else:
        evaluated_configs, fscores = [], []
    return evaluated_configs, fscores
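
# Hypothetical usage sketch (not part of the original module); assumes X_train is
# a numpy array and y_train a pandas Series, matching how tune_dt indexes them:
#   best_conf = tune_dt(X_train, y_train, project_name="example-project")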
|
[
"ffahid@ncsu.edu"
] |
ffahid@ncsu.edu
|
fc41caa47540e6d53c846a96c85678df4e9dad14
|
df3853b41ed05d86f5bcd992fcc265f637c67784
|
/big_deal/13.py
|
307bc522e9c62b66886a18c198eac20a92395433
|
[] |
no_license
|
KseniaMIPT/Adamasta
|
6ab0121519581dbbbf6ae788d1da85f545f718d1
|
e91c34c80834c3f4bf176bc4bf6bf790f9f72ca3
|
refs/heads/master
| 2021-01-10T16:48:31.141709
| 2016-11-23T21:02:25
| 2016-11-23T21:02:25
| 43,350,507
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
import pprint
import copy
def digraph_from_input():
    """Read N edges ("u v", one per line) from stdin into an adjacency-set digraph."""
    N = int(input())
digraph = {}
for i in range(N):
line = input().split()
if line[0] not in digraph:
digraph[line[0]] = {line[1]}
else:
digraph[line[0]].add(line[1])
if line[1] not in digraph:
digraph[line[1]] = set()
return digraph
def square_graph(graph):
    """Return the square of the digraph: add an edge u -> w whenever a two-step path u -> v -> w exists."""
    new_graph = copy.deepcopy(graph)
for key in graph:
for node in graph[key]:
for far_node in graph[node]:
if far_node not in graph[key]:
new_graph[key].add(far_node)
return new_graph
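
# Illustrative example (not part of the original script): for the input
#   3
#   a b
#   b c
#   c a
# the squared graph gains every two-step edge, e.g. 'a': {'b', 'c'}.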
digraph = digraph_from_input()
graph = square_graph(digraph)
pprint.pprint(graph)
|
[
"ksenia22.11@yandex.ru"
] |
ksenia22.11@yandex.ru
|
de0b8eb5c42bafb75c648bb89135bbf3180a9711
|
453f1c74dfeb4e42489bff82d2c278b809b7e30f
|
/sparse_array.py
|
c04ce1e8253c8750dc00c246a236a7c253061b77
|
[] |
no_license
|
zixizhong123/7-16-21
|
a8696baa331787533211df7817aca64f992f5759
|
f306678da122c0019016722f43f0fdcbbc177121
|
refs/heads/main
| 2023-06-26T16:36:03.351714
| 2021-07-26T17:53:11
| 2021-07-26T17:53:11
| 386,723,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# problem overview: https://www.hackerrank.com/challenges/sparse-arrays/problem
def matchingStrings(strings, queries):
result = []
reference = {}
for s_item in strings:
if s_item in reference:
reference[s_item] += 1
else:
reference[s_item] = 1
for q_item in queries:
if q_item in reference:
result.append(reference[q_item])
else:
result.append(0)
return result
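
# Quick illustrative check (not part of the original solution), using the sample
# from the linked HackerRank problem statement:
if __name__ == "__main__":
    strings = ["aba", "baba", "aba", "xzxb"]
    queries = ["aba", "xzxb", "ab"]
    print(matchingStrings(strings, queries))  # expected output: [2, 1, 0]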
|
[
"zixi.zhong567@gmail.com"
] |
zixi.zhong567@gmail.com
|
39ed07c21541243ac2a44a54ab21ceb442986115
|
b0e36e26ed289c2ffccc0d57a6dafe4614f5a426
|
/cv2license.py
|
d89431018dc02e2dd72975c3fb6fe0fd884b27f1
|
[
"MIT"
] |
permissive
|
kushalasn/Plate-geometry-using-opencv
|
a740e78876e9c00b1fa8a237529e7ab7a2b46882
|
7f39fc9c8d19864f1c5d0770f21a850d00bf7243
|
refs/heads/master
| 2020-08-09T00:11:27.616144
| 2019-10-09T15:34:23
| 2019-10-09T15:34:23
| 213,955,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
import cv2
import imutils
import numpy as np
from PIL import Image
img = cv2.imread('D:/data folder/image_0438.jpg',cv2.IMREAD_COLOR)
img = cv2.resize(img, (620,480) )
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grey scale
gray = cv2.bilateralFilter(gray, 11, 17, 17) #Blur to reduce noise
edged = cv2.Canny(gray, 30, 200) #Perform Edge detection
# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # normalize the findContours return tuple across OpenCV versions
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
# loop over our contours
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.018 * peri, True)
# if our approximated contour has four points, then
# we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None:
    detected = 0
    print("No contour detected")
else:
    detected = 1
if detected == 1:
    cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
    # Masking the part other than the number plate; kept inside the detection
    # branch so drawContours is never called with a None contour
    mask = np.zeros(gray.shape, np.uint8)
    new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1)
    new_image = cv2.bitwise_and(img, img, mask=mask)
    # Now crop to the plate's bounding box
    (x, y) = np.where(mask == 255)
    (topx, topy) = (np.min(x), np.min(y))
    (bottomx, bottomy) = (np.max(x), np.max(y))
    Cropped = gray[topx:bottomx+1, topy:bottomy+1]
    cv2.imshow('Cropped', Cropped)  # imshow needs a window name as its first argument
    cv2.waitKey(0)                  # cv2.waitKey (lowercase w); wait for a key press
    cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
kushalasn.noreply@github.com
|
db920bd11474bf967edf13751c6246c0a8481735
|
081afb9d33619fe5c17d8d06c7a95d1bee8a4906
|
/gpic_dl.py
|
73e06452b614b3765685bea7c013cb123c6312de
|
[] |
no_license
|
seekertrue/patent
|
9d2ebe1bea551929da8418c965ffd06743d2bd54
|
0766441ceb0ae01129a3cd33abbf3e09b303b655
|
refs/heads/master
| 2022-11-11T05:23:47.935682
| 2020-06-28T03:40:53
| 2020-06-28T03:40:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,923
|
py
|
# -*- coding: utf-8 -*-
import random, time, os, shutil
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from urllib.parse import unquote as up
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC
import selenium.webdriver.support.ui as ui
'''
@FileName : gpic_dl.py
@Created : 2020/04/17
@Updated : 2020/04/18
@Author : goonhope@gmail.com
@Function : Download patent lists and full-text bundles from the Guangdong Intellectual Property Public Information Service Platform (login required)
@url : http://s.gpic.gd.cn/route/hostingplatform/search/searchIndex
'''
class ChromeDriver(object):
def __init__(self, driver):
self.driver = driver
self.driver.maximize_window()
def init_page(self,url=r"http://s.gpic.gd.cn/route/hostingplatform/search/searchIndex"):
self.driver.get(url)
self.driver.implicitly_wait(2)
def show(self,css,timeout=25, gone=False):
located = EC.visibility_of_element_located((By.CSS_SELECTOR , css))
try:
ui.WebDriverWait(self.driver, timeout).until(located) if not gone else \
ui.WebDriverWait(self.driver, timeout).until_not(located)
return True
except TimeoutException:
return False
def input_key(self,ids,css="input.el-input__inner"):
if isinstance(ids,str):
input_el = self.driver.find_element_by_css_selector(css)
input_el.clear()
input_el.send_keys(ids)
else:
input_el = self.driver.find_elements_by_css_selector(css)[:2]
if len(ids) == len(input_el):
for x,i in zip(input_el,ids):
ActionChains(self.driver).double_click(x).perform()
# x.clear()
x.send_keys(i)
else:
print("check input_key function")
time.sleep(0.5)
def click_by_css(self, css="li.el-menu-item.pull-right a"):
search_el = self.driver.find_element_by_css_selector(css)
search_el.click()
time.sleep(3)
    def download(self, css="label[class^=el-radio-button]", list_pdf=True, xn="公司"):
        '''Download the result list or the packaged PDF bundle (xn is the company name, "公司" = "company").'''
        search_el = self.driver.find_elements_by_css_selector(css)
        num = 0 if list_pdf else 3  # radio index: 0 = result list, 3 = packaged PDF files
        search_el[num].click()
        self.click_by_css("div.el-col.el-col-24 > button")
        # time.sleep(slp)
        dcss = "div.el-col.el-col-24 > div > a"
        self.click_by_css(dcss) if self.show(dcss) else None
        xname = self.driver.find_element_by_css_selector(dcss).get_attribute("href")
        xname = up(os.path.split(xname)[-1])  # URL-decode the file name
        move_file(xname, xn)
    def grapHtml(self, css="span[class^='infoInlineSpan']"):
        content = [x.text.split(":")[-1].strip() for x in self.driver.find_elements_by_css_selector(css)]
        # Print five fields per line; the original joined the list into one string
        # first, which made the loop iterate over characters instead of fields.
        for i, item in enumerate(content):
            print(item, end="\t" if i % 5 != 4 else "\n")
    def go(self, ids, company):
        """Run the full search-and-download workflow."""
self.init_page()
self.click_by_css()
self.input_key(ids,)
self.click_by_css("div.el-form-item__content>button")
self.input_key(company)
self.click_by_css("button")
self.driver.switch_to.window(self.driver.window_handles[1])
self.click_by_css("div.tool-item>div>button")
time.sleep(random.uniform(2,3))
self.click_by_css("div>div.tool-item>a")
self.download(xn=company) # 下载list
self.click_by_css("div.el-col.el-col-24 > button") # 关闭
self.click_by_css("div>div.tool-item>a")
self.download(xn=company,list_pdf=False) # 下载pdf
self.driver.quit()
def move_file(xname, xn, show=True):
    '''Rename the downloaded file and move it to the target directory.'''
    dir_default = r"D:\Downloads"  # Chrome's default download directory
    subdir = r"D:\Temp"  # destination directory
    xo = os.path.join(dir_default, xname)
    for x in ["有限公司", "科技"]:  # strip "Co., Ltd." / "Technology" suffixes from the company name
        xn = xn.replace(x, "")
    xn = xn + "_专利清单_2020" + os.path.splitext(xname)[-1]  # "_专利清单_" means "_patent list_"
    fxn = os.path.join(dir_default, xn)
    time.sleep(2) if not os.path.exists(xo) else None  # wait for the download to finish
    os.renames(xo, fxn)
    time.sleep(2)
    oxn = os.path.join(subdir, xn)  # final file path
    os.remove(oxn) if os.path.exists(oxn) else None  # remove any existing copy first
    shutil.move(fxn, subdir)  # move into place
    os.system("start %s" % subdir) if show else None  # open the folder in Explorer
    return True if os.path.exists(oxn) else False
def main():
    """Patent download entry point."""
    company = input("Company to query: ")
    ids = ("*************", "*****")  # username and password (redacted in the source)
driver = webdriver.Chrome()
cd = ChromeDriver(driver)
try:
cd.go(ids, company)
except Exception as e:
print(str(e))
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
seekertrue.noreply@github.com
|
2b67df35926f8719de9439d943145dbd149ddec2
|
12c04c1c1751f2eedf651f386b674a8f7f85cf76
|
/fixture/soap.py
|
a4d64479bb890bca9a4eac739fc7a6c97f027858
|
[] |
no_license
|
araevskiy/python_training_mantis
|
72b79441f2775a0908a66a1b47409524ae8cd924
|
33f54ab8a538bdf9cafb4fbdcbe9440d1becfc10
|
refs/heads/master
| 2023-03-05T21:04:46.097521
| 2021-02-13T17:36:33
| 2021-02-13T17:36:33
| 337,806,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
def __init__(self, app):
self.app = app
def can_login(self, username, password, baseURL):
client = Client(baseURL + 'api/soap/mantisconnect.php?wsdl')
try:
client.service.mc_login(username, password)
return True
except WebFault:
return False
def get_project_list(self, username, password, baseURL):
client = Client(baseURL + 'api/soap/mantisconnect.php?wsdl')
def convert(project):
return Project(identifier=str(project.id), name=project.name, description=project.description)
try:
list_projects = client.service.mc_projects_get_user_accessible(username, password)
return list(map(convert, list_projects))
except WebFault:
return False
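
# Minimal usage sketch (not part of the original fixture). The base URL and
# credentials are placeholder assumptions for a local MantisBT install, and the
# Project model is assumed to expose its constructor kwargs as attributes.
if __name__ == "__main__":
    helper = SoapHelper(app=None)  # neither method below touches self.app
    base_url = "http://localhost/mantisbt/"
    if helper.can_login("administrator", "root", base_url):
        for project in helper.get_project_list("administrator", "root", base_url):
            print(project.name)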
|
[
"aeg2611@gmail.com"
] |
aeg2611@gmail.com
|
6eae88ca2e62179b92a37e3ede88af8e8da3f9d7
|
7a4d0c70b3fea5996907fc5b09f97cbc8c394a9b
|
/torch_connectomics/data/dataset/dataset_mask_skeleton_central.py
|
f2de1da521f2eb4cb9b93bc5a9aa2c63459c4180
|
[
"MIT"
] |
permissive
|
al093/pytorch_connectomics
|
af09672382088af10150ceb5a107142ff9a2e43e
|
52821951233b061102380fc0d2521843652c580a
|
refs/heads/master
| 2021-06-28T03:19:09.629623
| 2021-01-29T21:46:20
| 2021-01-29T21:46:20
| 188,486,004
| 2
| 0
|
MIT
| 2020-09-22T08:49:09
| 2019-05-24T20:51:20
|
Python
|
UTF-8
|
Python
| false
| false
| 11,267
|
py
|
from __future__ import print_function, division
import numpy as np
import torch
import torch.utils.data
import scipy
from scipy.ndimage import label as scipy_label
import scipy.ndimage.morphology as morphology
from scipy import spatial
import skimage
from .misc import crop_volume, rebalance_binary_class
from torch_connectomics.utils.vis import save_data
class MaskAndSkeletonCentralDataset(torch.utils.data.Dataset):
def __init__(self,
volume, label=None, skeleton=None,
sample_input_size=(8, 64, 64),
sample_label_size=None,
sample_stride=(1, 1, 1),
augmentor=None,
mode='train',
seed_points=None,
pad_size=None,
multisegment_gt=True):
if mode == 'test':
for x in seed_points:
assert len(x) == 1
self.mode = mode
self.input = volume
self.label = label
self.skeleton = skeleton
self.augmentor = augmentor # data augmentation
# samples, channels, depths, rows, cols
self.input_size = [np.array(x.shape) for x in self.input] # volume size, could be multi-volume input
self.sample_input_size = np.array(sample_input_size) # model input size
self.sample_label_size = np.array(sample_label_size) # model label size
self.seed_points = seed_points
self.half_input_sz = (sample_input_size//2)
self.seed_points_offset = pad_size - self.half_input_sz
self.sample_num = np.array([(np.sum([y.shape[0] for y in x])) for x in self.seed_points])
self.sample_num_a = np.sum(self.sample_num)
self.sample_num_c = np.cumsum([0] + list(self.sample_num))
# specifies if there are multiple segments in the GT, if yes then we need to keep only the central segment while calling get_item
self.multisegment_gt = multisegment_gt
self.dilation_sel = scipy.ndimage.generate_binary_structure(3, 1)
def __len__(self): # number of seed points
return self.sample_num_a
def __getitem__(self, index):
vol_size = self.sample_input_size
valid_mask = None
# Train Mode Specific Operations:
if self.mode == 'train':
# 2. get input volume
seed = np.random.RandomState(index)
# if elastic deformation: need different receptive field
# change vol_size first
pos = self.get_pos_seed(seed)
out_label = crop_volume(self.label[pos[0]], vol_size, pos[1:])
out_input = crop_volume(self.input[pos[0]], vol_size, pos[1:])
out_skeleton = crop_volume(self.skeleton[pos[0]], vol_size, pos[1:])
# select the center segment and delete the rest
# this is needed only for parallel fibers, for the single neuron prediction only perform cc and remove
# the non central segments
if self.multisegment_gt:
seg_id_to_keep = out_label[tuple(self.half_input_sz)]
out_label = self.keep_seg(out_label, seg_id_to_keep)
out_skeleton = self.keep_seg(out_skeleton, seg_id_to_keep)
# import pdb; pdb.set_trace()
# Remove non central segment
if out_skeleton.sum() == 0:
save_data(out_skeleton, 'skel.h5')
save_data(out_label, 'segment.h5')
print('Skeleton is empty after cropping from original volume.')
assert False
if out_label.sum() == 0:
save_data(out_skeleton, 'skel.h5')
save_data(out_label, 'segment.h5')
print('Out label is empty.')
assert False
out_label = out_label.copy()
out_skeleton = out_skeleton.copy()
out_input = out_input.copy()
# out_label = self.remove_non_central_seg(out_label)
# out_skeleton_temp = out_skeleton
# out_skeleton = out_skeleton * out_label # remove any skeleton part outside the segment, ensure a copy is created
# if out_skeleton.sum() == 0:
# save_data(out_skeleton_temp, 'cropped_selected_skel.h5')
# save_data(out_skeleton, 'processed_skel.h5')
# save_data(out_label, 'segment.h5')
# print('Skeleton is empty after removing non central part. Should not happen')
# assert False
# 3. augmentation
if self.augmentor is not None: # augmentation
data = {'image':out_input, 'label':out_label.astype(np.float32), 'input_label':out_skeleton.astype(np.float32)}
augmented = self.augmentor(data, random_state=seed)
out_input, out_label, out_skeleton = augmented['image'], augmented['label'], augmented['input_label']
out_input = out_input.astype(np.float32)
out_label = out_label.astype(np.float32)
out_skeleton = out_skeleton.astype(np.float32)
if (out_label.shape[0] != 64):
import pdb; pdb.set_trace()
if out_skeleton.sum() == 0:
print('Skeleton is empty after Aug. Should not happen')
assert False
# Test Mode Specific Operations:
elif self.mode == 'test':
    # test mode; skeletons are not provided here, so initialize
    # out_skeleton to None to avoid a NameError in the check below
    pos = self.get_pos_test(index)
    out_input = crop_volume(self.input[pos[0]], vol_size, pos[1:])
    out_label = None if self.label is None else crop_volume(self.label[pos[0]], vol_size, pos[1:])
    out_skeleton = None
if out_skeleton is not None and out_label is not None:
out_flux = self.compute_flux(out_label, out_skeleton)
out_flux = torch.from_numpy(out_flux)
out_skeleton = torch.from_numpy(out_skeleton)
out_skeleton = out_skeleton.unsqueeze(0)
if out_label is not None:
out_label = torch.from_numpy(out_label) # did not create a copy because remove non central seg creates a copy
out_label = out_label.unsqueeze(0)
# Turn input to Pytorch Tensor, unsqueeze once to include the channel dimension:
out_input = torch.from_numpy(out_input.copy())
out_input = out_input.unsqueeze(0)
if self.mode == 'train':
# TODO if masked loss around center is needed use this mask for rebalancing
# mask = morphology.binary_dilation(out_label[0].numpy(), structure=np.ones((5, 5, 5)))
# mask = mask.astype(np.float32)
# Rebalancing
temp = 1.0 - out_label.clone()
weight_factor, weight = rebalance_binary_class(temp, mask=None) # torch.from_numpy(mask)
flux_weight = self.compute_flux_weights(out_label, out_skeleton)
if(out_label.shape[1] != 64):
import pdb; pdb.set_trace()
return pos, out_input, out_label, out_flux, out_skeleton, weight, weight_factor, flux_weight
else:
return pos, out_input
def get_pos_dataset(self, index):
return np.argmax(index < self.sample_num_c) - 1 # which dataset
def get_pos_seed(self, seed, offset=None):
pos = [0, 0, 0, 0]
# pick a dataset
did = self.get_pos_dataset(seed.randint(self.sample_num_a))
pos[0] = did
# pick a mask bin
# p = [0.45, 0.15, 0.10, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
size_bin = np.random.choice(len(self.seed_points[did]))
# pick a index
idx = np.random.randint(self.seed_points[did][size_bin].shape[0])
# pick a position
if offset is None:
pos[1:] = self.seed_points[did][size_bin][idx] + self.seed_points_offset
else:
pos[1:] = self.seed_points[did][size_bin][idx] + offset
return pos
def get_pos_test(self, index):
did = self.get_pos_dataset(index)
idx = index - self.sample_num_c[did]
pos = self.seed_points[did][0][idx]
pos = pos + self.seed_points_offset
return np.concatenate(([did], pos))
def get_vol(self, pos):
out_input = crop_volume(self.input[pos[0]], self.sample_input_size, pos[1:])
out_input = torch.from_numpy(out_input.copy())
out_input = out_input.unsqueeze(0)
return out_input
def keep_seg(self, label, seg_id_to_keep):
return label == seg_id_to_keep
def remove_non_central_seg(self, label):
out_label, _ = scipy_label(label)
if out_label[tuple(self.half_input_sz)] == 0:
print('Center pixel is not inside 2nd inference\'s GT segmentation.')
print('This probably happened due to augmentation')
# Find nearby segment id and use that for now
seg_ids = np.unique(out_label[self.half_input_sz[0]-5:self.half_input_sz[0]+6,
self.half_input_sz[1]-5:self.half_input_sz[1]+6,
self.half_input_sz[2]-5:self.half_input_sz[2]+6])
seg_ids = seg_ids[seg_ids > 0]
if seg_ids.shape[0] > 1:
print('More than 1 disconnected segments near the center. This should have never happened!')
print('Using the first segment')
c_seg_id = seg_ids[0]
out_label = (out_label == c_seg_id)
else:
out_label = (out_label == out_label[tuple(self.half_input_sz)])
return out_label
def compute_flux(self, segment, skeleton):
skeleton_points = np.transpose(np.nonzero(skeleton))
# Finding closest points to skeleton
kdtree = spatial.KDTree(skeleton_points)
points = np.transpose(np.nonzero(segment))
_, idxs = kdtree.query(points)
dir_vec = skeleton_points[idxs] - points
factor = np.sqrt((np.sum(dir_vec**2, axis=1)) + np.finfo(np.float32).eps)
dir_vec = dir_vec / np.expand_dims(factor, axis=1)
# Creating direction field
direction = np.zeros((3,) + segment.shape, dtype=np.float32)
direction[0, tuple(points[:,0]), tuple(points[:,1]), tuple(points[:,2])] = dir_vec[:, 0]
direction[1, tuple(points[:,0]), tuple(points[:,1]), tuple(points[:,2])] = dir_vec[:, 1]
direction[2, tuple(points[:,0]), tuple(points[:,1]), tuple(points[:,2])] = dir_vec[:, 2]
return direction
def compute_skeleton(self, segment):
    # downsample in-plane, binarize, dilate, then skeletonize
    seg_d = scipy.ndimage.zoom(segment, zoom=[1, 0.20, 0.20], order=3)
    seg_d = (seg_d > 0)
    down_res = seg_d.shape  # downsampled resolution (kept for reference)
    for _ in range(3):
        seg_d = scipy.ndimage.morphology.binary_dilation(seg_d, structure=self.dilation_sel)
    # Computing Skeleton
    skeleton_downsampled = skimage.morphology.skeletonize_3d(seg_d)
    # nodes = np.stack(skel_object.get_nodes()).astype(np.uint16)
    return skeleton_downsampled
def compute_flux_weights(self, label, skeleton):
weight = torch.zeros_like(label)
label = label > 0
skeleton = skeleton > 0
total_vol = label.sum().float()
skl_vol = skeleton.sum().float()
non_skl_vol = total_vol - skl_vol
weight[skeleton] = non_skl_vol/total_vol
weight[label & ~skeleton] = skl_vol/total_vol
return weight
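compute_flux above builds a unit-vector field pointing from every segment voxel toward its nearest skeleton voxel. The standalone toy below (shapes and values are my own, not taken from the dataset) shows the same KDTree pattern in isolation:

import numpy as np
from scipy import spatial

segment = np.zeros((5, 5, 5), dtype=bool)
segment[2, 1:4, 1:4] = True          # a small 3x3 slab at z == 2
skeleton = np.zeros_like(segment)
skeleton[2, 2, 1:4] = True           # a line through the slab's middle

skeleton_points = np.transpose(np.nonzero(skeleton))
points = np.transpose(np.nonzero(segment))
_, idxs = spatial.KDTree(skeleton_points).query(points)
dir_vec = skeleton_points[idxs] - points
norm = np.sqrt((dir_vec ** 2).sum(axis=1)) + np.finfo(np.float32).eps
unit = dir_vec / norm[:, None]       # unit vectors toward the skeleton
print(unit.shape)                    # (9, 3): one vector per segment voxel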
|
[
"verma.alok001@gmail.com"
] |
verma.alok001@gmail.com
|
0d2d0fe4366f57197fefd87674a593074be7748d
|
586eed41a9a37611b19ab7d79db0b49121799f66
|
/Adventure_Python_Game.py
|
3b6cd2d023a9db27bc1cd4e3cc0ea5274403c2ab
|
[] |
no_license
|
eduardocor89/Adventure-game
|
d180a6e998222bd1276b8a97d65ec7e9cdbc0abe
|
9b6ae352dbaa755607aa2e1257c556f18b53e1d1
|
refs/heads/main
| 2023-03-04T17:14:21.774942
| 2021-02-13T20:21:10
| 2021-02-13T20:21:10
| 338,030,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,975
|
py
|
import time
import random
items = []
people = ["Adolf Hitler", "Benito Mussolini", "King Leopold", "Alexander Hamilton"]
enemy = random.choice(people)
def print_pause(message):
'''prints out messages slowly'''
time.sleep(1)
print(message)
time.sleep(1)
def intro():
'''Describes the world to player'''
print_pause("You find yourself standing in an open field, "
"filled with grass and yellow wildflowers.")
print_pause("Rumor has it that the wicked " + enemy +
" is somewhere around here, and has been terrifying"
" the nearby village.\n")
def landing():
'''Where the player starts the world'''
print_pause("Enter 1 to knock on the door of the house.\n"
"Enter 2 to peer into the cave. \n"
"What would you like to do?")
option = valid_input("Please enter 1 or 2\n", ['1', '2'])
if option == '1':
house()
elif option == '2':
cave()
def valid_input(prompt, options):
while True:
option = input(prompt).lower()
if option in options:
return option
print_pause("Sorry, I don't understand " + option + ".")
def house():
'''What happens inside house'''
print_pause("You approach the door of the house")
print_pause("You are about to knock when the door opens"
" and out steps " + enemy)
print_pause("Eep! This is the enemy's house!")
print_pause(enemy + " attacks you!")
fight()
def fight():
'''Player fights'''
if 'sword' not in items:
print_pause("You feel a bit under-prepared for this,"
" what with only having a tiny dagger")
action = valid_input("Would you like to 1 fight, or 2 run away?\n",
['1', '2'])
if action == '2':
print_pause("You run back into the field. "
"Luckily, you don't seem to have been followed.")
landing()
else:
print_pause("You do your best...")
print_pause("but your dagger is no match for " + enemy)
print_pause("you have been defeated!")
restart_game()
else:
action = valid_input("Would you like to 1 fight, or 2 run away?\n",
['1', '2'])
if action == '2':
print_pause("You run back into the field."
"Luckily, you don't seem to have been followed.")
landing()
else:
print_pause("As " + enemy + " moves to attack,"
" you unsheath your new sword.")
print_pause("The Sword of Ogoroth shines brightly"
" in your hand")
print_pause("as you brace yourself for the attack.")
print_pause("But " + enemy + " takes one look "
"at your shiny new toy and runs away!")
print_pause("You have rid the town of " + enemy)
print_pause("\nYOU ARE VICTORIOUS\n")
restart_game()
def cave():
'''Cave'''
print_pause("You peer cautiously into the cave.")
print_pause("It turns out to be only a very small cave.")
print_pause("Your eye catches a glint of metal behind a rock.")
print_pause("You have found the magical Sword of Ogoroth!")
print_pause("You discard your silly old dagger and"
" take the sword with you.")
print_pause("You walk back out to the field.\n")
items.append("sword")
landing()
def restart_game():
'''Play again?'''
print_pause("\n\nGAME OVER\n")
again = valid_input("\nWould you like to play again?\n"
"Enter 'yes' or 'no'\n", ["yes", "no"])
if "yes" in again:
print_pause("\nExcellent! Restarting the game...\n\n")
play_game()
elif "no" in again:
print_pause("\nGoodbye, brave warrior")
def play_game():
intro()
landing()
play_game()
|
[
"noreply@github.com"
] |
eduardocor89.noreply@github.com
|
b34f36df8a12a3eb0da4a3bac62f1312dd42b488
|
9102c3a5fa3a5b0202d61206973d0ea167f7a4d0
|
/August/30-LargestComponentSizebyCommonFactor.py
|
c772458c707ab0f764caae19f088bad275b69fc5
|
[] |
no_license
|
Madhav-Somanath/LeetCode
|
8e1b39e106cec238e5a2a3acb3eb267f5c36f781
|
b6950f74d61db784095c71df5115ba10be936c65
|
refs/heads/master
| 2023-01-08T15:10:00.249806
| 2020-10-31T14:45:43
| 2020-10-31T14:45:43
| 255,654,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
""" Given a non-empty array of unique positive integers A, consider the following graph:
There are A.length nodes, labelled A[0] to A[A.length - 1];
There is an edge between A[i] and A[j] if and only if A[i] and A[j] share a common factor greater than 1.
Return the size of the largest connected component in the graph. """
import math
from collections import Counter, defaultdict
from typing import List


class DSU:
def __init__(self, N):
self.p = list(range(N))
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, x, y):
xr, yr = self.find(x), self.find(y)
self.p[xr] = yr
class Solution:
def primes_set(self,n):
for i in range(2, int(math.sqrt(n))+1):
if n % i == 0:
return self.primes_set(n//i) | set([i])
return set([n])
def largestComponentSize(self, A: List[int]) -> int:
n = len(A)
UF = DSU(n)
primes = defaultdict(list)
for i, num in enumerate(A):
pr_set = self.primes_set(num)
for q in pr_set: primes[q].append(i)
for _, indexes in primes.items():
for i in range(len(indexes)-1):
UF.union(indexes[i], indexes[i+1])
return max(Counter([UF.find(i) for i in range(n)]).values())
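A quick check with the canonical LeetCode 952 examples (in [4, 6, 15, 35], consecutive pairs share the prime factors 2, 3, 5 and 7, so all four numbers join one component):

print(Solution().largestComponentSize([4, 6, 15, 35]))   # 4
print(Solution().largestComponentSize([20, 50, 9, 63]))  # 2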
|
[
"madhav.somanath@gmail.com"
] |
madhav.somanath@gmail.com
|
32dfd32e42ef76c7c13cde82e6183c5ec91ceb2b
|
f8bc54c4eeeadee96df0c42e0c0274bf82e4fc16
|
/test/test_CalcRating.py
|
d35b4dc4dbb8c1bcacd03dd054e8c5a48b9e8ab7
|
[] |
no_license
|
Di-98/PTLab1
|
7f08b27313f8ac50a1f4e1d4faf10d3e191baf2d
|
74ae325c50c38cd50e03f5314843d764b961b011
|
refs/heads/main
| 2023-08-01T23:43:31.974359
| 2021-09-28T13:44:06
| 2021-09-28T13:44:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
from typing import Dict, Tuple
from Types import DataType
from CalcRating import CalcRating
import pytest
RatingsType = Dict[str, float]
class TestCalcRating():
@pytest.fixture()
def input_data(self) -> Tuple[DataType, RatingsType]:
data: DataType = {
"Абрамов Петр Сергеевич":
[
("математика", 80),
("русский язык", 76),
("программирование", 100)
],
"Петров Игорь Владимирович":
[
("математика", 61),
("русский язык", 80),
("программирование", 78),
("литература", 97)
]
}
rating_scores: RatingsType = {
"Абрамов Петр Сергеевич": 85.3333,
"Петров Игорь Владимирович": 79.0000
}
return data, rating_scores
def test_init_calc_rating(self, input_data:
Tuple[DataType,
RatingsType]) -> None:
calc_rating = CalcRating(input_data[0])
assert input_data[0] == calc_rating.data
def test_calc(self, input_data:
Tuple[DataType, RatingsType]) -> None:
rating = CalcRating(input_data[0]).calc()
for student in rating.keys():
rating_score = rating[student]
assert pytest.approx(rating_score,
abs=0.001) == input_data[1][student]
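The fixture values are plain per-student means ((80 + 76 + 100) / 3 ≈ 85.3333), so a minimal CalcRating consistent with this test could look like the sketch below (the real module under test may differ):

from typing import Dict, List, Tuple

DataType = Dict[str, List[Tuple[str, int]]]

class CalcRating:
    def __init__(self, data: DataType) -> None:
        self.data = data

    def calc(self) -> Dict[str, float]:
        # average score over all subjects for each student
        return {
            student: sum(score for _, score in subjects) / len(subjects)
            for student, subjects in self.data.items()
        }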
|
[
"alexey.bezruchenko@yandex.ru"
] |
alexey.bezruchenko@yandex.ru
|
64477e95e1a0ae9e1812c0c114cf87a0cbb5dcb2
|
25476f58ab74593902c0db71dd8e560dafa5442a
|
/tools/platform-tools/systrace/catapult/devil/devil/devil_env_test.py
|
e78221a07000fb52b7f967318a310e761555e43d
|
[
"BSD-3-Clause",
"Apache-2.0",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"NCSA",
"LicenseRef-scancode-unicode",
"LGPL-2.1-only",
"OpenSSL",
"blessing",
"MIT",
"NICTA-1.0",
"LicenseRef-scancode-protobuf",
"GPL-2.0-or-later",
"LicenseRef-scancode-openssl",
"Libpng",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-pcre",
"Zlib"
] |
permissive
|
CanciuCostin/android-spyware
|
859771d8ba17b434f3f330b08d6b28f9b26a5068
|
be9c2989a76214462b9fe5869c79ffbe86151f13
|
refs/heads/master
| 2023-04-11T11:34:01.983825
| 2023-03-26T12:25:01
| 2023-03-26T12:25:01
| 253,235,389
| 360
| 104
|
MIT
| 2023-03-03T12:59:41
| 2020-04-05T12:58:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import logging
import sys
import unittest
from devil import devil_env
_sys_path_before = list(sys.path)
with devil_env.SysPath(devil_env.PYMOCK_PATH):
_sys_path_with_pymock = list(sys.path)
import mock # pylint: disable=import-error
_sys_path_after = list(sys.path)
class DevilEnvTest(unittest.TestCase):
def testSysPath(self):
self.assertEquals(_sys_path_before, _sys_path_after)
self.assertEquals(
_sys_path_before + [devil_env.PYMOCK_PATH],
_sys_path_with_pymock)
def testGetEnvironmentVariableConfig_configType(self):
with mock.patch('os.environ.get',
mock.Mock(side_effect=lambda _env_var: None)):
env_config = devil_env._GetEnvironmentVariableConfig()
self.assertEquals('BaseConfig', env_config.get('config_type'))
def testGetEnvironmentVariableConfig_noEnv(self):
with mock.patch('os.environ.get',
mock.Mock(side_effect=lambda _env_var: None)):
env_config = devil_env._GetEnvironmentVariableConfig()
self.assertEquals({}, env_config.get('dependencies'))
def testGetEnvironmentVariableConfig_adbPath(self):
def mock_environment(env_var):
return '/my/fake/adb/path' if env_var == 'ADB_PATH' else None
with mock.patch('os.environ.get',
mock.Mock(side_effect=mock_environment)):
env_config = devil_env._GetEnvironmentVariableConfig()
self.assertEquals(
{
'adb': {
'file_info': {
'linux2_x86_64': {
'local_paths': ['/my/fake/adb/path'],
},
},
},
},
env_config.get('dependencies'))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
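devil_env.SysPath is used above as a context manager that temporarily extends sys.path. A minimal stand-in with the same observable behavior asserted by testSysPath (a sketch, not devil's actual implementation) is:

import sys
from contextlib import contextmanager

@contextmanager
def sys_path(path):
    """Temporarily append `path` to sys.path, restoring it on exit."""
    sys.path.append(path)
    try:
        yield
    finally:
        sys.path.remove(path)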
|
[
"costin.canciu@ibm.com"
] |
costin.canciu@ibm.com
|
4c3630fa7f9aa1f52b6fa1a39a51fe053f26f1ad
|
df93cf07a45ff105402bcb828dc2bf2ed9b6d952
|
/exercicio-012.py
|
a1a0046a297c72a4d5d35944ce23140716103d7d
|
[] |
no_license
|
carlosmachadojr/Curso-em-Video-Python-3
|
d19807191ab967fa26f9332eeef31ab7a6f1ed5e
|
c3c2faa8d68e7a9738aeb4973f9371149837fe54
|
refs/heads/master
| 2021-02-11T15:57:47.220758
| 2020-03-07T02:38:28
| 2020-03-07T02:38:28
| 244,507,162
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
##### PERCENTAGE - DISCOUNT CALCULATION #####
""" CURSO EM VÍDEO - PYTHON EXERCISE 12:
Write an algorithm that reads the price of a product and shows its new
price with a 5% discount.
Link: https://youtu.be/4MAmKOT9FeU
"""
###############################################################################
### START OF PROGRAM ##########################################################
###############################################################################
separador_1 = '\n' + '-' * 80 + '\n'
separador_2 = '\n' + '-' * 25 + '\n'
print(separador_1)
preco = round(float(input('What is the price of the product? R$ ')), 2)
desconto = 5  # in %
novo_preco = round(preco * (1 - desconto / 100), 2)
print(separador_2)
print('With the {}% discount, the product that costs '.format(desconto) +
      'R$ {} now costs R$ {}.'.format(preco, novo_preco))
print(separador_1)
###############################################################################
### END OF PROGRAM ############################################################
###############################################################################
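A quick worked example of the formula novo_preco = preco * (1 - desconto / 100): a product priced at R$ 100.00 with a 5% discount comes out to R$ 95.00.

assert round(100.00 * (1 - 5 / 100), 2) == 95.00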
|
[
"noreply@github.com"
] |
carlosmachadojr.noreply@github.com
|
d3416b979f79b7c5103f2e5dbe93fec870fc97b6
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba3712.pngMap.py
|
908829e20f14429ced9cb9035658d7b8b75c9d77
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471
| 2019-04-29T04:59:35
| 2019-04-29T04:59:35
| 168,515,579
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,468
|
py
|
ba3712.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000111111111111111111111110',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111100111111111111111111111111111111111111111111111111111111111111111111111110001111111111111111111111111111111111110',
'11111111111111111101111111111111111111111111111111111111111111111111111111111111111111110001111111111111111111111111111111111111',
'11111111111111111100111111111111111111111111111111111111111111111111111111111111111110000111111111111111111111111111111111111111',
'11111111111111111101111111111111111111111111111111111111111111111111111111111111000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111000000000011111111111111111111111111111111111111',
'10111111111111111111111111111111111111111111111111111111111111111111111111111111000000000111111111111111111111111111111111111111',
'10011111111111111111100111111111111111111111111111111111111111111111111111111010000000000111111111111111111111111111111111111111',
'01001111111111111111111111111111111111111111111111111111111111111111111111001000000000011111111111111111111111111111111111111111',
'00001111111111111111111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000111111101000000000000001011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111110000000000000001001111111111111111111111111111111111111111111',
'11111111111100111111111111111111111111111111111111111111111111111100000000000000000111111111111111111111111111111111111111111111',
'11111111111000011111111111111111111111111111111111111111111111111100000000000000000111111111111111111111111111111111111111111111',
'11111111111101011111111111111111111111111111111111111111111111110000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111001111111111111111111111111000000000000000001111111111111111111111111111111111111111111111',
'11111111111110001011111111111111111111111111111111111111111111110000000000000000001111111111111111111111111111111111111111111111',
'00010111111110011111111111111111111111111111111111111111111111110000000000000000001111111111111111111111111111111111111111111111',
'00001111111110001111111111111111111111111111111111111111111111111000000000000001011111111111111111111111111111111111111111111111',
'01010111111111111111111111111111111111111111111111111111111111111100000000000011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000000000010011111111111111111111111111111111111111111111111',
'11111111111111110111111111111111111111111111111111111111111111111111000000000000111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000000000000001011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111001111',
'11111111111111111111111111111111111111111111111111111111111111111110000000000000001111111111111111111111111111111111111110111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000111111111111111111111111111111111111110011111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000001111111111111111111111111111111111111110001111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000001111111111111111111111111111111111111111001111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111000001000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111000111111111111111111111110000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111010000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111110100000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111100111111100111111111111111111111111111111110000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111100111111100111111111111111111111111111110000000000000000000000000001111111111111111111111111111111111111111111',
'11111111111111111110111111100111111111111111111111111111000000000000000000000000000001111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111110000000000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111110000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111100001000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111001101110000000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110001000000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111110111111111111111111111111111111111111111101000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111100001111111111111111111111111111111111111111000000000000000000000000000000000000011111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000000000000000000000111111111111111111111111111111111111111',
]
|
[
"bili33@87ouo.top"
] |
bili33@87ouo.top
|
568bb4d357653572b46fb50c13970053985c3718
|
4f409291f40ed615300710260a1a014f29b9dbbb
|
/env_health_dashboard/server_locations.py
|
9474d1cb9f3679296680a94e7e25cf4601e213d2
|
[] |
no_license
|
wenxian/env-health-dashboard
|
34ac7f12ed7c996fa9a7a3ef41df18c9e7138572
|
04d5e36aceb4be5750aeba8b692e0ec271476fed
|
refs/heads/master
| 2021-01-23T13:17:29.760500
| 2014-05-21T18:18:47
| 2014-05-21T18:18:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
SFLY_JIRA = "https://bugs.tinyprints.com"
SFLY_CHINA_JENKINS = "http://china.stage.shutterfly.com:2010/"
SFLY_TRE_JENKINS = "http://tre-jenkins.internal.shutterfly.com:8080/"
ALEXANDRIA_SERVER = "http://test-results.internal.shutterfly.com"
def get_server(job_repository):
    if job_repository == "tre-jenkins":
        return SFLY_TRE_JENKINS
    elif job_repository == "china":
        return SFLY_CHINA_JENKINS
    # any other value previously raised UnboundLocalError on return;
    # fail with an explicit error instead
    raise ValueError("unknown job repository: %r" % job_repository)
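An equivalent lookup-table variant (a sketch; get_server_mapped and _SERVERS are my names, not part of the original module):

_SERVERS = {
    "tre-jenkins": SFLY_TRE_JENKINS,
    "china": SFLY_CHINA_JENKINS,
}

def get_server_mapped(job_repository):
    try:
        return _SERVERS[job_repository]
    except KeyError:
        raise ValueError("unknown job repository: %r" % job_repository)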
|
[
"wyang@shutterfly.com"
] |
wyang@shutterfly.com
|