blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9e94751b6f70c73ed790cef4cef4bfb8083f9ffd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_traipsed.py | f59c7ae5d2434f5d2f1133296a72f7b2307b4aa4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _TRAIPSED():
def __init__(self,):
self.name = "TRAIPSED"
self.definitions = traipse
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['traipse']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
37e1e2f43b6abf4adb8f7a1bbb7af5db0d5c8355 | 3e640f5c59e6cbd1741afb08212aa0e3627f9752 | /deps/bcRead.py | 296744bfcbb0f25518a6b6d59317e399202bc7a8 | [
"MIT"
] | permissive | CapPow/VARP_supplimental | 36a845cf5713c8d9243d7a1a2ac3afc35472f078 | 1db65f7447ec066232a8cb00c9b86bff9ee11b3f | refs/heads/master | 2023-02-21T11:13:05.537202 | 2021-01-28T05:19:33 | 2021-01-28T05:19:33 | 296,470,678 | 0 | 0 | null | 2021-01-27T23:45:36 | 2020-09-18T00:15:02 | Jupyter Notebook | UTF-8 | Python | false | false | 25,397 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
HerbASAP - Herbarium Application for Specimen Auto-Processing
performs post processing steps on raw format images of natural history
specimens. Specifically designed for Herbarium sheet images.
"""
import re
import numpy as np
import cv2
from pylibdmtx.pylibdmtx import decode as libdmtx_decode
# import the pyzbar fork (local)
from .pyzbar.pyzbar import decode as zbar_decode
###
# Developer note: the libraries: re, pyzbar, and pylibdmtx all have a "decode"
# method which are used in this class. This can cause difficult to debug issues
###
class bcRead():
    """A barcode reader class.

    Args:
        patterns (str): A string of uncompiled, "|" concatenated regex
            patterns used to filter decoded barcode values.
        backend (str): Either "zbar" or "libdmtx", to determine which
            library should be used for decoding. Default is 'zbar'.
        rotation_list (iterable, optional): iterable containing a series of
            int representing image rotations (in degrees) to attempt if no
            barcode is found. Default values are [9, 25, 18]. Rotation
            attempts stop after any results are found. The list's rotations
            are cumulative. Short or empty lists will decrease the time
            before giving up on finding a barcode.

    Attributes:
        rePattern (obj): A compiled regex pattern.
        backend (str): which decoder backend is currently selected.
        rotation_list (list): The saved rotation list.
    """
def __init__(self, patterns, backend='zbar',
rotation_list=[9, 25, 18], parent=None, *args):
super(bcRead, self).__init__()
self.parent = parent
self.compileRegexPattern(patterns)
# This might need promoted to a user preference in mainwindow
self.rotation_list = rotation_list
self.backend = backend
def decode_zbar(self, im):
    # Thin wrapper so both backends expose an identical call signature.
    return zbar_decode(im)
def decode_libdmtx(self, im):
    # Thin wrapper for datamatrix decoding; timeout is in milliseconds
    # per the pylibdmtx API, bounding worst-case decode time.
    return libdmtx_decode(im, timeout=1500)
def set_backend(self, backend='zbar'):
"""
Sets which libarary should be used for decoding. Default is 'zbar.'
:param backend: string either 'zbar' or 'libdmtx' libdmtx is useful for
datamatrix decoding.
:type backend: str
:return:
"""
self.backend = backend
if backend == 'zbar':
self.decode = self.decode_zbar
elif backend == 'libdmtx':
self.decode = self.decode_libdmtx
def compileRegexPattern(self, patterns):
""" compiles a collection specific regex pattern """
# assume an empty pattern is a confused user, match everything.
if patterns == '':
patterns = '^(.*)'
try:
rePattern = re.compile(patterns)
self.rePattern = rePattern
except re.error:
raise
def decodeBC(self, img, verifyPattern=True, return_details=False):
    """Attempt to decode barcode(s) from an image array object.

    Args:
        img (numpy.ndarray): a numpy image array object.
        verifyPattern (bool, optional): when True (default), only values
            matching the compiled self.rePattern are returned.
        return_details (bool, optional): accepted for backward
            compatibility but currently ignored — only the decoded
            string value(s) are returned, never bounding boxes/formats.

    Returns:
        list: decoded (and optionally pattern-filtered) barcode strings.

    Raises:
        ValueError: if self.backend is neither 'zbar' nor 'libdmtx'.
    """
    backend = self.backend
    if backend == 'zbar':
        # the vector-extraction strategy wraps zbar decoding
        # code_reader = self.decode_zbar
        code_reader = self.extract_by_squares
    elif backend == 'libdmtx':
        code_reader = self.decode_libdmtx
    else:
        # Previously an unknown backend fell through and triggered an
        # UnboundLocalError on code_reader below; raise a clear error.
        raise ValueError(f"unknown barcode backend: {backend!r}")
    # decode each code found from bytes to utf-8
    bcRawData = [x.data.decode('utf-8') for x in code_reader(img)]
    if verifyPattern:  # limit the results to those matching rePattern
        bcRawData = [x for x in bcRawData if self.rePattern.match(x)]
    return bcRawData
def rotateImg(self, img, angle, reversible=False):
    """Rotate `img` by `angle` degrees without cropping the corners.

    The output canvas is enlarged so the whole rotated image fits. If
    `reversible` is True, also compute the affine matrix that maps points
    in the rotated image back into the original image's coordinate frame.

    Returns:
        tuple: (rotated_img, reverse_matrix); reverse_matrix is None when
        reversible is False, so callers can unpack both cases uniformly.
    """
    # see: https://stackoverflow.com/questions/48479656/how-can-i-rotate-an-ndarray-image-properly
    # https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/
    (height, width) = img.shape[:2]
    (cent_x, cent_y) = (width // 2, height // 2)
    # negative angle: flips cv2's counter-clockwise-positive convention
    mat = cv2.getRotationMatrix2D((cent_x, cent_y), -angle, 1.0)
    cos = np.abs(mat[0, 0])
    sin = np.abs(mat[0, 1])
    # bounding-canvas size that fully contains the rotated image
    n_width = int((height * sin) + (width * cos))
    n_height = int((height * cos) + (width * sin))
    # shift the transform so the result is centred on the enlarged canvas
    mat[0, 2] += (n_width / 2) - cent_x
    mat[1, 2] += (n_height / 2) - cent_y
    rotated_img = cv2.warpAffine(img, mat, (n_width, n_height))
    if reversible:  # now calculate the reverse matrix
        (r_height, r_width) = rotated_img.shape[:2]
        (cent_x, cent_y) = (r_width // 2, r_height // 2)
        rev_mat = cv2.getRotationMatrix2D((cent_x, cent_y), angle, 1.0)
        rev_mat[0, 2] += (width / 2) - cent_x
        rev_mat[1, 2] += (height / 2) - cent_y
        return rotated_img, rev_mat
    else:  # return None so the results can be parsed similarly
        return rotated_img, None
def det_bc_center(self, rect, rev_mat):
"""
Used to determine the center point of a rotation corrected bounding box
:param rect: a pyzbar rectangle array structured as:
(left, top, width, height)
:type rect: Rect, array
:param angle: the angle of rotation applied to the initial image.
:type angle: int
:param rotated_shape: a tuple containing the rotated image's
(height, width) .
:type rotated_shape: tuple
:return: Returns the center point of the barcode before rotation.
:rtype: tuple, (x, y)
"""
px = rect.left + (rect.width/2)
py = rect.top + (rect.height/2)
if not isinstance(rev_mat, np.ndarray):
# no rotation, so current centerpoint is correct centerpoint
return (int(px), int(py))
# otherwise convert current centerpoint using reverse matrix
nx, ny = rev_mat.dot(np.array((px, py) + (1,))).astype(int)
return (nx, ny)
def angle_cos(self, p0, p1, p2):
    """Return |cos| of the angle at vertex p1 formed by points p0 and p2.

    Utilized by find_squares (adapted from the OpenCV samples); a value
    near 0 indicates a near-90-degree corner.
    """
    v1 = (p0 - p1).astype('float')
    v2 = (p2 - p1).astype('float')
    cosine = np.dot(v1, v2) / np.sqrt(np.dot(v1, v1) * np.dot(v2, v2))
    return abs(cosine)
def adjust_gamma(self, image, gamma=1.0):
    """Apply gamma correction to `image` via a 256-entry lookup table.

    gamma < 1 darkens the image, gamma > 1 brightens it.
    (after https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/)
    """
    inverse = 1.0 / gamma
    lut = (((np.arange(0, 256) / 255.0) ** inverse) * 255).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, lut)
def find_squares(self, img):
    """Identify square-ish contours in a grayscale image.

    Heavily modified from the OpenCV samples. Returns a list of 4-point
    contours (each reshaped to (4, 2)) whose area lies in (25, 10000)
    pixels, which are convex, and whose corners are all close to 90
    degrees (max |cos| < 0.1).
    """
    # binarise (Otsu), then blur and open to suppress texture noise
    ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    img = cv2.GaussianBlur(img, (3, 3), 3)
    img = cv2.erode(img, None)
    img = cv2.dilate(img, None, iterations=2)
    squares = []
    # sweep several binarisation thresholds so squares survive at least one
    for thrs in range(0, 255, 51):
        if thrs == 0:
            # at threshold 0, use Canny edges instead of a fixed threshold
            bin = cv2.Canny(img, 0, 50, apertureSize=5)
            bin = cv2.dilate(bin, None)
        else:
            _retval, bin = cv2.threshold(img, thrs, 255, cv2.THRESH_BINARY)
        contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            cnt_len = cv2.arcLength(cnt, True)
            # approximate each contour to a polygon with 2% tolerance
            cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
            contourArea = cv2.contourArea(cnt)
            if len(cnt) == 4 and contourArea > 25 and contourArea < 10000 and cv2.isContourConvex(cnt):
                cnt = cnt.reshape(-1, 2)
                # largest corner |cos| over the 4 corners; < 0.1 keeps
                # only quads whose corners are all near 90 degrees
                max_cos = np.max([self.angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                if max_cos < 0.1 :
                    squares.append(cnt)
    return squares
def merge_proposals(self, images):
    """Combine 1-D scanline proposals into a single 2-D uint8 image.

    Each surviving proposal becomes one vertical column of the output
    (column 0 stays blank as a separator); proposals of 10 pixels or
    fewer are discarded before merging.
    """
    minimum_length = 10
    kept = [line for line in images if line.shape[0] > minimum_length]
    canvas_height = max(line.shape[0] for line in kept) + 1
    canvas_width = len(kept) + 1
    canvas = np.zeros((canvas_height, canvas_width)).astype('uint8')
    for column, line in enumerate(kept, start=1):
        canvas[0:line.shape[0], column] = line
    return canvas
def det_midpoint(self, p1, p2):
    """Return the truncated integer midpoint of two (x, y) points.

    Called by det_centroid_intersection and the scanline builders.
    """
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return int(mid_x), int(mid_y)
def det_centroid_intersection(self, square):
"""
given a square contour, returns 2 vectors intersecting the midpoint.
"""
a, b, c, d = square
ab_mid = self.det_midpoint(a, b)
cd_mid = self.det_midpoint(c, d)
da_mid = self.det_midpoint(d, a)
bc_mid = self.det_midpoint(b, c)
return ab_mid, cd_mid, da_mid, bc_mid
def extend_vector(self, p1, p2, h, w, extend=500):
    """Slide each endpoint of the segment p1<->p2 by `extend` pixels along
    the line through both points, clamping the results into [0, w] x [0, h].

    With `extend` larger than the p1-p2 distance (the typical case) this
    effectively stretches the segment well past both original endpoints.
    """
    def _clamped_shift(src, dst):
        # angle of the dst -> src direction; shift src against it
        angle = np.arctan2(src[1] - dst[1], src[0] - dst[0])
        shifted_x = max(0, min(src[0] - extend * np.cos(angle), w))
        shifted_y = max(0, min(src[1] - extend * np.sin(angle), h))
        return shifted_x, shifted_y

    endpt_x, endpt_y = _clamped_shift(p1, p2)
    startpt_x, startpt_y = _clamped_shift(p2, p1)
    return startpt_x, startpt_y, endpt_x, endpt_y
def extract_vector_coords(self, x1, y1, x2, y2, h, w):
    """Return (row, col) index arrays for the pixels nearest to the
    segment (x1, y1) -> (x2, y2), sampled once per unit of length.

    Suitable for fancy-indexing a 2-D image: img[rows, cols].
    Modified from:
    https://stackoverflow.com/questions/7878398/how-to-extract-an-arbitrary-line-of-values-from-a-numpy-array
    """
    n_samples = int(np.hypot(x2 - x1, y2 - y1))
    cols = np.rint(np.linspace(x1, x2, n_samples)).astype(int)
    rows = np.rint(np.linspace(y1, y2, n_samples)).astype(int)
    return rows, cols
def extract_by_squares(self, gray, retry=True, extension=6):
    """Attempt to decode every barcode in an image via vector extraction.

    Square-ish contours are located, a scanline is extracted through each
    candidate square (along both axes), and the scanlines are merged into
    a small composite image which is handed to zbar. If nothing decodes,
    gamma/sharpen adjustments are tried on the composite; if `retry` is
    True the whole procedure is additionally re-run on a darkened and
    then on a shrunken copy of the original input.

    Args:
        gray (numpy.ndarray): BGR input image.
        retry (bool): whether the recursive fallbacks may run.
        extension (int): divisor of min(h, w) controlling how far each
            scanline is extended (applied in both directions, therefore
            effectively doubled).

    Returns:
        list: pyzbar decoded-symbol results (possibly empty).
    """
    # Pristine colour copy for the recursive fallbacks. The previous
    # implementation recursed on the already-grayscale array, which made
    # the cvtColor(BGR2GRAY) below fail on the second pass — the
    # annotated sibling method already used this retry_img approach.
    retry_img = gray.copy()
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    # apparently this does not generalize well for very large resolutions
    h, w = gray.shape[0:2]
    if max(w, h) > 6800:
        # cv2.resize takes dsize as (width, height)
        new_size = (int(w * 0.8), int(h * 0.8))
        w, h = new_size
        gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
    # ID squares
    squares = self.find_squares(gray)
    merged_lines = None
    if len(squares) < 1:
        # no candidate squares: dense whole-image scan instead
        z = zbar_decode(gray, y_density=3, x_density=3)
    else:
        # extract a scanline through each square along both axes
        h -= 1
        w -= 1
        line_data = []
        extend = min(h, w) // extension
        for square in squares:
            a, b, c, d = square
            for p1, p2 in ((self.det_midpoint(a, b), self.det_midpoint(c, d)),
                           (self.det_midpoint(d, a), self.det_midpoint(b, c))):
                x1, y1, x2, y2 = self.extend_vector(p1, p2, h, w, extend=extend)
                pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
                line_data.append(gray[pix_coords])
        merged_lines = self.merge_proposals(line_data)
        z = zbar_decode(merged_lines, y_density=0, x_density=1)
    # Composite-level fallbacks. Guarded on merged_lines: previously,
    # when no squares were found, merged_lines was undefined here and
    # the first fallback raised NameError.
    if len(z) < 1 and retry and merged_lines is not None:
        # first try darkening the composite
        merged_lines = self.adjust_gamma(merged_lines, 0.8)
        z = zbar_decode(merged_lines, y_density=0, x_density=1)
        if len(z) < 1:
            very_gamma_lines = self.adjust_gamma(merged_lines, 0.4)
            z = zbar_decode(very_gamma_lines, y_density=0, x_density=1)
        if len(z) < 1:
            # if that fails try sharpening it (unsharp mask)
            blurred = cv2.GaussianBlur(merged_lines, (0, 0), 10)
            merged_lines = cv2.addWeighted(merged_lines, 2, blurred, -1, 0)
            z = zbar_decode(merged_lines, y_density=0, x_density=1)
    # Whole-image retries (note: was `len(z) < 1 & retry`, which only
    # worked by precedence accident for bool retry; `and` states intent).
    if len(z) < 1 and retry:
        # re-run everything on a darkened copy of the original input
        z = self.extract_by_squares(self.adjust_gamma(retry_img, 0.4),
                                    retry=False, extension=extension)
    if len(z) < 1 and retry:
        # finally re-run on a shrunken copy; numpy shape is (h, w) while
        # cv2.resize expects (width, height) — the previous code passed
        # them in (h, w) order, transposing the aspect ratio.
        o_h, o_w = retry_img.shape[0:2]
        new_size = (int(o_w * 0.8), int(o_h * 0.8))
        z = self.extract_by_squares(cv2.resize(retry_img, new_size),
                                    retry=False, extension=extension)
    return z
def extract_by_squares_with_annotation(self, gray, fname, retry=True,
                                       extension=6):
    """
    This method only exists to produce a visual representation of the
    VARP process: it runs the same pipeline as extract_by_squares while
    writing annotated intermediate images (detected squares, extended
    scanline vectors, and the merged composite) next to `fname`.
    """
    base_fn = fname.rsplit(".", 1)[0]
    # pristine colour copy used by the "darken input" fallback below
    retry_img = gray.copy()
    img = gray.copy()
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    # apparently this does not generalize well for very large resolutions
    h, w = gray.shape[0:2]
    if max(w,h) > 6800:
        new_size = (int(w*0.8), int(h*0.8))
        w, h = new_size
        gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
    # ID squares
    squares = self.find_squares(gray)
    # save the annotated squares (subfig "A")
    cv2.drawContours(img, squares, -1, (0,255,0), 2)
    squares_fn = base_fn + "_squares.jpg"
    cv2.imwrite(squares_fn, img)
    #print(f'found {len(squares)} squares.')
    if len(squares) < 1:
        # no candidate squares: dense whole-image scan instead
        z = zbar_decode(gray, y_density=3, x_density=3)
    else:
        # iterate over each and det their midpoint intersects
        h -= 1
        w -= 1
        line_data = []
        # extension happens in both directions, therefore effectively doubled.
        extend = min(h, w) // extension
        for square in squares:
            a, b, c, d = square
            ab_mid = self.det_midpoint(a, b)
            cd_mid = self.det_midpoint(c, d)
            x1, y1, x2, y2 = self.extend_vector(ab_mid, cd_mid, h, w, extend=extend)
            # annotate the extension
            cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), thickness=1, lineType=8)
            pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
            zi = gray[pix_coords]
            line_data.append(zi)
            da_mid = self.det_midpoint(d, a)
            bc_mid = self.det_midpoint(b, c)
            x1, y1, x2, y2 = self.extend_vector(da_mid, bc_mid, h, w, extend=extend)
            # annotate the extension
            cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), thickness=1, lineType=8)
            pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
            zi = gray[pix_coords]
            line_data.append(zi)
        # save the annotated vectors (subfig "B")
        lines_fn = base_fn + "_vectors.jpg"
        cv2.imwrite(lines_fn, img)
        merged_lines = self.merge_proposals(line_data)
        # save the resulting composite (subfig "C")
        comp_fn = base_fn + "_composite.jpg"
        cv2.imwrite(comp_fn, merged_lines)
        #print(f'merged_lines shape = {merged_lines_shape}')
        z = zbar_decode(merged_lines, y_density=0, x_density=1)
    if len(z) < 1 and retry:
        # NOTE(review): if no squares were found above, merged_lines is
        # undefined here and the first fallback raises NameError — same
        # latent issue as in extract_by_squares; confirm and guard.
        print("Implimenting fallback methods:")
        # first try darkening it
        print(" [fallback method]: darkening composite image")
        merged_lines = self.adjust_gamma(merged_lines, 0.8)
        z = zbar_decode(merged_lines, y_density=0, x_density=1)
        if len(z) < 1:
            very_gamma_lines = self.adjust_gamma(merged_lines, 0.4)
            z = zbar_decode(very_gamma_lines, y_density=0, x_density=1)
        if len(z) < 1:
            print(" [fallback method]: sharpening composite image")
            # if that fails try sharpening it
            blurred = cv2.GaussianBlur(merged_lines, (0, 0), 10)
            merged_lines = cv2.addWeighted(merged_lines, 2, blurred, -1, 0)
            z = zbar_decode(merged_lines, y_density=0, x_density=1)
        if len(z) < 1:
            # if all that fails, run squares again on a darkened input
            print(" [fallback method]: darkening input image")
            gray = self.adjust_gamma(retry_img, 0.4)
            z = self.extract_by_squares_with_annotation(gray, fname, retry=False, extension=extension)
        if len(z) < 1:
            # if that fails, try squares on a shrunken input
            print(" [fallback method]: shrunk input image")
            # NOTE(review): numpy shape is (rows, cols) == (h, w), so
            # these names are swapped — but the swap cancels out because
            # cv2.resize expects dsize as (width, height). Confirm before
            # renaming.
            o_w, o_h = gray.shape[0:2]
            new_size = (int(o_h * 0.8), int(o_w * 0.8))
            gray = cv2.resize(gray, new_size)
            #print(f'retrying with size {new_size}')
            z = self.extract_by_squares_with_annotation(gray, fname, retry=False, extension=extension)
    return z
def reduction_determination_extract_by_squares(self, gray, retry=True, extension=6):
    """Instrumented variant of extract_by_squares used to measure the
    resolution reduction achieved by the composite-scanline step.

    Returns:
        tuple: (decoded symbol list, reduction) where reduction is
        (composite pixels / input pixels) - 1, or 0 when no squares were
        found and the whole image had to be scanned instead.
    """
    # apparently this does not generalize well for very large resolutions
    h, w = gray.shape[0:2]
    if max(w, h) > 6800:
        # cv2.resize takes dsize as (width, height)
        new_size = (int(w * 0.8), int(h * 0.8))
        w, h = new_size
        gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
    # ID squares
    squares = self.find_squares(gray)
    merged_lines = None
    if len(squares) < 1:
        # no candidate squares: dense whole-image scan, no reduction
        z = zbar_decode(gray, y_density=3, x_density=3)
        reduction = 0
    else:
        # iterate over each square and extract both crossing scanlines
        h -= 1
        w -= 1
        line_data = []
        # extension happens in both directions, therefore effectively doubled.
        extend = min(h, w) // extension
        for square in squares:
            a, b, c, d = square
            ab_mid = self.det_midpoint(a, b)
            cd_mid = self.det_midpoint(c, d)
            x1, y1, x2, y2 = self.extend_vector(ab_mid, cd_mid, h, w, extend=extend)
            pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
            line_data.append(gray[pix_coords])
            da_mid = self.det_midpoint(d, a)
            bc_mid = self.det_midpoint(b, c)
            x1, y1, x2, y2 = self.extend_vector(da_mid, bc_mid, h, w, extend=extend)
            pix_coords = self.extract_vector_coords(x1, y1, x2, y2, h, w)
            line_data.append(gray[pix_coords])
        merged_lines = self.merge_proposals(line_data)
        z = zbar_decode(merged_lines, y_density=0, x_density=1)
        o_h, o_w = gray.shape[0:2]
        m_h, m_w = merged_lines.shape[0:2]
        reduction = self.det_res_reduction(o_h, o_w, m_h, m_w)
    # Composite-level fallbacks. Guarded on merged_lines: previously,
    # when no squares were found, merged_lines was undefined here and
    # the first fallback raised NameError.
    if len(z) < 1 and merged_lines is not None:
        # first try darkening the composite
        merged_lines = self.adjust_gamma(merged_lines, 0.8)
        z = zbar_decode(merged_lines, y_density=0, x_density=1)
        if len(z) < 1:
            very_gamma_lines = self.adjust_gamma(merged_lines, 0.4)
            z = zbar_decode(very_gamma_lines, y_density=0, x_density=1)
        if len(z) < 1:
            # if that fails try sharpening it (unsharp mask)
            blurred = cv2.GaussianBlur(merged_lines, (0, 0), 10)
            merged_lines = cv2.addWeighted(merged_lines, 2, blurred, -1, 0)
            z = zbar_decode(merged_lines, y_density=0, x_density=1)
    # Whole-image retries (was `len(z) < 1 & retry`; `and` states intent).
    if len(z) < 1 and retry:
        # re-run on a darkened copy
        gray = self.adjust_gamma(gray, 0.4)
        z, reduction = self.reduction_determination_extract_by_squares(
            gray, retry=False, extension=extension)
    if len(z) < 1 and retry:
        # re-run on a shrunken copy; numpy shape is (h, w) while
        # cv2.resize expects (width, height) — the previous code passed
        # them in (h, w) order, transposing the aspect ratio.
        o_h, o_w = gray.shape[0:2]
        new_size = (int(o_w * 0.8), int(o_h * 0.8))
        gray = cv2.resize(gray, new_size, interpolation=cv2.INTER_NEAREST)
        z, reduction = self.reduction_determination_extract_by_squares(
            gray, retry=False, extension=extension)
    return z, reduction
def det_res_reduction(self, o_h, o_w, m_h, m_w):
    """Return the fractional pixel-count change from an original image
    (o_h x o_w) to the merged composite (m_h x m_w), rounded to 8 places.

    Negative values mean the composite is smaller than the original.
    """
    ratio = (m_h * m_w) / (o_h * o_w)
    return round(ratio - 1, 8)
def testFeature(self, img):
"""Returns bool condition, if this module functions on a test input."""
try:
# set aside current pattern and check for ANYTHING
decodedData = self.decodeBC(img, verifyPattern=True)
# return current pattern
if isinstance(decodedData, list):
return True
else:
return False
except Exception as e:
print(e)
# some unknown error, assume test failed
return False
| [
"calebadampowell@gmail.com"
] | calebadampowell@gmail.com |
a8de915bd630c78b7ed9df7605161ba20b2c4146 | c46eaf84859f830e63ac09d87870ad1aefc4303a | /Python_Rust_Module/fibrs/setup.py | 72e94b2f54b65c1ed6e732dcab7911459c245054 | [
"MIT"
] | permissive | glella/fib | 7a2b21d6607e50f1916ee4a34baf463c130554ea | 440bdb4da5c4ecb06cabeb4b09770a69e204114a | refs/heads/main | 2023-04-01T15:51:25.049402 | 2021-04-13T23:58:58 | 2021-04-13T23:58:58 | 349,891,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import sys
from setuptools import setup
try:
    from setuptools_rust import Binding, RustExtension
except ImportError:
    # setuptools-rust is required to build the extension module; attempt
    # to install it on the fly and re-import, aborting with the pip exit
    # code if the install fails.
    import subprocess
    errno = subprocess.call(
        [sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
    if errno:
        print("Please install setuptools-rust package")
        raise SystemExit(errno)
    else:
        from setuptools_rust import Binding, RustExtension
# NOTE(review): these two variables are defined but never passed to
# setup() below — presumably they were meant to be supplied as the
# setup_requires / install_requires keyword arguments; confirm intent.
setup_requires = ['setuptools-rust>=0.9.2']
install_requires = []
setup(
    name='fibrs',
    version='0.1',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Rust',
        'Operating System :: POSIX',
        'Operating System :: MacOS :: MacOS X',
    ],
    # compile the Rust crate as the fibrs.fibrs extension via PyO3
    rust_extensions=[
        RustExtension('fibrs.fibrs', binding=Binding.PyO3)],
    packages=['fibrs'],
    zip_safe=False,
)
| [
"arkorott@gmail.com"
] | arkorott@gmail.com |
edde427c7169f9711b7de62ead15d113ad9ef1cc | ab52c09c428f73ad0a43a112a68dc7fe71fdacaf | /CSVfileUpload/CSVapp/forms.py | 5a9cf36f79baf2a2f177d593dc008c4ff8ac0b5e | [] | no_license | Raarav/web-engineering | cebb42ee2b25eb46eda0079402049a476cfbd1e9 | 330aacb51ca492318092824701fabdaf5a450546 | refs/heads/master | 2020-04-28T09:25:41.430471 | 2019-07-28T07:32:54 | 2019-07-28T07:32:54 | 175,166,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from django import forms
from CSVapp.models import form
class EventsForm(forms.ModelForm):
    # ModelForm auto-generated from the `form` model defined in
    # CSVapp.models, exposing every model field on the form.
    class Meta:
        model = form
        fields = "__all__"
"33652351+Raarav@users.noreply.github.com"
] | 33652351+Raarav@users.noreply.github.com |
bfe6d834e8798d3475fd843db6ea34bbfcd75c37 | 94cb06d6a89b3518ab103fab3bcc03634283bde1 | /doc/conf.py | 66028559cfc5b5e931cb05980597b20be38f05b7 | [] | no_license | krakphp/lava | 886bf108fa9ce86e284a070569c883ed8a8d6b1b | 88490c5b9bb577289139f73e91ef996362eb6bf1 | refs/heads/master | 2023-08-12T00:35:49.017101 | 2017-09-17T03:08:31 | 2017-09-17T03:08:31 | 84,716,297 | 1 | 0 | null | 2017-09-17T03:08:32 | 2017-03-12T10:08:09 | PHP | UTF-8 | Python | false | false | 9,752 | py | # -*- coding: utf-8 -*-
#
# Lava documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 27 18:12:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#
# source_encoding = 'utf-8-sig'

# The master toctree document (the documentation's root page).
master_doc = 'index'

# General information about the project.
project = u'Lava'
copyright = u'2017, RJ Garcia'
author = u'RJ Garcia'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = u'0.3'
# The full version, including alpha/beta/rc tags.
release = u'0.3.4'
# The language for content autogenerated by Sphinx. Refer to the Sphinx
# documentation for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'

# List of patterns, relative to the source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Mw Http v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE: ensure the '_static' directory exists; Sphinx warns at build time
# if a listed path is missing.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for the HTML help builder.
# Renamed from the stale 'MwHttpdoc' (left over from the Mw Http project's
# conf.py this file was copied from) to match this project ('Lava').
htmlhelp_basename = 'Lavadoc'
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# Target name and title updated from the stale 'MwHttp' values to match
# this project ('Lava', see `project` above).
latex_documents = [
    (master_doc, 'Lava.tex', u'Lava Documentation',
     u'RJ Garcia', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Name and description updated from the stale 'mwhttp' values to match
# this project ('Lava', see `project` above).
man_pages = [
    (master_doc, 'lava', u'Lava Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# Target/title/menu entry updated from the stale 'MwHttp' values to match
# this project ('Lava', see `project` above).
texinfo_documents = [
    (master_doc, 'Lava', u'Lava Documentation',
     author, 'Lava', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| [
"rj@bighead.net"
] | rj@bighead.net |
e43c62f38f42153c1eeb029ab4ae25e4e477a4a8 | 48db5f8cf922c27d1a1ebab4b98206ca31336c65 | /trainer.py | 8afb7ca41825d2f4225dbb15d85fc659fdfb4136 | [] | no_license | yatendernitk/Machine-Learning-Python | fc45f8339c08d34ebcb8b901491d9251102b1fc7 | 654b75e4d3c4b99ea6e1d48b216fe2e925e994ff | refs/heads/master | 2020-05-23T22:17:27.452184 | 2017-03-14T13:10:34 | 2017-03-14T13:10:34 | 84,795,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | from scipy import optimize
class trainer(object):
    """Trains a neural-network object with scipy's BFGS optimizer.

    The network ``N`` must expose ``getParams()``, ``setParams(params)``,
    ``costFunction(X, y)`` and ``computeGradients(X, y)``.
    """

    def __init__(self, N):
        # Keep a handle on the network being trained.
        self.N = N

    def callbackF(self, params):
        # Called by scipy after every iteration: record the current cost
        # so the training curve can be inspected via self.J afterwards.
        self.N.setParams(params)
        current_cost = self.N.costFunction(self.X, self.y)
        self.J.append(current_cost)

    def costFunctionWrapper(self, params, X, y):
        # scipy expects a (cost, gradient) pair when jac=True.
        self.N.setParams(params)
        return self.N.costFunction(X, y), self.N.computeGradients(X, y)

    def train(self, X, y):
        # Stash the training data where the callback can see it, and
        # reset the per-iteration cost history.
        self.X, self.y = X, y
        self.J = []
        initial_params = self.N.getParams()
        _res = optimize.minimize(
            self.costFunctionWrapper,
            initial_params,
            jac=True,
            method='BFGS',
            args=(X, y),
            options={'maxiter': 200, 'disp': True},
            callback=self.callbackF,
        )
        # Leave the network at the optimum and keep the full result object.
        self.N.setParams(_res.x)
        self.optimizationResults = _res
| [
"ok@Yatenders-MacBook-Pro-2.local"
] | ok@Yatenders-MacBook-Pro-2.local |
2f9d9d0a5f33a0f8e9805fc11884091fcaef038d | 9015783bae7e68571fd349d59a0e7b2c54c5a4a8 | /Factory Method/Bin.py | e143263d0c4e44d4540eb27010b7bda28b4ffdf3 | [] | no_license | Bujno/Design-patterns | 0079fbee05983d099963ccf33996892910f1e1e8 | af9b7ae0ed2a2152b6deed5b59d59369108f7f51 | refs/heads/main | 2023-07-04T13:23:00.178447 | 2021-08-10T17:11:28 | 2021-08-10T17:11:28 | 393,768,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from Sweet import Sweet
class GlassBin:
def __init__(self, sweet_type: Sweet):
self.sweet_type = sweet_type
self.limit = 100
self.minimum = 10
self.set_of_sweets = {self.sweet_type.create_sweet() for _ in range(10)}
def restock(self):
if len(self.set_of_sweets) >= self.minimum:
return
self.set_of_sweets.add(self.sweet_type.create_sweet())
def get_sweet(self):
return self.set_of_sweets.pop() | [
"0k.bujnowicz@gmail.com"
] | 0k.bujnowicz@gmail.com |
e8cff7405331705ecde8b0a9722786a9a9e6d615 | 11ff14c118240e87c4804d0373e4656d0683d479 | /RatToolAgent/test/firefox_test.py | 63c7ccf8fd97890cb406cd2616cc6efaffa93c1d | [] | no_license | wxmmavis/OS3.1 | e3028d9c79d5a1a17449fea6380fcdda902bdec7 | 26d954344207a82d2298821c3c4f01302393dc7e | refs/heads/master | 2020-03-25T20:07:11.225493 | 2018-08-13T03:20:57 | 2018-08-13T03:20:57 | 144,115,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import sys
# Make the RatToolAgent package importable when this test is run directly
# from its test/ directory.
sys.path += ['../../RatToolAgent']
import RatToolAgent as rta
# Start a browser session; `id` identifies it in later agent calls
# (it shadows the `id` builtin, kept as-is for compatibility).
id = rta.init_and_start_browser()
# Download settings. The first three keys are pop()'d out as positional
# arguments, so only the remaining key (page_title) is forwarded as a
# keyword argument via **conf.
conf = {
    'validation_url': "http://172.16.10.252/authenticated/",
    'download_loc': r"//a[@id='logo']",
    'file_name': "logo.zip",
    'page_title': "Ruckus Automation Test",
}
try:
    rta.download_file_on_web_server(id, conf.pop('validation_url'),
                                    conf.pop('download_loc'),
                                    conf.pop('file_name'),
                                    **conf
                                    )
# Python 2 syntax (`except Exception, e` / print statements): this module
# predates Python 3 and will not run unmodified under it.
except Exception, e:
    print '........................................'
    print 'Raise:' + e.message
# Always close the browser, even after a failed download.
rta.close_browser(id)
| [
"1475806321@qq.com"
] | 1475806321@qq.com |
6d2bcb830f1e1bd1d6ad5e6a33c2a240bceaeb70 | 7606590d781a134cb1134fcf222f3eee6ce19219 | /contours.py | 7e06f119536b0f48d706000474ee09cedeb6ca49 | [] | no_license | lera000/use_openCV | ccca755938a4ce2feeb30c041d326130dad7afbe | b504f807de5380b91d89d1b9235ad48c559a838e | refs/heads/main | 2023-03-27T09:13:34.643495 | 2021-03-26T08:28:03 | 2021-03-26T08:28:03 | 351,703,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | import cv2
import numpy as np
img = cv2.imread('D:/rec.jpg', cv2.IMREAD_UNCHANGED)
# Convert the image to greyscale
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = 100
ret, thresh_img = cv2.threshold(img_grey, thresh, 255, cv2.THRESH_BINARY)
# NOTE(review): the two-value unpacking matches OpenCV 2.x/4.x; OpenCV 3.x
# returned three values here — confirm the installed version.
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Create an empty (black) image to draw the contours on
img_contours = np.zeros(img.shape)
# Draw the contours onto the empty image.
# NOTE(review): contourIdx=0 draws only the FIRST contour; pass -1 to draw
# them all — confirm which behaviour was intended.
cv2.drawContours(img_contours, contours, 0, (0, 255, 0), 3)
cv2.imwrite('D:/rec1.png', img_contours)
| [
"noreply@github.com"
] | lera000.noreply@github.com |
8eb0ddd533b6242fa21b29701e10215b497fcd90 | d93901e7ff019c7c929594c17b9ed0c575dd1165 | /NumPyNet/box.py | 506948ebbb806413bf3c0380425a8914f0f69669 | [
"MIT"
] | permissive | Nico-Curti/NumPyNet | 0e673ad3da4120cd761a5b1f4c1f0c429cfd20a9 | c5e217751e28f0812282333b83964b7fee217cfb | refs/heads/master | 2022-05-04T04:51:50.076629 | 2022-03-28T10:02:15 | 2022-03-28T10:02:15 | 199,490,280 | 57 | 10 | null | null | null | null | UTF-8 | Python | false | false | 7,109 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import operator
from functools import wraps
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
class Box (object):
  '''
  Detection box, stored as a center point plus a size.

  Parameters
  ----------
    coords : tuple (default=None)
      Box coordinates as (x, y, w, h), where (x, y) is the center of the
      box and (w, h) are its width and height. When None, all four
      members are left as None.

  Example
  -------
  >>> b1 = Box((.5, .3, .2, .1))
  >>> b2 = Box((.4, .5, .2, .5))
  >>> print('IOU: {:.3f}'.format(b1.iou(b2)))
  IOU: 0.091
  '''

  def __init__(self, coords=None):
    if coords is not None:
      try:
        self.x, self.y, self.w, self.h = coords
      except ValueError:
        class_name = self.__class__.__name__
        raise ValueError('{0}: inconsistent input shape. Expected a 4D (x, y, w, h) shapes and given {1}'.format(class_name, coords))
    else:
      self.x, self.y, self.w, self.h = (None, None, None, None)

  def _is_box(func):
    '''
    Class-body decorator: ensure the single argument of the wrapped
    binary method is another Box instance.

    Raises
    ------
    ValueError
      If the argument is not a Box.
    '''
    @wraps(func)
    def _(self, b):
      if isinstance(b, self.__class__):
        return func(self, b)
      else:
        raise ValueError('Box functions can be applied only on other Box objects')
    return _

  @property
  def box(self):
    '''
    Box coordinates.

    Returns
    -------
    coords : tuple
      The (x, y, w, h) members of the box.
    '''
    return (self.x, self.y, self.w, self.h)

  def __iter__(self):
    '''
    Iterate over the coordinates in (x, y, w, h) order.
    '''
    yield self.x
    yield self.y
    yield self.w
    yield self.h

  def __eq__(self, other):
    '''
    Two boxes are equal when both are Box objects with equal coordinates.
    '''
    return isinstance(other, Box) and tuple(self) == tuple(other)

  def __ne__(self, other):
    '''
    Inverse of __eq__.
    '''
    return not (self == other)

  def __repr__(self):
    '''
    Debug representation, e.g. ``Box(0.5, 0.3, 0.2, 0.1)``.
    '''
    return type(self).__name__ + repr(tuple(self))

  def _overlap(self, x1, w1, x2, w2):
    '''
    1D overlap of two centered segments (center x*, extent w*).

    Parameters
    ----------
      x1, w1 : float
        Center and extent of the first segment.
      x2, w2 : float
        Center and extent of the second segment.

    Returns
    -------
    overlap : float
      Length of the common interval; negative when the segments are
      disjoint (callers clamp it to zero).
    '''
    half_w1, half_w2 = w1 * .5, w2 * .5
    l1, l2 = x1 - half_w1, x2 - half_w2
    r1, r2 = x1 + half_w1, x2 + half_w2
    return min(r1, r2) - max(l1, l2)

  @_is_box
  def intersection(self, other):
    '''
    Area shared by this box and `other` (0. when they do not overlap).

    Parameters
    ----------
      other : Box
        2nd term of the evaluation.

    Returns
    -------
    intersection : float
      Intersection area of the two boxes.
    '''
    w = self._overlap(self.x, self.w, other.x, other.w)
    h = self._overlap(self.y, self.h, other.y, other.h)
    w = w if w > 0. else 0.
    h = h if h > 0. else 0.
    return w * h

  __and__ = intersection

  @_is_box
  def union(self, other):
    '''
    Total area covered by the two boxes (sum of areas minus intersection).

    Parameters
    ----------
      other : Box
        2nd term of the evaluation.

    Returns
    -------
    union : float
      Union area of the two boxes.
    '''
    return self.area + other.area - self.intersection(other)

  __add__ = union

  @_is_box
  def iou(self, other):
    '''
    Intersection over union; NaN when the union has zero area.

    Parameters
    ----------
      other : Box
        2nd term of the evaluation.

    Returns
    -------
    iou : float
      Intersection over union between the boxes.
    '''
    union = self.union(other)
    return self.intersection(other) / union if union != 0. else float('nan')

  __sub__ = iou

  @_is_box
  def rmse(self, other):
    '''
    Euclidean distance between the two (x, y, w, h) coordinate tuples.

    NOTE: despite the name, this is the plain L2 norm of the coordinate
    differences — it is NOT divided by the number of components.

    Parameters
    ----------
      other : Box
        2nd term of the evaluation.

    Returns
    -------
    rmse : float
      Distance between the coordinate vectors of the two boxes.
    '''
    diffs = tuple(map(operator.sub, self, other))
    dot = sum(map(operator.mul, diffs, diffs))
    return dot**(.5)

  @property
  def center(self):
    '''
    Center of the box (x and y are stored as the box center).

    Returns
    -------
    center : tuple
      The (x, y) center of the current box.
    '''
    # BUGFIX: previously read `self._object.box`, but Box instances have
    # no `_object` attribute, so this property always raised
    # AttributeError. Read the box's own coordinates instead.
    x, y, _, _ = self.box
    return (x, y)

  @property
  def dimensions(self):
    '''
    Dimensions of the rectangular box.

    Returns
    -------
    dims : tuple
      The (width, height) of the current box.
    '''
    # BUGFIX: previously read `self._object.box` (no such attribute),
    # which always raised AttributeError.
    _, _, w, h = self.box
    return (w, h)

  @property
  def area(self):
    '''
    Area of the box.

    Returns
    -------
    area : float
      w * h of the current box.
    '''
    return self.w * self.h

  @property
  def coords(self):
    '''
    Corner coordinates of the box in (left, top, right, bottom) order.

    NOTE: whether y - h/2 is the "top" or the "bottom" depends on the
    axis convention (y grows downward in image coordinates).

    Returns
    -------
    coords : tuple
      (x - w/2, y - h/2, x + w/2, y + h/2).
    '''
    x, y, w, h = self.box
    half_w, half_h = w * .5, h * .5
    return (x - half_w, y - half_h, x + half_w, y + half_h)

  def __str__(self):
    '''
    Human-readable corner representation. The labels follow a y-up
    convention (so position 1 is called "bottom"), while `coords`
    documents y-down; kept as-is for output compatibility.
    '''
    fmt = '(left={0:.3f}, bottom={1:.3f}, right={2:.3f}, top={3:.3f})'.format(*self.coords)
    return fmt
if __name__ == '__main__':
  # Visual smoke test: print the metrics for two overlapping boxes and
  # draw them with matplotlib (blue = b1, red = b2).
  import pylab as plt
  from matplotlib.patches import Rectangle
  b1 = Box((.5, .3, .2, .1))
  x_1, y_1, w_1, h_1 = b1.box
  left_1, top_1, right_1, bottom_1 = b1.coords
  print('Box1: {}'.format(b1))
  b2 = Box((.4, .5, .2, .5))
  x_2, y_2, w_2, h_2 = b2.box
  left_2, top_2, right_2, bottom_2 = b2.coords
  print('Box2: {}'.format(b2))
  print('Intersection: {:.3f}'.format(b1.intersection(b2)))
  print('Union: {:.3f}'.format(b1.union(b2)))
  print('IOU: {:.3f}'.format(b1.iou(b2)))
  print('rmse: {:.3f}'.format(b1.rmse(b2)))
  plt.figure()
  axis = plt.gca()
  # Rectangle takes the (left, top) corner plus width/height.
  axis.add_patch(Rectangle(xy=(left_1, top_1), width=w_1, height=h_1, alpha=.5, linewidth=2, color='blue'))
  axis.add_patch(Rectangle(xy=(left_2, top_2), width=w_2, height=h_2, alpha=.5, linewidth=2, color='red'))
  plt.show()
| [
"nico.curti2@unibo.it"
] | nico.curti2@unibo.it |
34629a6d69c14efaa2f5389ad697c3260d71ecd0 | ab1891d96edf63354926158a2a96481b5ab4587f | /app.py | 894802b303109fabdcbdf2204f97f39c6053f3c7 | [] | no_license | Ravikumar-Orsu/BMI-calculator-using-Flask | 2989357ae3e3f6a2ffd133030a3d7dba47d5ed32 | 35b94bbacc46b4b612589f3492f263352dfe013f | refs/heads/main | 2023-03-22T14:31:36.047122 | 2021-03-16T04:54:21 | 2021-03-16T04:54:21 | 348,218,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from flask import Flask, render_template,request
app = Flask(__name__)
@app.route("/",methods=['GET','POST'])
def index():
    """Render the BMI form; on POST compute BMI = weight / height**2."""
    # Empty string means "no result yet" (plain GET, or POST without data).
    bmi=""
    if request.method=="POST" and 'weight' in request.form:
        weight=float(request.form.get('weight'))
        height=float(request.form.get('height'))
        # NOTE(review): assumes weight in kg and height in metres; a height
        # of 0 raises ZeroDivisionError and non-numeric input raises
        # ValueError — confirm whether the form enforces this client-side.
        bmi=round(weight / (height * height), 2)
    return render_template("index.html",bmi=bmi)
app.debug=True
# NOTE(review): app.run() blocks until shutdown, so any statement after
# this line is effectively dead code.
app.run()
app.run(debug=True) | [
"noreply@github.com"
] | Ravikumar-Orsu.noreply@github.com |
a9c5032284b09d96cf4da891a6fccb74215aa549 | 72fe58285dbfefb9151ebb7be11279fd55a8f5b2 | /chapter-04-trees-and-graphs/src/find_common.py | 99507b985f0e5a1e699c7800ffaadf6bf7720497 | [] | no_license | bearzk/cracking-the-coding-interview | 65f48c4352651c20ce6faba11d91c641bdfdfb88 | 461ce1887766f45eea57e3383db2c0fc4506e5ca | refs/heads/master | 2021-01-16T18:43:45.664633 | 2013-05-10T01:48:32 | 2013-05-10T01:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # coding: utf-8
class Result:
    """Return value of common_anc.

    Attributes:
        node: the candidate node (the LCA when is_anc is True; otherwise
            whichever of p/q — or None — was found in the searched subtree).
        is_anc: True once the lowest common ancestor has been identified.
    """
    # BUGFIX: the class header was missing its trailing colon (SyntaxError).
    def __init__(self, node, is_anc):
        self.node = node
        self.is_anc = is_anc


def common_anc(root, p, q):
    """Find the lowest common ancestor of nodes p and q in the binary
    tree rooted at `root`.

    Nodes must expose `.left` and `.right`; comparison is by identity,
    so p and q must be nodes of the tree itself.

    Returns:
        Result: `.node` is the LCA when `.is_anc` is True.
    """
    if root is None:
        return Result(None, False)
    if root is p and root is q:
        # BUGFIX: was `Result(root, true)` — lowercase `true` is a
        # NameError in Python.
        return Result(root, True)
    rx = common_anc(root.left, p, q)
    if rx.is_anc:               # LCA already found in the left subtree
        return rx
    ry = common_anc(root.right, p, q)
    if ry.is_anc:               # LCA already found in the right subtree
        return ry
    if rx.node is not None and ry.node is not None:
        # p and q live in different subtrees: this node is their LCA.
        return Result(root, True)
    elif root is p or root is q:
        # This node is one of the targets; it is the LCA iff the other
        # target was found somewhere beneath it.
        is_anc = True if rx.node is not None or ry.node is not None else False
        return Result(root, is_anc)
    else:
        # Bubble up whichever target (if any) was found below.
        return Result(rx.node if rx.node is not None else ry.node, False)
| [
"sugihara@gmail.com"
] | sugihara@gmail.com |
8fab3a6177d60d8c3c725dbf64490e37be63d9d3 | 0f70a3eee204d7450fc955f10376bd45585b225c | /relational_operator.py | 53644ec930c9346b1ecf57434b82c2da97ac1f7f | [] | no_license | WillyChen123/2015Summer | 958aa4b6b969eff9f9b04052bbea43cf64b60ce8 | a8a3ea886d88db42299fe59880c99323516b2457 | refs/heads/master | 2021-01-01T05:38:35.912470 | 2015-07-15T03:10:55 | 2015-07-15T03:10:55 | 39,056,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | print 3==5
# Demonstrate Python's relational operators on the pair (3, 5).
# Parenthesized calls print the same booleans under both Python 2 and 3
# (the original `print expr` statement form was Python-2-only).
print(3 != 5)  # True
print(3 > 5)   # False
print(3 >= 5)  # False
print(3 < 5)   # True
print 3<=5 | [
"st40809@gmail.com"
] | st40809@gmail.com |
f22e1fdd4d81cea8b179bd88fa184e853f2d3167 | 133643780ba3ee33291471261e9d365c3b0bb9ae | /Includes/reset-model.py | e1543f7f67d1c4b0ec89948999389b0ee30c9c68 | [] | no_license | rajeshcummins/streaming-lakehouse | b884919af33a0af8a367dcfbfca469a0c997980c | c5533627a472fae25fcb85662eb11f434efd85d4 | refs/heads/master | 2023-06-02T06:41:47.162425 | 2021-06-15T17:03:23 | 2021-06-15T17:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # Databricks notebook source
from mlflow.tracking.client import MlflowClient
from mlflow.utils.rest_utils import RestException
# The Databricks notebook widget supplies the registry model name to
# reset (defaults to "foo").
dbutils.widgets.text("model_name", "foo")
model_name = dbutils.widgets.get("model_name")
client = MlflowClient()
try:
    # Archive the latest Production and Staging versions first, then
    # delete the registered model (presumably because deletion fails
    # while versions remain in an active stage — confirm against the
    # MLflow model-registry docs).
    for version in [model.version for stage in ["production", "staging"] for model in client.get_latest_versions(model_name, stages=[stage])]:
        client.transition_model_version_stage(model_name, version, "archived")
    client.delete_registered_model(model_name)
except RestException as E:
    # Best-effort reset: the model may simply not exist yet.
    print(E)
| [
"dstrodtman-db"
] | dstrodtman-db |
4394a0d430e67e0281718259baad893a3d33cdd3 | ab67dd529f45972b14e9a42fab5531861dbc582d | /Computer Science GCSE/Project/dobVerify.py | 20173cc912a82ee267f034620ac95d93a95e8402 | [] | no_license | hirurana/Past-Projects | e7f2e767625ca9df175c542f672a9f8ba5734875 | 5c29f59ce4a395f9a2b2b9b65b1d2594e5e5d6fa | refs/heads/master | 2021-08-27T23:47:47.146865 | 2017-12-10T20:27:22 | 2017-12-10T20:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | dob = []
#ask for date of birth and validate it as dd/mm/yy
def dob_is_valid(date_text):
    """Return True when *date_text* looks like a dd/mm/yy date string.

    Checks the exact "NN/NN/NN" shape, day in 01-31 and month in 01-12.
    (Month-specific day limits and leap years are not checked, matching
    the level of the original validation.)
    """
    import re
    if not re.fullmatch(r"\d{2}/\d{2}/\d{2}", date_text):
        return False
    day = int(date_text[0:2])
    month = int(date_text[3:5])
    return 1 <= day <= 31 and 1 <= month <= 12

if __name__ == "__main__":
    # BUGFIX: the original if/elif chain applied at most ONE rule, and
    # after re-prompting it appended the new answer without re-checking
    # it at all. Loop until the input actually passes validation.
    prompt = "Enter you date of birth in this format (e.g 10/08/99): \n"
    newDob = input(prompt)
    while not dob_is_valid(newDob):
        print("ERROR: date must be in dd/mm/yy form with a valid day and month")
        newDob = input(prompt)
    dob.append(newDob)
| [
"zcabhra@ucl.ac.uk"
] | zcabhra@ucl.ac.uk |
537d0566875454072446c9da67810baccfa301fc | b46e3837b5fa2e877ad0370aaa62a1c82d01408c | /old_files/akp/akp.py | 79a392d299c8580cab8e206a823db16e33a0ac38 | [] | no_license | alisever/WebCrawling | 13c3236ac1319ef9d7f23012a3d4f72f5dec73b7 | 4de03f9af3b2dec6647f01957fbe92bc4774040c | refs/heads/main | 2023-09-05T19:04:59.069459 | 2021-11-10T16:07:33 | 2021-11-10T16:07:33 | 399,955,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | import json
import scrapy
base = 'https://www.akparti.org.tr/haberler/kategori/genel-baskan'
class AllSpider(scrapy.Spider):
    """Collects article links from the AKP "genel-baskan" news category.

    The first response is the HTML landing page; every later response is
    a JSON page fetched from the site's Ajax endpoint. Each yielded item
    is ``{'link': <absolute article url>}``.
    """
    name = 'akp_all'
    start_urls = [base]
    page_no = 0

    def parse(self, response, **kwargs):
        # One invocation per result page.
        self.page_no += 1
        headers = {
            "accept": "application/json, text/javascript, */*; q=0.01",
            "accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
            "content-type": "application/json",
            "sec-ch-ua": "\"Chromium\";v=\"94\", \"Google Chrome\";v=\"94\", \";Not A Brand\";v=\"99\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "x-requested-with": "XMLHttpRequest"
        }
        # NOTE(review): "page" is the counter value at request-build time,
        # which may lag the page the response actually came from — confirm
        # the endpoint's paging semantics.
        payload = {
            "category": "27d14268-9f90-4830-b011-c48df5b97843",
            "page": self.page_no,
            "culture": "tr",
            "newsRootId": 1164,
            "tag": ""
        }
        ajax_url = 'https://www.akparti.org.tr/cms/surface/NewsAjaxOperations/Get'
        if self.page_no == 1:
            # HTML landing page: scrape the visible article links, then
            # switch over to the JSON API for the following pages.
            for link in response.css('div.col-md-4.col-sm-6 a::attr(href)').getall():
                yield {'link': 'https://www.akparti.org.tr' + link}
            yield scrapy.Request(ajax_url,
                                 method='POST',
                                 headers=headers,
                                 body=json.dumps(payload),
                                 callback=self.parse)
        else:
            # JSON page from the Ajax endpoint.
            page_data = json.loads(response.text)
            for item in page_data['Items']:
                yield {'link': 'https://www.akparti.org.tr' + item['Url']}
            if page_data['HasNext']:
                yield scrapy.Request(ajax_url,
                                     method='POST',
                                     headers=headers,
                                     body=json.dumps(payload),
                                     callback=self.parse)
# Links produced by a previous AllSpider run, stored as a JSON list of
# {'link': url} records.
with open('akp.json') as json_file:
    news_pages = json.load(json_file)


class SingleSpider(scrapy.Spider):
    """Visits the first ten collected article URLs and extracts the
    article fields (URL, title, date, body text)."""
    name = 'akp'
    start_urls = [a.get('link') for a in news_pages[:10]]

    def parse(self, response, **kwargs):
        def clean(text):
            # Strip CR/LF and non-breaking spaces from scraped text.
            return text.replace("\r", "").replace("\n", "").replace("\xa0", " ")

        title = response.css('div.content.clearfix.newsDetail > h1 ::text').get()
        # Drop the trailing 9 characters of the date string (time portion).
        date = response.css('div.col-md-6 span ::text').get()[:-9]
        body = ' '.join(response.css('div.content.clearfix.newsDetail > p ::text').getall())
        yield {
            'Url': response.request.url,
            'Baslik': clean(title),
            'Tarih': date,
            'Detay': clean(body),
        }
# class ExceptionSpider(scrapy.Spider):
# name = 'yeni_safak_except'
# start_urls = ['https://www.yenisafak.com/dusunce-gunlugu/'
# '15-temmuz-2016da-turkiyede-ne-oldu-2499421']
#
# def parse(self, response, **kwargs):
# yield {
# 'Url': response.request.url,
# 'Baslik': response.css('h1.title::text').get(),
# 'Tarih': response.css('time.item.time::text').get().partition(
# ',')[0],
# 'Detay': ''.join(response.css('[class^="text text"]::text'
# ).getall()).strip(),
# 'Yazar': response.css('div.text.text-666666 > strong::text').get()[1:],
# 'Haber / Kose Yazisi / Konusma': 'Kose Yazisi'
# }
| [
"alisever96@hotmail.com"
] | alisever96@hotmail.com |
636959b4b870889c2713bd4b762912ae01610e75 | b75d6359ae8166287ad3e2cf0cf0115facfc4e48 | /stonks/stonks_view/urls.py | f26091ebd08a383fe3e698bebfaee4c83446615f | [] | no_license | webclinic017/stonks-5 | 1e9f60da74fb74f4e09bb3f840de0fe1e5141ae8 | 4e5e59c98c420a88eb4836b2316c3820b64b8484 | refs/heads/master | 2023-04-18T07:31:04.963716 | 2021-04-25T17:24:47 | 2021-04-25T17:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from django.urls import path, include
from stonks_view import views
from rest_framework.routers import DefaultRouter
# REST API router: each registered ViewSet is exposed as a resource
# collection under the corresponding URL prefix.
router = DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'companies', views.CompanyViewSet)
# Explicit basename, presumably because this ViewSet has no queryset
# attribute for the router to derive one from — confirm in views.py.
router.register(r'company-earnings', views.CompanyEarningsViewSet, basename="companyearnings")
router.register(r'company-balance', views.CompanyBalanceViewSet)
router.register(r'company-cash-flow', views.CompanyCashFlowViewSet)
router.register(r'watch-list', views.WatchListViewSet)
router.register(r'watch-items', views.WatchItemsViewSet)
# Removed leftover debug output (pprint of router.urls and the default
# basenames): it executed on every import of this URLconf and spammed
# stdout at process start.
urlpatterns = [
path('', include(router.urls)),
path('register/', views.UserCreate.as_view()),
] | [
"bulvikk@gmail.ccom"
] | bulvikk@gmail.ccom |
5a452e46158ffd07b091c7de1a10427e119ef214 | 956b239b94b090931232f5f974366ee82bb89fef | /app/auth/email.py | 9eaedbed9bb867d3ae9c8fc1436b50726e8eba49 | [] | no_license | victor-aunon/microblog | 57b3650cf9f999dba32ea4a6d3843dc00ed41bdc | b74107d5a0df4649ee6a8e2e055fba03c01bb09e | refs/heads/master | 2022-12-16T10:36:51.045300 | 2019-04-21T01:22:15 | 2019-04-21T01:22:15 | 180,673,044 | 2 | 0 | null | 2022-12-08T04:58:18 | 2019-04-10T22:25:32 | Python | UTF-8 | Python | false | false | 497 | py | from flask import render_template, current_app
from flask_babel import _
from app.email import send_email
def send_password_reset_email(user):
    """Email *user* a password-reset link (plain-text and HTML bodies)."""
    token = user.get_reset_password_token()
    subject = _('[Microblog] Reset Your Password')
    sender = current_app.config['ADMINS'][0]
    text_body = render_template('email/reset_password.txt', user=user, token=token)
    html_body = render_template('email/reset_password.html', user=user, token=token)
    send_email(subject, sender=sender, recipients=[user.email],
               text_body=text_body, html_body=html_body)
"angel.aunon.garcia@gmail.com"
] | angel.aunon.garcia@gmail.com |
4bad0a9d74fdc33c1b08594b16c3ae6ae2d4ad36 | 26b6a35e2415d94fbc1c9fc43814309a5d6f443b | /tests/test_openapi_basic.py | f18074c73970570a97135bc4faab94c39ee95a93 | [
"BSD-3-Clause",
"MIT"
] | permissive | BigRLab/apiflask | 57e0c036aa5d284da5340dcecd49108eea651bcd | d6dd5595009be5de6a7741a5a887276c3ac011bf | refs/heads/main | 2023-05-30T21:30:17.930046 | 2021-07-11T04:07:15 | 2021-07-11T04:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,700 | py | import json
import pytest
from openapi_spec_validator import validate_spec
from .schemas import BarSchema
from .schemas import BazSchema
from .schemas import FooSchema
from apiflask import doc
from apiflask import input
from apiflask import output
from apiflask import Schema as BaseSchema
from apiflask.fields import Integer
def test_spec(app):
    # The spec property is built lazily and must at least contain the
    # top-level OpenAPI version field.
    assert app.spec
    assert 'openapi' in app.spec
def test_spec_processor(app, client):
    # A registered spec processor may mutate the generated document
    # before it is served; the edits must show up in the response.
    @app.spec_processor
    def edit_spec(spec):
        assert spec['openapi'] == '3.0.3'
        spec['openapi'] = '3.0.2'
        assert app.title == 'APIFlask'
        assert spec['info']['title'] == 'APIFlask'
        spec['info']['title'] = 'Foo'
        return spec
    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    assert rv.json['openapi'] == '3.0.2'
    assert rv.json['info']['title'] == 'Foo'
@pytest.mark.parametrize('spec_format', ['json', 'yaml', 'yml'])
def test_get_spec(app, spec_format):
    # 'json' yields a dict; both YAML spellings yield serialized text.
    spec = app._get_spec(spec_format)
    if spec_format == 'json':
        assert isinstance(spec, dict)
    else:
        assert 'title: APIFlask' in spec
def test_get_spec_force_update(app):
    # _get_spec() caches its result: a route added afterwards only
    # appears when force_update=True rebuilds the document.
    app._get_spec()
    @app.route('/foo')
    @output(FooSchema)
    def foo():
        pass
    spec = app._get_spec()
    assert '/foo' not in spec['paths']
    new_spec = app._get_spec(force_update=True)
    assert '/foo' in new_spec['paths']
def test_spec_attribute(app):
    # Unlike a cached _get_spec() snapshot, the `spec` attribute reflects
    # routes registered after the snapshot was taken.
    spec = app._get_spec()
    @app.route('/foo')
    @output(FooSchema)
    def foo():
        pass
    assert '/foo' not in spec['paths']
    assert '/foo' in app.spec['paths']
def test_spec_schemas(app):
    # Every schema used by @output must be registered once under
    # components/schemas, with the naming rules applied: partial schemas
    # get an "Update" suffix and a trailing "Schema" is stripped from
    # class names.
    @app.route('/foo')
    @output(FooSchema(partial=True))
    def foo():
        pass
    @app.route('/bar')
    @output(BarSchema(many=True))
    def bar():
        pass
    @app.route('/baz')
    @output(BazSchema)
    def baz():
        pass
    class Spam(BaseSchema):
        id = Integer()
    @app.route('/spam')
    @output(Spam)
    def spam():
        pass
    class Schema(BaseSchema):
        id = Integer()
    @app.route('/schema')
    @output(Schema)
    def schema():
        pass
    with app.app_context():
        spec = app.spec
    assert len(spec['components']['schemas']) == 5
    assert 'FooUpdate' in spec['components']['schemas']
    assert 'Bar' in spec['components']['schemas']
    assert 'Baz' in spec['components']['schemas']
    assert 'Spam' in spec['components']['schemas']
    assert 'Schema' in spec['components']['schemas']
def test_servers_and_externaldocs(app):
    # Both attributes default to None and, once assigned, must be emitted
    # verbatim in the served OpenAPI document.
    assert app.external_docs is None
    assert app.servers is None
    app.external_docs = {
        'description': 'Find more info here',
        'url': 'https://docs.example.com/'
    }
    app.servers = [
        {
            'url': 'http://localhost:5000/',
            'description': 'Development server'
        },
        {
            'url': 'https://api.example.com/',
            'description': 'Production server'
        }
    ]
    rv = app.test_client().get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    assert rv.json['externalDocs'] == {
        'description': 'Find more info here',
        'url': 'https://docs.example.com/'
    }
    assert rv.json['servers'] == [
        {
            'url': 'http://localhost:5000/',
            'description': 'Development server'
        },
        {
            'url': 'https://api.example.com/',
            'description': 'Production server'
        }
    ]
def test_auto_200_response(app, client):
    # A default 200 response is injected for views that declare no
    # response of their own; views that declare one (via @output status
    # or @doc responses) must NOT get the extra 200.
    @app.get('/foo')
    def bare():
        pass
    @app.get('/bar')
    @input(FooSchema)
    def only_input():
        pass
    @app.get('/baz')
    @doc(summary='some summary')
    def only_doc():
        pass
    @app.get('/eggs')
    @output(FooSchema, 204)
    def output_204():
        pass
    @app.get('/spam')
    @doc(responses={204: 'empty'})
    def doc_responses():
        pass
    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    assert '200' in rv.json['paths']['/foo']['get']['responses']
    assert '200' in rv.json['paths']['/bar']['get']['responses']
    assert '200' in rv.json['paths']['/baz']['get']['responses']
    assert '200' not in rv.json['paths']['/eggs']['get']['responses']
    assert '200' not in rv.json['paths']['/spam']['get']['responses']
    assert rv.json['paths']['/spam']['get']['responses'][
        '204']['description'] == 'empty'
def test_sync_local_json_spec(app, client, tmp_path):
    # With SYNC_LOCAL_SPEC enabled, serving the spec also writes an
    # indented JSON copy to LOCAL_SPEC_PATH.
    local_spec_path = tmp_path / 'openapi.json'
    app.config['SYNC_LOCAL_SPEC'] = True
    app.config['LOCAL_SPEC_PATH'] = local_spec_path
    app.config['SPEC_FORMAT'] = 'json'
    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    with open(local_spec_path) as f:
        spec_content = f.read()
        assert json.loads(spec_content) == app.spec
        assert '{\n  "info": {' in spec_content
        assert '"title": "APIFlask",' in spec_content
def test_sync_local_yaml_spec(app, client, tmp_path):
    # Same as the JSON variant, but the YAML serialization must match
    # str(app.spec) exactly.
    local_spec_path = tmp_path / 'openapi.json'
    app.config['SYNC_LOCAL_SPEC'] = True
    app.config['LOCAL_SPEC_PATH'] = local_spec_path
    app.config['SPEC_FORMAT'] = 'yaml'
    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    with open(local_spec_path) as f:
        spec_content = f.read()
        assert spec_content == str(app.spec)
        assert 'title: APIFlask' in spec_content
def test_sync_local_spec_no_path(app):
    # Enabling sync without configuring LOCAL_SPEC_PATH is an error.
    app.config['SYNC_LOCAL_SPEC'] = True
    with pytest.raises(TypeError):
        app.spec
| [
"withlihui@gmail.com"
] | withlihui@gmail.com |
abcd9cf3a6a72e23d78bf410cfbdac852343d238 | eb40dce4039d528b9cd06dbeda75da09d09d7fc5 | /need_install/Django-1.8.17/tests/basic/models.py | 0ebe3e0b4af812d92177a78a86fa007380fb0e16 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MulticsYin/MulticsSH | 39b62189446787c7f0f037b1640c9c780bd1dddd | 5837a0bff0e7da0e8535e4e0b31ef6baf24274b4 | refs/heads/master | 2021-08-28T07:53:51.759679 | 2017-12-11T15:31:03 | 2017-12-11T15:31:03 | 82,428,902 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # -*- coding: utf-8 -*-
"""
Bare-bones model
This is a basic model with only two non-primary-key fields.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    # The two non-primary-key fields referenced by the module docstring.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    class Meta:
        # Default queryset ordering: oldest first, ties broken by headline.
        ordering = ('pub_date', 'headline')
    def __str__(self):
        return self.headline
class ArticleSelectOnSave(Article):
    # Proxy variant of Article enabling Meta.select_on_save (the
    # SELECT-then-UPDATE save path) without creating a separate table.
    class Meta:
        proxy = True
        select_on_save = True
@python_2_unicode_compatible
class SelfRef(models.Model):
    # Optional self-reference; related_name='+' disables the reverse accessor.
    selfref = models.ForeignKey('self', null=True, blank=True,
        related_name='+')
    article = models.ForeignKey(Article, on_delete=models.SET_NULL, null=True, blank=True)
    def __str__(self):
        # This method intentionally doesn't work for all cases - part
        # of the test for ticket #20278
        return SelfRef.objects.get(selfref=self).pk
| [
"multics_luo@163.com"
] | multics_luo@163.com |
0ecb406dc4b005795c6d37aaa895fd106844ac7f | b1e7481f8b5bf40c2547c95b1863e25b11b8ef78 | /Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_C.py | a8cd9f368837fbf5bec45d00d8e189ee53cc12fe | [
"Apache-2.0"
] | permissive | NJManganelli/FourTopNAOD | 3df39fd62c0546cdbb1886b23e35ebdc1d3598ad | c86181ae02b1933be59d563c94e76d39b83e0c52 | refs/heads/master | 2022-12-22T22:33:58.697162 | 2022-12-17T01:19:36 | 2022-12-17T01:19:36 | 143,607,743 | 1 | 1 | Apache-2.0 | 2022-06-04T23:11:42 | 2018-08-05T11:40:42 | Python | UTF-8 | Python | false | false | 6,794 | py | #!/usr/bin/env python
import os, time, collections, copy, json, multiprocessing
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetHelperRun2 import *
from FourTopNAOD.Kai.modules.LeptonSkimmer import *
from FourTopNAOD.Kai.modules.JetMETSkimmer import *
# --- Job configuration: 2017 collision data, era C, single-muon channel ---
isData = True
isUltraLegacy = False
era = "2017"
subera = "C"
thePreselection = None
crossSection = None
# Integrated luminosity used for normalization (presumably fb^-1 for the
# 2017 dataset — confirm against the campaign documentation).
equivLumi = 41.53
nEventsPositive = None
nEventsNegative = None
sumWeights = None
TriggerChannel = "Mu"
JESUnc = "Merged" # options: "All", "Merged", "Total"
theFiles = inputFiles()  # input file list supplied by the CRAB job wrapper
# Golden-JSON luminosity-certification files per era, keyed by campaign
# ("non-UL" = end-of-year re-reco, "UL" = Ultra Legacy).
GoldenJSON = {
    "2016": {
        "non-UL": "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt",
        "UL": "Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt",
    },
    "2017": {
        "non-UL": "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt",
        "UL": "Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt",
    },
    "2018": {
        "non-UL": "Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt",
        "UL": "Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt",
    },
}
if isData:
    # Resolve the Golden JSON shipped with the package. Two candidate
    # locations are tried: the scram-built python/ area first, then the
    # source checkout under src/.
    theLumis = os.path.join(os.environ["CMSSW_BASE"], "python/FourTopNAOD/Kai/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
    print("Loading Golden Json: {}".format(theLumis))
    if not os.path.isfile(theLumis):
        # Fall back to the source-tree location.
        theLumis = os.path.join(os.environ["CMSSW_BASE"], "src/FourTopNAOD/Kai/python/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
    if not os.path.isfile(theLumis):
        raise RuntimeError("Valid GoldenJSON file not found, if running on CRAB try a new scram build before resubmitting")
else:
    # Simulation needs no luminosity mask.
    theLumis = None
moduleCache = []
if not isData:
    # --- MC-only corrections ---
    # Pileup reweighting, chosen by era.
    pu_weight_by_era = {
        "2016": puWeight_2016,
        "2017": puWeight_2017,
        "2018": puWeight_2018,
    }
    if era not in pu_weight_by_era:
        raise RuntimeError("Unexpected era identifier {}".format(era))
    moduleCache.append(pu_weight_by_era[era]())
    # JEC uncertainty sources to propagate into the b-tag shape SFs.
    # The btag POG provides all individual JEC sources except
    # RelativeSample; no regrouped ("Merged") shape uncertainties exist,
    # so "All" and "Merged" share the full list (see
    # https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit#gid=1345121349).
    if JESUnc in ["All", "Merged"]:
        btagjes_sources = ['jes', 'jesAbsoluteMPFBias', 'jesAbsoluteScale', 'jesAbsoluteStat', 'jesFlavorQCD', 'jesFragmentation', 'jesPileUpDataMC', 'jesPileUpPtBB', 'jesPileUpPtEC1', 'jesPileUpPtEC2', 'jesPileUpPtHF', 'jesPileUpPtRef', 'jesRelativeBal', 'jesRelativeFSR', 'jesRelativeJEREC1', 'jesRelativeJEREC2', 'jesRelativeJERHF', 'jesRelativePtBB', 'jesRelativePtEC1', 'jesRelativePtEC2', 'jesRelativePtHF', 'jesRelativeStatEC', 'jesRelativeStatFSR', 'jesRelativeStatHF', 'jesSinglePionECAL', 'jesSinglePionHCAL', 'jesTimePtEta']
    else:
        btagjes_sources = ['jes']
    # One scale-factor producer per tagger: medium WP plus shape corrections.
    for tagger in ("deepjet", "deepcsv"):
        moduleCache.append(btagSFProducer(era,
                                          algo=tagger,
                                          selectedWPs=['M', 'shape_corr'],
                                          sfFileName=None,  # deduced automatically
                                          verbose=0,
                                          jesSystsForShape=btagjes_sources))
#Need to make it into a function, so extra () pair...
jmeModule = createJMECorrector(isMC=(not isData),
dataYear=int(era),
runPeriod=subera if isData else None,
jesUncert=JESUnc,
jetType="AK4PFchs",
noGroom=False,
metBranchName="METFixEE2017" if era == "2017" else "MET",
applySmearing=True,
isFastSim=False,
applyHEMfix=True if era == "2018" and isUltraLegacy else False,
splitJER=False,
saveMETUncs=['T1', 'T1Smear']
)
moduleCache.append(jmeModule())
moduleCache.append(TriggerAndLeptonSkimmer('baseline',
era=era,
subera=subera,
isData=isData,
TriggerChannel=TriggerChannel,
fillHists=False,
mode="Flag",
)
)
moduleCache.append(JetMETSkimmer(jetMinPt=20.0,
jetMaxEta=2.4 if era == "2016" else 2.5,
jetMinID=0b010,
jetMinCount=4,
minPseudoHT=350,
fillHists=False
)
)
p=PostProcessor(".",
theFiles,
modules=moduleCache,
cut=thePreselection,
provenance=True,
fwkJobReport=True,
jsonInput=theLumis,
histFileName="hist.root",
histDirName="plots",
branchsel=None,
outputbranchsel=None,
compression="LZMA:9",
friend=False,
postfix=None,
noOut=False,
justcount=False,
haddFileName="tree.root",
maxEntries=None,
firstEntry=0,
prefetch=True,
longTermCache=False
)
p.run()
| [
"nicholas.james.manganelli@cern.ch"
] | nicholas.james.manganelli@cern.ch |
0cbc26a7c531c9e66e72aff03e1ef1e05d090406 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2542/60761/235001.py | 0f6cce935b31eb1a6dc6d3e0854022eb80c48159 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import math
arr=input("")
arr=arr.replace("[","")
arr=arr.replace("]","")
arr=list(map(int,arr.split(",")))
arr.sort()
i=1
maxlen=1
templen=1
while(i<len(arr)):
if(arr[i]==arr[i-1]+1):
templen=templen+1
else:
maxlen=max(templen,maxlen)
templen=1
i=i+1
print(maxlen) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
62130bd3030fa0dac1dd8acc26e97984f4f5efaa | fcd000497f179b1f43fead5a3198b0cbc85a5f0a | /src/smile_to_world_xiaofeng/hello.py | 08c2291ec06d2f8a2a828a4f8667aecddafe99e5 | [
"MIT"
] | permissive | XiaofengZhu/pypi-hello-world | 649480b524a2e9fa6bd347afdeb8ce4c53728a95 | ed980582362774c0b6350e0d0ade358022a14bbd | refs/heads/main | 2023-05-02T14:34:53.842517 | 2021-05-13T20:34:19 | 2021-05-13T20:34:19 | 366,445,915 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import time
class Hello:
def __init__(self):
self._author = 'Xiaofeng Zhu'
def print_message(self):
print('Hello world from {} at {}'.format(self._author, time.time())) | [
"xiaofengzhu@Xiaofengs-MBP.hsd1.wa.comcast.net"
] | xiaofengzhu@Xiaofengs-MBP.hsd1.wa.comcast.net |
9e2c439b85cafbc58738b03142b5eb7d4ab22848 | f0c44fbc139404011f55edc0c6a4f3c07f63a89f | /Assignment_6_Sort_Dataframe_Columns.py | 08be0b7cd396afa624b77cdc2ab28cbe72c46da6 | [] | no_license | A-Kryston/ISM-4402-BI-Public | 9a2e24102da19776b7d54a13b1bf51e621e0cf72 | ffe78e2dbc2c0e1bd184dc59b0710a61d28f80c1 | refs/heads/master | 2020-07-22T23:35:29.094728 | 2019-12-02T21:22:03 | 2019-12-02T21:22:03 | 207,369,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Andrea Kryston
# Assignment #6 - Sort columns in a dataframe (pg 60)
# Due Date - October 14, 2019
# Load the grade data, then sort by last name and age (ascending) and by
# grade (descending).
import pandas as pd

csv_path = "Datasets/gradedata.csv"
df = pd.read_csv(csv_path)
df.head()  # returns a preview; it only displays when run as a notebook cell

# Sort keys and their per-column directions (True = ascending).
sort_columns = ['lname', 'age', 'grade']
ascending_flags = [True, True, False]
df = df.sort_values(by=sort_columns, ascending=ascending_flags)
df.head()  # preview of the sorted frame (notebook display only)
| [
"noreply@github.com"
] | A-Kryston.noreply@github.com |
2905b09b116aa8620144bb52dcea36c4afdbe2c0 | f3065d37482cffadb5fe8caa397a8cc946710485 | /django_structure/src/profiles/migrations/0012_auto_20180321_1745.py | 279bfa92783124289a640b745c930b92b7f4703f | [] | no_license | AlfredMulder/Django_work | 6d006f8b1e0c679ed43377fb891d215e870114a9 | 5d347d6897f58f6977ab584a80edb111ed2dabaf | refs/heads/master | 2020-04-10T21:56:01.475953 | 2019-06-25T15:13:19 | 2019-06-25T15:13:19 | 124,296,776 | 0 | 0 | null | 2018-03-07T21:34:41 | 2018-03-07T21:29:05 | null | UTF-8 | Python | false | false | 440 | py | # Generated by Django 2.0.3 on 2018-03-21 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjust the Profile.image field options."""
    # Must be applied after migration 0011 of the 'profiles' app.
    dependencies = [
        ('profiles', '0011_auto_20180321_1744'),
    ]
    # AlterField keeps the column an ImageField; only the default value and
    # the upload_to directory change. NOTE(review): the default
    # 'path/to/my/default/image.jpg' looks like a placeholder — confirm a
    # real default image path is intended.
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(default='path/to/my/default/image.jpg', upload_to='profile_image'),
        ),
    ]
| [
"xanderhref@protonmail.com"
] | xanderhref@protonmail.com |
5e3af5114585babcf6308a0eed07cab9358841d2 | 7edebeae484480e9ecd786308846b434f3ace53c | /python/clu/websocket.py | 7cfe3da86f84c10d7fa762a32a19e9cb9f065dc8 | [
"BSD-3-Clause"
] | permissive | sdss/clu | fb7b220df092447bbbd6de996dc0fa1785b5ff87 | 086e80a6baa783e25f8c7ca73cd4ba5c42234e08 | refs/heads/main | 2023-08-30T00:30:42.191915 | 2023-08-25T19:07:55 | 2023-08-25T19:07:55 | 183,817,446 | 6 | 0 | BSD-3-Clause | 2023-05-25T06:56:06 | 2019-04-27T20:12:55 | Python | UTF-8 | Python | false | false | 3,648 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2023-05-25
# @Filename: websocket.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import json
from typing import TYPE_CHECKING
from websockets.legacy.protocol import broadcast
from websockets.server import serve
from clu.client import AMQPClient
if TYPE_CHECKING:
from websockets.server import WebSocketServerProtocol
from clu.client import AMQPReply
class WebsocketServer:
    """A websocket server that allows communication with the RabbitMQ exchange.

    The websocket server is a simple pass-through between a websocket client and
    an `.AMQPClient` that connects to the RabbitMQ exchange. Any `.AMQPReply`
    received by the AMQP client is packaged as a JSON and forwarded to the
    websocket clients. Websocket clients can send messages with the format ::

        {
            "consumer": ...,
            "command_string": ...,
            "command_id": ...
        }

    that will be sent to the corresponding actor commands queue. The websocket
    server does not track command completion, which is left to the user. Including
    a ``command_id`` with the message is recommended for the client to be able to
    track commands.

    Parameters
    ----------
    whost
        The host where to run the websocket server.
    wport
        The TCP port on which to run the websocket server.
    client_kwargs
        Arguments to pass to the `.AMQPClient` connection to RabbitMQ.

    """

    def __init__(self, whost: str = "0.0.0.0", wport: int = 9876, **client_kwargs):
        self.client = AMQPClient(**client_kwargs)
        self.wparams = (whost, wport)
        # Currently-connected websocket clients; AMQP replies are broadcast
        # to every member of this set.
        self.wclients: set[WebSocketServerProtocol] = set()

    async def start(self):
        """Start the server and AMQP client."""
        self.client.add_reply_callback(self._handle_reply)
        await self.client.start()
        self.websocket_server = await serve(
            self._handle_websocket,
            *self.wparams,
        )
        return self

    async def stop(self):
        """Stop the server and AMQP client."""
        await self.client.stop()
        self.websocket_server.close()

    async def _handle_websocket(self, websocket: WebSocketServerProtocol):
        """Handle a connection to the websocket server."""
        # Register the client.
        self.wclients.add(websocket)
        try:
            async for data in websocket:
                try:
                    message = json.loads(data)
                except ValueError:
                    # Ignore frames that are not valid JSON.
                    continue
                if not isinstance(message, dict):
                    continue
                if "consumer" not in message or "command_string" not in message:
                    continue
                command_id = message.get("command_id", None)
                await self.client.send_command(
                    message["consumer"],
                    message["command_string"],
                    command_id=command_id,
                    await_command=False,
                )
        finally:
            # Deregister even when iteration ends with an exception (e.g. an
            # abnormal connection closure raised by the websocket iterator);
            # the original unconditional remove() was skipped in that case,
            # leaking the stale connection in ``wclients`` forever.
            self.wclients.discard(websocket)

    async def _handle_reply(self, reply: AMQPReply):
        """Broadcast a reply to the connected websockets."""
        message = reply.message
        data = dict(
            headers=message.headers,
            exchange=message.exchange,
            message_id=message.message_id,
            routing_key=message.routing_key,
            timestamp=message.timestamp.isoformat() if message.timestamp else None,
            body=reply.body,
        )
        broadcast(self.wclients, json.dumps(data))
| [
"noreply@github.com"
] | sdss.noreply@github.com |
fc362768e4ec1bd2b2882b5a20af0d37ee5f822a | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow_nightly/source2.7/tensorflow/contrib/model_pruning/python/layers/core_layers.py | 764ab620bc2227ff5e8e3f473d689e0e133e83d4 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 19,691 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the core layer classes for model pruning and its functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
MASK_COLLECTION = 'masks'
THRESHOLD_COLLECTION = 'thresholds'
MASKED_WEIGHT_COLLECTION = 'masked_weights'
WEIGHT_COLLECTION = 'kernel'
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
class _MaskedConv(base.Layer):
  """Abstract nD convolution layer (private, used as implementation base).
  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. The weight tensor of this layer is masked.
  If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.
  Arguments:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      length of the convolution window.
    strides: An integer or tuple/list of n integers,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
    dilation_rate: An integer or tuple/list of n integers, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self,
               rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    """Initializes the layer; see the class docstring for argument details."""
    super(_MaskedConv, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.rank = rank
    self.filters = filters
    # Size-like arguments are normalized to rank-length tuples so the rest of
    # the layer can index them per spatial dimension.
    self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = utils.normalize_tuple(strides, rank, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
                                               'dilation_rate')
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.input_spec = base.InputSpec(ndim=self.rank + 2)
  def build(self, input_shape):
    """Creates the mask, kernel, threshold (and optional bias) variables.

    The mask and threshold are non-trainable (trainable=False): they are
    driven by the pruning machinery via the collections registered below,
    not by gradient descent.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = 1 if self.data_format == 'channels_first' else -1
    if input_shape[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis].value
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    # Mask has the same shape as the kernel and starts as all-ones
    # (i.e. nothing pruned).
    self.mask = self.add_variable(
        name='mask',
        shape=kernel_shape,
        initializer=init_ops.ones_initializer(),
        trainable=False,
        dtype=self.dtype)
    self.kernel = self.add_variable(
        name='kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        trainable=True,
        dtype=self.dtype)
    # Scalar pruning threshold, updated externally through its collection.
    self.threshold = self.add_variable(
        name='threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
                                           MASKED_WEIGHT_NAME)
    # Register everything in the model-pruning collections so the pruning
    # library can find and update these tensors.
    ops.add_to_collection(MASK_COLLECTION, self.mask)
    ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
    ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
    ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
    if self.use_bias:
      self.bias = self.add_variable(
          name='bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    self.input_spec = base.InputSpec(
        ndim=self.rank + 2, axes={channel_axis: input_dim})
    self.built = True
  def call(self, inputs):
    """Applies the masked convolution, then bias add and activation."""
    outputs = nn.convolution(
        input=inputs,
        filter=self.masked_kernel,
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, self.rank + 2))
    if self.bias is not None:
      if self.data_format == 'channels_first':
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs, [
              outputs_shape[0], outputs_shape[1],
              outputs_shape[2] * outputs_shape[3], outputs_shape[4]
          ])
          outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
    if self.activation is not None:
      return self.activation(outputs)
    return outputs
  def compute_output_shape(self, input_shape):
    """Computes the output shape for either data_format layout."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_last':
      space = input_shape[1:-1]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                      [self.filters])
    else:
      space = input_shape[2:]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                      new_space)
class MaskedConv2D(_MaskedConv):
  """2D convolution layer (e.g. spatial convolution over images).
  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    """Thin wrapper: delegates to the masked nD base class with rank=2."""
    super(MaskedConv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        name=name,
        **kwargs)
class MaskedFullyConnected(base.Layer):
  """Fully-connected layer class with masked weights.
  This layer implements the operation:
  `outputs = activation(inputs.kernel + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).
  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.
  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.
  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the weight matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the weight matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """
  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    """Initializes the layer; see the class docstring for argument details."""
    super(MaskedFullyConnected, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.input_spec = base.InputSpec(min_ndim=2)
  def build(self, input_shape):
    """Creates the mask, kernel, threshold (and optional bias) variables.

    Mask and threshold are non-trainable; they are updated by the pruning
    machinery through the collections registered below.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    self.input_spec = base.InputSpec(
        min_ndim=2, axes={-1: input_shape[-1].value})
    self.kernel = self.add_variable(
        'kernel',
        shape=[input_shape[-1].value, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        dtype=self.dtype,
        trainable=True)
    # Mask matches the kernel shape and starts all-ones (nothing pruned).
    self.mask = self.add_variable(
        name='mask',
        shape=[input_shape[-1].value, self.units],
        initializer=init_ops.ones_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Scalar pruning threshold, driven externally via its collection.
    self.threshold = self.add_variable(
        name='threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
                                           MASKED_WEIGHT_NAME)
    ops.add_to_collection(MASK_COLLECTION, self.mask)
    ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
    ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
    ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
    if self.use_bias:
      self.bias = self.add_variable(
          'bias',
          shape=[
              self.units,
          ],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True
  def call(self, inputs):
    """Applies the masked matmul, then bias add and activation."""
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.masked_kernel,
                                       [[len(shape) - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.masked_kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs
  def compute_output_shape(self, input_shape):
    """Computes the output shape: input shape with last dim replaced by units."""
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
b81df29cd5bc086a10b9d34a9da102aebefd5724 | 47766dd4a7fe325ff53db9754d9962d54c3bce20 | /week3/review/3_iterator_filter.py | 80ea40d7c9f41ed54fa37c8d4e41776379da7cfc | [
"Apache-2.0"
] | permissive | skku-overflow/python-2020-2 | 1ed327a075b4ddd6b1feecb5cabc75e8913de726 | def09d9a8ff32ee085edaa5eca89ccc03c29af2a | refs/heads/main | 2023-03-06T16:13:40.439521 | 2021-02-21T07:23:07 | 2021-02-21T07:23:07 | 316,898,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py |
def is_even(n):
    """Return True if *n* is divisible by two."""
    return n % 2 == 0

l = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(l)
# Equivalent to list(filter(is_even, l)), written as a comprehension.
evens = [number for number in l if is_even(number)]
print(evens)

def is_first_even(arg):
    """Return True if the first member of the two-element pair *arg* is even."""
    first, _second = arg
    return first % 2 == 0

l2 = [(1, 2), (2, 3)]
print(l2)
print([pair for pair in l2 if is_first_even(pair)])
| [
"kdy1997.dev@gmail.com"
] | kdy1997.dev@gmail.com |
7c658b02af1216d35936435030ac30caedbcf48f | e79888cd68177e7ec5125270cdc52f888e211e78 | /hirao/chapter01/knock04.py | de4c0c4219da8267d76dd51e2e4cbcf9b31ea0fd | [] | no_license | cafenoctua/100knock2019 | ec259bee27936bdacfe0097d42f23cc7500f0a07 | 88717a78c4290101a021fbe8b4f054f76c9d3fa6 | refs/heads/master | 2022-06-22T04:42:03.939373 | 2019-09-03T11:05:19 | 2019-09-03T11:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | s = "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
drop = ",."
print(s)
for c in list(drop):
s = s.replace(c, "")
s = s.split()
display_list = [1, 5, 6, 7, 8, 9, 15, 16, 19]
ans_dict = {}
for i, word in enumerate(s):
if i + 1 in display_list:
ans = word[0]
else:
ans = word[:2]
ans_dict[ans] = i + 1
print(ans_dict)
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
d7212911ec66d3aef973e7604e6b39d5d3ee0e66 | 38242a2852f166267bf65b050ec95ed8cb877852 | /python_tips/manage.py | 7a69fcaa0b68220124d0d1e0fcbc5f915d12f9c7 | [
"MIT"
] | permissive | charlesDavid009/tweety | 33cb9307aba2f131caf6184de785117c6027aa53 | 52d1dcda47c12596a4d37e7e253a41b0130a6a61 | refs/heads/main | 2023-05-31T08:34:06.950645 | 2021-07-14T08:25:19 | 2021-07-14T08:25:19 | 383,505,671 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless the caller
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'python_tips.settings')
    try:
        # Imported lazily so a missing Django install surfaces as the
        # friendly error below rather than a bare ImportError at load time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
# Standard Django entry point: only run when executed as a script.
if __name__ == '__main__':
    main()
| [
"zuesghost1@gmail.com"
] | zuesghost1@gmail.com |
bec783ab3edecf3218d44d70daa4be040eb61f27 | a398e6f0d692130b64c12943b1efdaa6c3a0eac9 | /Chapter08/display_message.py | da194bda7a95a1ce1874642fe44ec068f5e26d7b | [] | no_license | xue9981/LP2 | c0792681c928348d6ff479315eb7e9d80be8a156 | 500d585ffe057b4a8522f9df2b2052e56bbf25f8 | refs/heads/master | 2020-07-30T23:09:06.089682 | 2019-10-01T16:07:22 | 2019-10-01T16:07:22 | 210,391,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | def display_message():
print("関数の使い方、作成の仕方について学ぶ")
display_message()
| [
"xue@cs.meiji.ac.jp"
] | xue@cs.meiji.ac.jp |
577a5e847afa46777a5a089b90511294bade82fa | 64261553066b91dd774f3e051658f83fd41dc415 | /MediumLevel/LongestSubstringWithoutRepeatingCharacters.py | f38736bb7f38f3fb696a229a234ae0b6ab97781e | [] | no_license | sahilshah1610/LeetCode | 1b5ec827cce3c4d66eda24fd16a1d7266ff8fc47 | 28e65a2caba7cf0c757195f76bcaf17e8dd28419 | refs/heads/master | 2023-03-02T15:36:15.325209 | 2021-02-12T01:48:15 | 2021-02-12T01:48:15 | 314,371,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if len(s)==0:
return 0
mapping = {}
maxLenght=start=0
for i in range(len(s)):
if s[i] in mapping and start <= mapping[s[i]]:
start= mapping[s[i]] +1
else:
maxLenght = max(maxLenght, i-start+1)
mapping[s[i]] = i
#print(start, maxLenght, mapping)
return maxLenght
if __name__ == "__main__":
s = "abcabcbb"
objSol = Solution()
print(objSol.lengthOfLongestSubstring(s)) | [
"sahil.shah56@gmail.com"
] | sahil.shah56@gmail.com |
809f94d693db05b2538535e2564bda1e23251399 | fc76bfd8519ad194a07d21130b74fe5794517056 | /PicoCTF 2017/Level 3/Cryptography/smallRSA/main.py | 65ddd2a955ddfa77ed7e1c613a96d45fb304dade | [] | no_license | t00lbox/CTF-Writeups | 7fe63bbe7a5af3f30d2d9542e2ac51ff828bc0e9 | 62b45e679dee40f75bd4cc914bc2e893a85f20a4 | refs/heads/master | 2020-03-21T14:48:58.859341 | 2018-06-26T00:06:01 | 2018-06-26T00:06:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | e = 165528674684553774754161107952508373110624366523537426971950721796143115780129435315899759675151336726943047090419484833345443949104434072639959175019000332954933802344468968633829926100061874628202284567388558408274913523076548466524630414081156553457145524778651651092522168245814433643807177041677885126141
n = 380654536359671023755976891498668045392440824270475526144618987828344270045182740160077144588766610702530210398859909208327353118643014342338185873507801667054475298636689473117890228196755174002229463306397132008619636921625801645435089242900101841738546712222819150058222758938346094596787521134065656721069
c = 60109758698128083867894286068285517856121577775873732971271767838094375540242140682860856525076716857853484762310661349595705965454241788627490154678487289327504291223547525832864143253412180183596307295520420578906308624860023542143928885210079178897416418810270090406582415840515326539954964020452551186119
# ``RSAwienerHacker`` is a helper module bundled with the write-up; its name
# indicates it implements Wiener's continued-fraction attack, which recovers
# a small private exponent d from the public key (e, n) — TODO confirm.
import RSAwienerHacker
d = RSAwienerHacker.hack_RSA(e,n)
# Show the recovered private exponent.
print(d)
# Textbook RSA decryption of the challenge ciphertext: m = c^d mod n.
m = pow(c,d,n)
print(bytearray.fromhex(hex(m).split('x')[1]).decode()) | [
"19695201+BOAKGP@users.noreply.github.com"
] | 19695201+BOAKGP@users.noreply.github.com |
9368e2c5913f38ac451218f6b28dc42e68db257c | c7178837cf98e1bc89027d8462c57727ea05de58 | /elections/urls.py | 4474d57f79fb8941a6cafe1abfa416547c0b3f7c | [] | no_license | sook1421/django_j | a04c223a49fb6f7cb9426eb929722eeb70857ba5 | 7d11792fcf723156d6e3667c5df7ef0201ce415f | refs/heads/master | 2021-09-06T23:04:42.955695 | 2018-02-13T05:26:09 | 2018-02-13T05:26:09 | 103,805,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^$', views.index),
]
| [
"sook1421@chol.com"
] | sook1421@chol.com |
13e7dfb79f5a9e988593ddae9d68927018ac1463 | f070c3acba7da2254adc2c12f80e54b830396d40 | /test/venv/bin/futurize | 65e98b939532e827e94109ba696ca6402ce2bfc3 | [] | no_license | liruidesysu/cloudCluster | 241a6ac472ecce9c6b4c966a44304128d258fc9b | fc558b464c3052f59cb1e6326aa22bade556b0c8 | refs/heads/master | 2022-11-06T03:51:31.954607 | 2019-08-22T12:47:53 | 2019-08-22T12:47:53 | 200,144,454 | 0 | 1 | null | 2022-03-29T21:56:02 | 2019-08-02T01:42:17 | Python | UTF-8 | Python | false | false | 252 | #!/home/liruide/Desktop/cloudCluster/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from libfuturize.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"liruide_read@outlook.com"
] | liruide_read@outlook.com | |
309503445ede6d2acdd13fee339865beec91d5f7 | 17625b317bdd2111453bdcf05f0e7cdb140b888e | /keepitpossible/backup/unity_lib.py | 82f854bcb2e98ae3d45d901386ba15f6466f3970 | [
"Apache-2.0"
] | permissive | ChenKuanSun/TheObstacleTowerChallenge | e93bdeb74dc0790d965111c9ee41c03472ba7a35 | c2de16930dd88949c0bc6a460f378beae3a04204 | refs/heads/master | 2021-02-27T06:31:10.861317 | 2020-03-07T07:35:25 | 2020-03-07T07:35:25 | 245,587,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,491 | py | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
# Modifications copyright 2019 Unity Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Obstacle Tower-specific utilities including Atari-specific network architectures.
This includes a class implementing minimal preprocessing, which
is in charge of:
. Converting observations to greyscale.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from obstacle_tower_env import ObstacleTowerEnv
import gym
from gym.spaces.box import Box
import numpy as np
import tensorflow as tf
import gin.tf
import cv2
slim = tf.contrib.slim
NATURE_DQN_OBSERVATION_SHAPE = (84, 84) # Size of downscaled Atari 2600 frame.
NATURE_DQN_DTYPE = tf.uint8 # DType of Atari 2600 observations.
NATURE_DQN_STACK_SIZE = 4 # Number of frames in the state stack.
@gin.configurable
def create_otc_environment(environment_path=None):
"""Wraps an Obstacle Tower Gym environment with some basic preprocessing.
Returns:
An Obstacle Tower environment with some standard preprocessing.
"""
assert environment_path is not None
env = ObstacleTowerEnv(environment_path, 0, retro=False)
env = OTCPreprocessing(env)
return env
def nature_dqn_network(num_actions, network_type, state):
"""The convolutional network used to compute the agent's Q-values.
Args:
num_actions: int, number of actions.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = slim.conv2d(net, 32, [8, 8], stride=4)
net = slim.conv2d(net, 64, [4, 4], stride=2)
net = slim.conv2d(net, 64, [3, 3], stride=1)
net = slim.flatten(net)
net = slim.fully_connected(net, 512)
q_values = slim.fully_connected(net, num_actions, activation_fn=None)
return network_type(q_values)
def rainbow_network(num_actions, num_atoms, support, network_type, state):
"""The convolutional network used to compute agent's Q-value distributions.
Args:
num_actions: int, number of actions.
num_atoms: int, the number of buckets of the value function distribution.
support: tf.linspace, the support of the Q-value distribution.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
weights_initializer = slim.variance_scaling_initializer(
factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = slim.conv2d(
net, 32, [8, 8], stride=4, weights_initializer=weights_initializer)
net = slim.conv2d(
net, 64, [4, 4], stride=2, weights_initializer=weights_initializer)
net = slim.conv2d(
net, 64, [3, 3], stride=1, weights_initializer=weights_initializer)
net = slim.flatten(net)
net = slim.fully_connected(
net, 512, weights_initializer=weights_initializer)
net = slim.fully_connected(
net,
num_actions * num_atoms,
activation_fn=None,
weights_initializer=weights_initializer)
logits = tf.reshape(net, [-1, num_actions, num_atoms])
probabilities = tf.contrib.layers.softmax(logits)
q_values = tf.reduce_sum(support * probabilities, axis=2)
return network_type(q_values, logits, probabilities)
def implicit_quantile_network(num_actions, quantile_embedding_dim,
network_type, state, num_quantiles):
"""The Implicit Quantile ConvNet.
Args:
num_actions: int, number of actions.
quantile_embedding_dim: int, embedding dimension for the quantile input.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
num_quantiles: int, number of quantile inputs.
Returns:
net: _network_type object containing the tensors output by the network.
"""
weights_initializer = slim.variance_scaling_initializer(
factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
state_net = tf.cast(state, tf.float32)
state_net = tf.div(state_net, 255.)
state_net = slim.conv2d(
state_net, 32, [8, 8], stride=4,
weights_initializer=weights_initializer)
state_net = slim.conv2d(
state_net, 64, [4, 4], stride=2,
weights_initializer=weights_initializer)
state_net = slim.conv2d(
state_net, 64, [3, 3], stride=1,
weights_initializer=weights_initializer)
state_net = slim.flatten(state_net)
state_net_size = state_net.get_shape().as_list()[-1]
state_net_tiled = tf.tile(state_net, [num_quantiles, 1])
batch_size = state_net.get_shape().as_list()[0]
quantiles_shape = [num_quantiles * batch_size, 1]
quantiles = tf.random_uniform(
quantiles_shape, minval=0, maxval=1, dtype=tf.float32)
quantile_net = tf.tile(quantiles, [1, quantile_embedding_dim])
pi = tf.constant(math.pi)
quantile_net = tf.cast(tf.range(
1, quantile_embedding_dim + 1, 1), tf.float32) * pi * quantile_net
quantile_net = tf.cos(quantile_net)
quantile_net = slim.fully_connected(
quantile_net,
state_net_size,
weights_initializer=weights_initializer)
# Hadamard product.
net = tf.multiply(state_net_tiled, quantile_net)
net = slim.fully_connected(
net, 512, weights_initializer=weights_initializer)
quantile_values = slim.fully_connected(
net,
num_actions,
activation_fn=None,
weights_initializer=weights_initializer)
return network_type(quantile_values=quantile_values, quantiles=quantiles)
@gin.configurable
class OTCPreprocessing(object):
"""A class implementing image preprocessing for OTC agents.
Specifically, this converts observations to greyscale. It doesn't
do anything else to the environment.
"""
def __init__(self, environment):
"""Constructor for an Obstacle Tower preprocessor.
Args:
environment: Gym environment whose observations are preprocessed.
"""
self.environment = environment
self.game_over = False
self.lives = 0 # Will need to be set by reset().
self.stage_reward = 0.0
self.previous_stage_time_remaining = 3000
self.previous_reward = 0
self.previous_keys = 0
self.previous_time_remaining = 3000
self.tableAction = self.createActionTable()
def createActionTable(self):
tableAction = []
for a in range(0, 3):
for b in range(0, 3):
for c in range(0, 2):
tableAction.append([a, b, c, 0])
# print("Action option: ", tableAction[6:12])
return tableAction
@property
def observation_space(self):
return self.environment.observation_space
@property
def action_space(self):
return self.environment.action_space
@property
def reward_range(self):
return self.environment.reward_range
@property
def metadata(self):
return self.environment.metadata
def reset(self):
"""Resets the environment. Converts the observation to greyscale,
if it is not.
Returns:
observation: numpy array, the initial observation emitted by the
environment.
"""
observation = self.environment.reset()
observation = observation[0]
self.stage_reward = 0.0
self.previous_stage_time_remaining = 3000
self.previous_reward = 0
self.previous_keys = 0
self.previous_time_remaining = 3000
self.previous_stage_time_remaining = 3000
if(len(observation.shape) > 2):
observation = cv2.cvtColor(cv2.convertScaleAbs(observation, alpha=(255.0 / 1.0)), cv2.COLOR_RGB2GRAY)
observation = cv2.resize(observation, (84, 84))
return observation
def render(self, mode):
"""Renders the current screen, before preprocessing.
This calls the Gym API's render() method.
Args:
mode: Mode argument for the environment's render() method.
Valid values (str) are:
'rgb_array': returns the raw ALE image.
'human': renders to display via the Gym renderer.
Returns:
if mode='rgb_array': numpy array, the most recent screen.
if mode='human': bool, whether the rendering was successful.
"""
return self.environment.render(mode)
def step(self, action):
"""Applies the given action in the environment. Converts the observation to
greyscale, if it is not.
Remarks:
* If a terminal state (from life loss or episode end) is reached, this may
execute fewer than self.frame_skip steps in the environment.
* Furthermore, in this case the returned observation may not contain valid
image data and should be ignored.
Args:
action: The action to be executed.
Returns:
observation: numpy array, the observation following the action.
reward: float, the reward following the action.
is_terminal: bool, whether the environment has reached a terminal state.
This is true when a life is lost and terminal_on_life_loss, or when the
episode is over.
info: Gym API's info data structure.
"""
observation, reward, game_over, info = self.environment.step(np.array(self.tableAction[int(action)-1]))
observation, keys, time_remaining = observation
self.stage_reward, previous_stage_time_remaining = self.reward_compute(done=game_over,
reward_total=self.stage_reward,
keys=keys,
previous_keys=self.previous_keys,
reward=reward,
previous_reward=self.previous_reward,
time_remaining=time_remaining,
previous_time_remaining=self.previous_time_remaining,
previous_stage_time_remaining=self.previous_stage_time_remaining)
self.previous_reward = reward
self.previous_keys = keys
self.previous_time_remaining = time_remaining
self.game_over = game_over
if(len(observation.shape) > 2):
observation = cv2.cvtColor(cv2.convertScaleAbs(observation, alpha=(255.0 / 1.0)), cv2.COLOR_RGB2GRAY)
observation = cv2.resize(observation, (84, 84))
return observation, self.stage_reward, game_over, info
def reward_compute(
self,
done,
reward_total,
keys,
previous_keys,
reward,
previous_reward,
time_remaining,
previous_time_remaining,
previous_stage_time_remaining):
# 定義獎勵公式
# reward 是從環境傳來的破關數
# keys 是撿到鑰匙的數量
# time_remaining 是剩餘時間
# 過關最大獎勵為10
# 一把鑰匙為5
# 時間果實暫時只給0.5,因為結束會結算剩餘時間,會有獎勵累加的問題。
# 如果過關,給予十倍過關獎勵 - (場景開始的時間-剩餘時間)/1000
# print("time_remaining ", time_remaining,
# " previous_time_remaining ", previous_time_remaining,
# " reward ", reward)
if reward < 0.2:
reward = 0
if (reward - previous_reward) > 0.8:
# ***如果剩餘時間比場景時間多會變成加分獎勵,可能會極大增加Agent吃時間果實的機率。
# ***另一種方式是剩餘的時間直接/1000加上去,這樣就沒有累加效果。
print("Pass ", reward, " Stage!")
reward_total += (reward - previous_reward) * 100 - \
(previous_stage_time_remaining - time_remaining)
# 過關之後把時間留到下一關,儲存這回合時間供下次計算過關使用
previous_time_remaining = time_remaining
previous_stage_time_remaining = time_remaining
# 假設過關的時候有順便吃到果實或鑰匙,所以預設為同時可以加成
if previous_keys > keys:
print("Get Key")
reward_total += 5
if previous_time_remaining < time_remaining and previous_time_remaining != 0:
print("Get time power up")
reward_total += 0.5
else:
reward_total -= 0.1
if done and previous_time_remaining > 100:
print("Agent died")
# 如果剩餘時間越多就掛點,扣更多
reward_total -= (10 + time_remaining / 100)
return reward_total, previous_stage_time_remaining
| [
"fgriasa123@gmail.com"
] | fgriasa123@gmail.com |
26030f347e76ad066fd9e10dc35bf688f5b153f8 | b8fd409ee054f68807e607b01f9c7e9bf9e3cbc8 | /newPollApp/asgi.py | 196033b636c49dbe18a7484aa38b2d7d265f3dc0 | [] | no_license | diptajit-dev-biswas/python-startapp | 1fa4e7b5ab9e75f0e139473e82eda1a31a98a90b | f6cd5000cf08a9dba0c5c6b903d70dd88e13449d | refs/heads/main | 2023-07-17T18:54:18.826199 | 2021-08-16T08:11:30 | 2021-08-16T08:11:30 | 396,665,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for newPollApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newPollApp.settings')
application = get_asgi_application()
| [
"diptajit@appstangodev.com"
] | diptajit@appstangodev.com |
9a0274111f19a917ed032d7aca9be73c9e930a24 | 02a9e1be198a82793a9e9aea1f0f46cde0e8c85e | /prac_01/broken_score.py | feaede5759932a9410dbe13cbc382d805aaeb5cf | [] | no_license | DaniRyland-Lawson/CP1404-cp1404practicals- | 04cfce69f55b26cf7cf30d2ea2a1198d9514c527 | 8b8776ca5a8dee8f8c21697c79dbbd2cd95320f7 | refs/heads/master | 2020-07-06T22:50:21.130873 | 2019-11-12T03:45:50 | 2019-11-12T03:45:50 | 203,162,819 | 0 | 0 | null | 2019-11-12T03:49:59 | 2019-08-19T11:52:56 | Python | UTF-8 | Python | false | false | 384 | py | """
CP1404/CP5632 - Practical
Broken program to determine score status
"""
# In this section, it took me a little while to realise it is best in order. Highest to lowest.
score = float(input("Enter score: "))
if score < 0 or score >100:
print("Invalid score")
elif score >= 90:
print("Excellent")
elif score >= 50:
print("Passable")
else:
print("Bad") | [
"noreply@github.com"
] | DaniRyland-Lawson.noreply@github.com |
0b82a15f2668611f51876dc75eb02c6ccb69ac64 | d320dc1177bd40be1ace9e5b76f25b93a22a6e25 | /appium_test/app_base.py | f12af230cec4031049b0a89eb5d3a39812e07a82 | [] | no_license | qq1403332591/Hogwarts15 | 26c6424246ee3677920a5f44967d1ba8c611bc85 | d6cd852fff89b8f901e449276eb95d64ec3e14f3 | refs/heads/master | 2023-01-09T13:46:01.067470 | 2020-11-11T13:15:51 | 2020-11-11T13:28:15 | 304,650,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from appium import webdriver
class Base():
def setup(self):
desired_caps = {}
desired_caps['platformName'] = 'Android' # 打开什么平台的app,固定的 > 启动安卓平台
desired_caps['platformVersion'] = '10' # 安卓系统的版本号:adb shell getprop ro.build.version.release
desired_caps['deviceName'] = 'V1938T' # 手机/模拟器的型号:adb shell getprop ro.product.model
desired_caps['appPackage'] = 'com.tencent.wework' # app的名字:adb shell dumpsys package XXX
desired_caps[
'appActivity'] = '.launch.LaunchSplashActivity' # 同上↑ # .pages.splash.SplashActivity pages.main.MainActivity
desired_caps['automationName'] = 'uiautomator2'
desired_caps['unicodeKeyboard'] = True # 为了支持中文
desired_caps['resetKeyboard'] = True # 设置成appium自带的键盘
desired_caps['noReset'] = True # 使用app的缓存
desired_caps['skipDeviceInitialization'] = True # 跳过设备初始化
# desired_caps['autoLaunch'] = False # 直接使用打开的app进行测试
# desired_caps['settings[settingsKey]'] = 0 # 动态元素查找的最大等待时间
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
self.driver.implicitly_wait(5)
def teardown(self):
self.driver.quit()
| [
"1403332591@qq.com"
] | 1403332591@qq.com |
85c63654cacf60d20305f0e502209f0294028474 | 0734f6b90379ba0850426db18a9470a161151805 | /setup.py | 46572860be6de6e827439be07ebb60a686330c3e | [
"MIT"
] | permissive | alexhayes/schematics-xml | af69c6ed4437603bc478d0fc66a4f79d0a204d16 | 36ac15a5e2891d238bebe0b3bb3ef8d5e8425ed9 | refs/heads/develop | 2022-08-01T00:03:14.428519 | 2016-11-11T04:54:32 | 2016-11-11T04:54:32 | 72,046,221 | 2 | 3 | MIT | 2022-07-06T19:48:26 | 2016-10-26T21:32:10 | Python | UTF-8 | Python | false | false | 3,726 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
from setuptools.command.test import test
is_setuptools = True
except ImportError:
raise
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages # noqa
from setuptools.command.test import test # noqa
is_setuptools = False
import os
import sys
import codecs
NAME = 'schematics-xml'
entrypoints = {}
extra = {}
# -*- Classifiers -*-
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: MIT License
Topic :: Other/Nonlisted Topic
Topic :: Software Development :: Libraries :: Python Modules
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: Microsoft :: Windows
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
# -*- Distribution Meta -*-
import re
re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)')
re_vers = re.compile(r'VERSION\s*=.*?\((.*?)\)')
re_doc = re.compile(r'^"""(.+?)"""')
rq = lambda s: s.strip("\"'")
def add_default(m):
attr_name, attr_value = m.groups()
return ((attr_name, rq(attr_value)), )
def add_version(m):
v = list(map(rq, m.groups()[0].split(', ')))
return (('VERSION', '.'.join(v[0:3]) + ''.join(v[3:])), )
def add_doc(m):
return (('doc', m.groups()[0]), )
pats = {re_meta: add_default,
re_vers: add_version,
re_doc: add_doc}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'schematics_xml/__init__.py')) as meta_fh:
meta = {}
for line in meta_fh:
if line.strip() == '# -eof meta-':
break
for pattern, handler in pats.items():
m = pattern.match(line.strip())
if m:
meta.update(handler(m))
# -*- Installation Requires -*-
py_version = sys.version_info
def strip_comments(l):
return l.split('#', 1)[0].strip()
def _pip_requirement(req):
if req.startswith('-r '):
_, path = req.split()
return reqs(*path.split('/'))
return [req]
def _reqs(*f):
return [
_pip_requirement(r) for r in (
strip_comments(l) for l in open(
os.path.join(here, 'requirements', *f)).readlines()
) if r]
def reqs(*f):
return [req for subreq in _reqs(*f) for req in subreq]
install_requires = reqs('default.txt')
# -*- Tests Requires -*-
tests_require = reqs('test.txt')
# -*- Long Description -*-
if os.path.exists('README.rst'):
long_description = codecs.open('README.rst', 'r', 'utf-8').read()
else:
long_description = 'See http://pypi.python.org/pypi/schematics-xml'
setup(
name=NAME,
version=meta['VERSION'],
description=meta['doc'],
author=meta['author'],
author_email=meta['contact'],
url=meta['homepage'],
platforms=['any'],
license='MIT',
packages=find_packages(),
package_data={'schematics_xml': ['tests/templates/*.html']},
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
test_suite='nose.collector',
classifiers=classifiers,
entry_points=entrypoints,
long_description=long_description,
keywords=['schematics', 'xml', 'model', 'modelling', 'dicttoxml', 'xmltodict'],
)
| [
"alex@commoncode.io"
] | alex@commoncode.io |
3c22bf817ee148fbc70da528dfb8cff5991cedb0 | f12fac0dd5c9c9eeedff16377d1f57a3cd02ef32 | /Python游戏编程入门/02.初识Pygame:Pie游戏/绘制弧形.py | 8031255f9f3580e0e721331544bdda1f67ae9357 | [] | no_license | SesameMing/PythonPygame | 61fe09a38d1729963b86f348b349572760676195 | ca0554427cd30838d56630e8b1e04aa0e26834a1 | refs/heads/master | 2020-12-07T21:23:56.271193 | 2016-11-25T06:38:06 | 2016-11-25T06:38:06 | 66,639,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python3
# -*-coding:utf-8-*-
# Author:SesameMing <blog.v-api.cn>
# Email:admin@v-api.cn
# Time:2016-11-25 12:51
import sys
import math
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Arcs")
while True:
for event in pygame.event.get():
if event.type in (QUIT, KEYDOWN):
sys.exit()
screen.fill((0, 0, 200))
color = 255, 0, 255
position = 200, 150, 200, 200
start_angle = math.radians(0)
end_angle = math.radians(180)
width = 8
pygame.draw.arc(screen, color, position, start_angle, end_angle, width)
pygame.display.update() | [
"admin@v-api.cn"
] | admin@v-api.cn |
04e3a1cfd126c0710557c5f5944b73240af4deec | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/vb25/plugins/TexSwirl.py | 9ca7e67f86475efdb3be99c3fa816a582b516141 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | #
# V-Ray/Blender
#
# http://vray.cgdo.ru
#
# Author: Andrey M. Izrantsev (aka bdancer)
# E-Mail: izrantsev@cgdo.ru
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
#
# Blender module
import bpy
from bpy.props import *
# V-Ray/Blender modules
from vb25.utils import *
from vb25.ui import ui
from vb25.plugins import *
from vb25.texture import *
from vb25.uvwgen import *
TYPE = 'TEXTURE'
ID = 'TexSwirl'
PLUG = 'TexSwirl'
NAME = 'Swirl'
DESC = "TexSwirl"
PID = 15
PARAMS = (
'uvwgen',
'color1',
'color2',
'swirl_intensity',
'color_contrast',
'swirl_amount',
'constant_detail',
'center_x',
'center_y',
'random_seed',
'twist',
)
def add_properties(rna_pointer):
class TexSwirl(bpy.types.PropertyGroup):
pass
bpy.utils.register_class(TexSwirl)
rna_pointer.TexSwirl= PointerProperty(
name= "TexSwirl",
type= TexSwirl,
description= "V-Ray TexSwirl settings"
)
TexSwirl.color1= FloatVectorProperty(
name= "Color 1",
description= "First color",
subtype= 'COLOR',
min= 0.0,
max= 1.0,
soft_min= 0.0,
soft_max= 1.0,
default= (1,1,1)
)
# color2
TexSwirl.color2= FloatVectorProperty(
name= "Color 2",
description= "Second color",
subtype= 'COLOR',
min= 0.0,
max= 1.0,
soft_min= 0.0,
soft_max= 1.0,
default= (0,0,0)
)
# swirl_intensity
TexSwirl.swirl_intensity= FloatProperty(
name= "Swirl Intensity",
description= "Swirl Intensity",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 2
)
# color_contrast
TexSwirl.color_contrast= FloatProperty(
name= "Color Contrast",
description= "Color Contrast",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 0.4
)
# swirl_amount
TexSwirl.swirl_amount= FloatProperty(
name= "Swirl Amount",
description= "Swirl Amount",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 1
)
# constant_detail
TexSwirl.constant_detail= IntProperty(
name= "Constant Detail",
description= "Constant Detail",
min= 0,
max= 100,
soft_min= 0,
soft_max= 10,
default= 4
)
# center_x
TexSwirl.center_x= FloatProperty(
name= "Center X",
description= "Center Position X",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= -0.5
)
# center_y
TexSwirl.center_y= FloatProperty(
name= "Center Y",
description= "Center Position Y",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= -0.5
)
# random_seed
TexSwirl.random_seed= FloatProperty(
name= "Random Seed",
description= "Random Seed",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 0
)
# twist
TexSwirl.twist= FloatProperty(
name= "Twist",
description= "Twist",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 1
)
'''
OUTPUT
'''
def write(bus):
scene= bus['scene']
ofile= bus['files']['textures']
slot= bus['mtex']['slot']
texture= bus['mtex']['texture']
tex_name= bus['mtex']['name']
uvwgen= write_uvwgen(bus)
TexSwirl= getattr(texture.vray, PLUG)
ofile.write("\n%s %s {"%(PLUG, tex_name))
PLUGINS['TEXTURE']['TexCommon'].write(bus)
for param in PARAMS:
if param == 'uvwgen':
value= uvwgen
else:
value= getattr(TexSwirl, param)
ofile.write("\n\t%s= %s;"%(param, a(scene, value)))
ofile.write("\n}\n")
return tex_name
'''
GUI
'''
class VRAY_TP_TexSwirl(ui.VRayTexturePanel, bpy.types.Panel):
bl_label = NAME
COMPAT_ENGINES = {'VRAY_RENDER','VRAY_RENDER_PREVIEW'}
@classmethod
def poll(cls, context):
tex = context.texture
return tex and tex.type == 'VRAY' and tex.vray.type == ID and ui.engine_poll(cls, context)
def draw(self, context):
wide_ui = context.region.width > ui.narrowui
layout = self.layout
tex= context.texture
TexSwirl= getattr(tex.vray, PLUG)
split= layout.split()
col= split.column()
col.prop(TexSwirl, 'color1', text="")
if wide_ui:
col= split.column()
col.prop(TexSwirl, 'color2', text="")
split= layout.split()
col= split.column(align=True)
col.prop(TexSwirl, 'swirl_amount', text="Amount")
col.prop(TexSwirl, 'swirl_intensity', text="Intensity")
col.prop(TexSwirl, 'color_contrast', text="Color Contrast")
if not wide_ui:
split= layout.split()
col= split.column(align=True)
col.prop(TexSwirl, 'twist')
col.prop(TexSwirl, 'constant_detail')
split= layout.split()
row= split.row(align=True)
row.prop(TexSwirl, 'center_x')
row.prop(TexSwirl, 'center_y')
split= layout.split()
col= split.column()
col.prop(TexSwirl, 'random_seed', text="Seed")
def GetRegClasses():
return (
VRAY_TP_TexSwirl,
)
def register():
for regClass in GetRegClasses():
bpy.utils.register_class(regClass)
def unregister():
for regClass in GetRegClasses():
bpy.utils.unregister_class(regClass)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
aab147e4b5ac5e64ffb29c9993c9b62cc92184ba | 562d45351e4e23f5548a325b884bd57f93e5373d | /venv/bin/ckeygen | cf97c6fe71ee55d25b62eb485e30a320fc7c17a0 | [] | no_license | manjumugali/BridgeLabz_PythonPrograms | 401efc320804d74017b1e6cbe99b45fd75193243 | ff4093cc9b0256ede9f7ef3a0e042fe352263f82 | refs/heads/master | 2020-04-19T22:17:07.050403 | 2019-02-13T05:18:56 | 2019-02-13T05:18:56 | 168,465,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | #!/home/admin1/Desktop/chatApp/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==18.9.0','console_scripts','ckeygen'
__requires__ = 'Twisted==18.9.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==18.9.0', 'console_scripts', 'ckeygen')()
)
| [
"manjumugali111@gmail.com"
] | manjumugali111@gmail.com | |
299ff26f17f629c6eed7981129ec94c0daf43684 | f5f92ee9f6a4ae8c7a1c029ccab6386b338069d3 | /wk3/f8.py | 1e8bb4259716d19e5d3e8fe13d7fc39ecf21701f | [] | no_license | JustinAnthonyB/Python | 7300a6e25a314b202c098d08e49037557046fb4c | e647376cf43c6ccdceb38f5d7d34ccda7550ea27 | refs/heads/master | 2020-12-10T03:44:34.555223 | 2020-04-02T19:28:56 | 2020-04-02T19:28:56 | 233,493,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | """
Ask the user to enter a password
Determine if password is
Not acceptable
less than 5 chars
Weak
between 5 and 7
Medium
between 8 and 10 chars
Strong
11 chars or more
Using an if statement, output whether text
meets requirement
Input from user?
1: no, default = text
outputs(s):
message of whether text meets requirement
data structures / sanitation:
no. not really
"""
default = input("Enter a password. 8 char or more")
message = "Password is "
strength = "unacceptable"
password_length = len(default)
if password_length >= 5 and password_length < 8:
strength = "weak"
elif password_length > 7 and password_length < 11:
strength = "medium"
elif password_length > 10:
strength = "strong"
print(message + strength)
| [
"noreply@github.com"
] | JustinAnthonyB.noreply@github.com |
5eb0a435e2e4872f4a9c5255d5028bd2f73e4b3d | 89bbb2e46f844b2be046185a4ee54b6d8986a53b | /instagram/urls.py | e97967c6593fe974c8aae481aecf75dd5aa03231 | [
"Beerware"
] | permissive | SL0KY/instagram-like | aedb0ac222fac3350915199c0979e568ca0314e9 | 1641d44d03b0fb51a51cb96520fed279bd6c2e86 | refs/heads/master | 2020-09-21T14:02:01.761038 | 2020-02-13T09:08:58 | 2020-02-13T09:08:58 | 224,810,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | """insta URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from rest_framework import routers
from photos.api import views as photo_api_views
from photos import views as photo_views
router = routers.DefaultRouter()
router.register(r'photos', photo_api_views.PhotoViewSet)
urlpatterns = [
path('api/v1', include(router.urls)),
path('^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path("", photo_views.index),
path('accounts/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('photos/', photo_views.index, name="list_photos"),
path("photos/create", photo_views.PhotoCreateView.as_view(), name='create_photo'),
path("photos/like", photo_views.PhotoLikeView.as_view(), name="like_photo"),
path("photos/<int:pk>", photo_views.PhotoDetailView.as_view(), name="detail_photo"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| [
"dominguesquentin@gmail.com"
] | dominguesquentin@gmail.com |
498848a1ce67711fa364584705c0f90477f76fb5 | 90e049109be38889523b265d2683a4f29a57da30 | /flink-python/pyflink/table/tests/test_table_environment_api.py | 64080f1e53b36dd5df4f0c09993ae8772e33988c | [
"BSD-3-Clause",
"MIT",
"OFL-1.1",
"ISC",
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC-BY-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-proprietary-license",
"Lic... | permissive | Jasonpengrui/flink | bc7cf1baced87a72a75e2bd0e326a137ed0ab529 | 81a5212cb99b860de9c7384fa14caaa3f5af1c1f | refs/heads/master | 2020-06-10T16:44:23.895203 | 2019-12-09T06:35:08 | 2019-12-09T06:35:08 | 193,673,904 | 0 | 0 | Apache-2.0 | 2019-06-25T09:09:15 | 2019-06-25T09:09:14 | null | UTF-8 | Python | false | false | 10,360 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
################################################################################
import datetime
import os
from py4j.compat import unicode
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.table_environment import BatchTableEnvironment, StreamTableEnvironment
from pyflink.table.table_config import TableConfig
from pyflink.table.types import DataTypes, RowType
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase
class StreamTableEnvironmentTests(PyFlinkStreamTableTestCase):
    """End-to-end tests of the streaming ``TableEnvironment`` Python API:
    source/sink registration, catalog listing, explain, SQL query/update
    and round-tripping of ``TableConfig`` values."""

    def test_register_table_source_scan(self):
        # A registered CSV source must be reachable via scan() as a catalog table.
        t_env = self.t_env
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
        t_env.register_table_source("Source", csv_source)
        result = t_env.scan("Source")
        self.assertEqual(
            'CatalogTable: (path: [default_catalog, default_database, Source], fields: [a, b, c])',
            result._j_table.getQueryOperation().asSummaryString())

    def test_register_table_sink(self):
        # Insert one row into a registered test sink and verify it arrives.
        t_env = self.t_env
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "Sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]).insert_into("Sinks")
        t_env.exec_env().execute()
        actual = source_sink_utils.results()
        expected = ['1,Hi,Hello']
        self.assert_equals(actual, expected)

    def test_from_table_source(self):
        # Creating a Table directly from a TableSource (without registration).
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
        result = self.t_env.from_table_source(csv_source)
        self.assertEqual(
            'TableSource: (fields: [a, b, c])',
            result._j_table.getQueryOperation().asSummaryString())

    def test_list_tables(self):
        # list_tables() must report every registered source and sink by name.
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        data = []
        csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
        t_env = self.t_env
        t_env.register_table_source("Orders", csv_source)
        t_env.register_table_sink(
            "Sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        t_env.register_table_sink(
            "Results",
            source_sink_utils.TestAppendSink(field_names, field_types))
        actual = t_env.list_tables()
        expected = ['Orders', 'Results', 'Sinks']
        self.assert_equals(actual, expected)

    def test_explain(self):
        # explain() on a projected table should return a plan as text.
        schema = RowType()\
            .add('a', DataTypes.INT())\
            .add('b', DataTypes.STRING())\
            .add('c', DataTypes.STRING())
        t_env = self.t_env
        t = t_env.from_elements([], schema)
        result = t.select("1 + a, b, c")
        actual = t_env.explain(result)
        # On Python 2 the plan may come back as ``unicode``.
        assert isinstance(actual, str) or isinstance(actual, unicode)

    def test_sql_query(self):
        # A SELECT issued through sql_query() should produce the projected rows.
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        result = t_env.sql_query("select a + 1, b, c from %s" % source)
        result.insert_into("sinks")
        t_env.exec_env().execute()
        actual = source_sink_utils.results()
        expected = ['2,Hi,Hello', '3,Hello,Hello']
        self.assert_equals(actual, expected)

    def test_sql_update(self):
        # An INSERT INTO issued through sql_update() should land in the sink.
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        t_env.sql_update("insert into sinks select * from %s" % source)
        t_env.exec_env().execute("test_sql_job")
        actual = source_sink_utils.results()
        expected = ['1,Hi,Hello', '2,Hello,Hello']
        self.assert_equals(actual, expected)

    def test_sql_update_with_query_config(self):
        # Same as test_sql_update but passing an explicit StreamQueryConfig.
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sinks",
            source_sink_utils.TestAppendSink(field_names, field_types))
        query_config = t_env.query_config()
        query_config.with_idle_state_retention_time(
            datetime.timedelta(days=1), datetime.timedelta(days=2))
        t_env.sql_update("insert into sinks select * from %s" % source, query_config)
        t_env.exec_env().execute("test_sql_job")
        actual = source_sink_utils.results()
        expected = ['1,Hi,Hello', '2,Hello,Hello']
        self.assert_equals(actual, expected)

    def test_query_config(self):
        # Retention times are expressed in milliseconds by the Java side.
        query_config = self.t_env.query_config()
        query_config.with_idle_state_retention_time(
            datetime.timedelta(days=1), datetime.timedelta(days=2))
        assert query_config.get_max_idle_state_retention_time() == 2 * 24 * 3600 * 1000
        assert query_config.get_min_idle_state_retention_time() == 24 * 3600 * 1000

    def test_create_table_environment(self):
        table_config = TableConfig()
        table_config.set_max_generated_code_length(32000)
        table_config.set_null_check(False)
        table_config.set_timezone("Asia/Shanghai")
        table_config.set_built_in_catalog_name("test_catalog")
        table_config.set_built_in_database_name("test_database")
        env = StreamExecutionEnvironment.get_execution_environment()
        t_env = StreamTableEnvironment.create(env, table_config)
        readed_table_config = t_env.get_config()
        # Bug fix: every assertion must run against the config read back from
        # the environment.  The original asserted the last two values on
        # ``table_config`` itself, which is tautological and could never
        # detect a round-trip regression (the batch variant below already
        # checks ``readed_table_config`` for all five values).
        self.assertFalse(readed_table_config.get_null_check())
        self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
        self.assertEqual(readed_table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(readed_table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(readed_table_config.get_built_in_database_name(), "test_database")
class BatchTableEnvironmentTests(PyFlinkBatchTableTestCase):
    """Tests of the batch ``TableEnvironment`` Python API: explain,
    ``TableConfig`` setters/getters and environment creation."""

    def test_explain(self):
        # explain() on a projected batch table should return a plan as text.
        source_path = os.path.join(self.tempdir + '/streaming.csv')
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        data = []
        csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
        t_env = self.t_env
        t_env.register_table_source("Source", csv_source)
        source = t_env.scan("Source")
        result = source.alias("a, b, c").select("1 + a, b, c")
        actual = t_env.explain(result)
        # On Python 2 the plan may come back as ``unicode``.
        self.assertIsInstance(actual, (str, unicode))

    def test_table_config(self):
        # Every TableConfig setter must be observable through its getter.
        table_config = TableConfig()
        table_config.set_timezone("Asia/Shanghai")
        table_config.set_max_generated_code_length(64000)
        table_config.set_null_check(True)
        table_config.set_built_in_catalog_name("test_catalog")
        table_config.set_built_in_database_name("test_database")
        self.assertTrue(table_config.get_null_check())
        self.assertEqual(table_config.get_max_generated_code_length(), 64000)
        self.assertEqual(table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(table_config.get_built_in_database_name(), "test_database")

    def test_create_table_environment(self):
        # A config passed to create() must round-trip through get_config().
        table_config = TableConfig()
        table_config.set_max_generated_code_length(32000)
        table_config.set_null_check(False)
        table_config.set_timezone("Asia/Shanghai")
        table_config.set_built_in_catalog_name("test_catalog")
        table_config.set_built_in_database_name("test_database")
        env = ExecutionEnvironment.get_execution_environment()
        t_env = BatchTableEnvironment.create(env, table_config)
        readed_table_config = t_env.get_config()
        self.assertFalse(readed_table_config.get_null_check())
        self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
        self.assertEqual(readed_table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(readed_table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(readed_table_config.get_built_in_database_name(), "test_database")
| [
"sunjincheng121@gmail.com"
] | sunjincheng121@gmail.com |
40705c7d2144ce3f9aa84b4ded8f1f39a82e78c7 | 60ebf96283c09886366b7b03cf13a53551a9e33a | /movie.py | 2f9a266ca19eba3f04aad4dd762a0a2c3e773059 | [] | no_license | angietibbs618/movies | 85abee6564c21f0d74821d87d8ee56d0d97d09b6 | 792e1ab824e1bff5abd36456b9ee83e1d98ff554 | refs/heads/master | 2021-01-19T11:35:19.897539 | 2017-04-26T14:19:00 | 2017-04-26T14:19:00 | 87,980,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import webbrowser
class Movie():
    """Simple record of one movie: title, storyline, poster URL and
    YouTube trailer URL."""

    # Ratings the app recognises; informational only — nothing enforces them yet.
    valid_ratings = ["G", "PG", "PG-13", "R"]

    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Store the four pieces of movie metadata on the instance."""
        self.trailer_youtube_url = trailer_youtube
        self.poster_image_url = poster_image
        self.storyline = movie_storyline
        self.title = movie_title

    def show_trailer(self):
        """Open this movie's YouTube trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
| [
"noreply@github.com"
] | angietibbs618.noreply@github.com |
9336731a6cd88e7a8b99c2564c74415e27d8d968 | 1e697dc34e0e4ca3fc877209e21a99d15074e71c | /0x02-python-import_modules/1-calculation.py | f3f7b31bc85c69092d619f0b53f1055e8bd128f4 | [] | no_license | ZoltanMG/holbertonschool-higher_level_programming | 97e51030e2203efd0b9bc0539c9916e58e7b50fb | c50a7263531e115cdbd285bb67de34fed32a41a1 | refs/heads/master | 2023-03-03T03:17:28.447086 | 2021-02-13T02:40:33 | 2021-02-13T02:40:33 | 259,444,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #!/usr/bin/python3
if __name__ == '__main__':
    from calculator_1 import add, sub, mul, div

    # Demonstrate the four calculator operations on one fixed operand pair.
    left, right = 10, 5
    for symbol, operation in (('+', add), ('-', sub), ('*', mul), ('/', div)):
        print('{} {} {} = {}'.format(left, symbol, right, operation(left, right)))
| [
"zoltanmoragarcia@gmail.com"
] | zoltanmoragarcia@gmail.com |
c04479133e596d0015f9df6569bf7d2c2283e6d1 | b23c6c02d9b54c987bca2e36c3506cf80fa28239 | /python databse connectivity progs/bind variable.py | a9bf8a8d9dcc71bd722251121197416765b6ba4e | [] | no_license | nishikaverma/Python_progs | 21190c88460a79f5ce20bb25d1b35f732fadd642 | 78f0cadde80b85356b4cb7ba518313094715aaa5 | refs/heads/master | 2022-06-12T14:54:03.442837 | 2020-05-08T10:28:58 | 2020-05-08T10:28:58 | 262,293,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import cx_Oracle
# Initialise the handles up-front so the ``finally`` block can test them
# safely: previously, if connect() or cursor() raised, ``conn``/``cur`` were
# never assigned and the cleanup itself crashed with a NameError.
conn = None
cur = None
try:
    conn = cx_Oracle.connect("system/oracle123@localhost/orcl")
    print("connection established")
    cur = conn.cursor()
    print("cursor created!")
    print("***********************")
    # Show the table contents before the insert.
    cur.execute("Select Book_name,Book_price from Books")
    for x in cur:
        print(x)
    print("***********************")
    name = input("enter book name : ")
    price = int(input("enter book price"))
    # Bind variables (:1, :2) keep the statement safe from SQL injection.
    cur.execute("Insert into Books (Book_name,Book_price)values(:1,:2)", (name, price))
    n = cur.rowcount
    print(n, 'rows inserted')
    conn.commit()
    # Show the table contents again to confirm the insert.
    cur.execute("Select Book_name,Book_price from Books")
    for x in cur:
        print(x)
    print("************************")
except cx_Oracle.DatabaseError as e:
    print("Error in connectin: ", e)
finally:
    # Close each resource only if it was actually created.
    if cur is not None:
        cur.close()
        print("curser closed!")
    if conn is not None:
        conn.close()
        print("connection closed!")
"nishika.verma@live.com"
] | nishika.verma@live.com |
c9e5718005ed29e21e424f7db8f4e8370bf44c9a | b23a9b67159c94d881513d3c19351d591329476f | /mc/OffPolicyMc.py | 2ef1b7efe5a29f7169ed8d14df05c038efa94954 | [] | no_license | zuofeng1997/rl | 1a0933296cd2319375fc10bf69fde74e0fe15be1 | 90d64bbc5335e255417b0d1a91b5555f17d4d8cb | refs/heads/master | 2020-03-22T11:28:41.107250 | 2018-08-21T07:15:40 | 2018-08-21T07:15:40 | 139,973,196 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,837 | py | from copy import deepcopy
import numpy as np
class FiniteMcModel:
    """Off-policy Monte Carlo control with weighted importance sampling
    over finite state/action spaces.

    The greedy target policy is evaluated while acting under an
    epsilon-soft behaviour policy (cf. Sutton & Barto, ch. 5).
    State/action spaces may be given as ints (dense, list-backed tables)
    or as iterables of hashable keys (dict-backed tables).
    """

    def __init__(self,state_space,action_space,gamma=0.99,epsilon=0.3):
        # gamma: discount factor; epsilon: exploration rate of the behaviour policy.
        self.gamma = gamma
        self.epsilon = epsilon
        self.Q = None
        # _act_rep records which table representation is in use so the
        # other methods can branch between list and dict lookups.
        if isinstance(action_space,int):
            self.action_space = np.arange(action_space)
            actions = [0]*action_space
            self._act_rep = "list"
        else:
            self.action_space = action_space
            actions = {k:0 for k in action_space}
            self._act_rep = "dict"
        if isinstance(state_space,int):
            self.state_space = np.arange(state_space)
            self.Q = [deepcopy(actions) for _ in range(state_space)]
        else:
            self.state_space = state_space
            self.Q = {k:deepcopy(actions) for k in state_space}
        # count: per-(state, action) visit counters.
        # C: cumulative importance-sampling weights (denominator of the
        #    weighted-importance-sampling update in update_Q).
        self.count = deepcopy(self.Q)
        self.C = deepcopy(self.Q)

    def policy(self,action,state): #target policy
        """Greedy target policy: 1 if *action* maximises Q[state], else 0."""
        if self._act_rep == "list":
            if action == np.argmax(self.Q[state]):
                return 1
            return 0
        elif self._act_rep == "dict":
            if action == max(self.Q[state], key=self.Q[state].get):
                return 1
            return 0

    def behave(self, action, state): # behave policy
        """Epsilon-soft behaviour policy: uniform exploration mixed with
        the greedy target policy."""
        return self.epsilon / len(self.action_space) + (1 - self.epsilon) * self.policy(action, state)

    def generate_returns(self,ep):
        """Walk the episode *ep* backwards, computing discounted returns G
        and importance-sampling weights W per (observation, action) pair.

        ``ep`` is a sequence of (observation, action, reward) tuples.
        The walk stops at the first step where the greedy target policy
        disagrees with the taken action (its weight would be 0 onward).
        """
        G = {} # return on state
        cumC = 0 # cumulative reward
        W = {}
        w = 1
        for tpl in reversed(ep):
            observation, action, reward = tpl
            G[(observation, action)] = cumC = reward + self.gamma * cumC
            self.C[observation][action] += w
            W[(observation,action)] = w
            if self.policy(action,observation) == 0:
                break
            # Importance ratio pi/b; pi(a|s) == 1 here, so only 1/b remains.
            w = w*(1/self.behave(action,observation))
        return G,W

    def choose_action(self, policy, state):
        """Sample an action in *state* with probabilities given by *policy*
        (a callable (action, state) -> probability)."""
        probs = [policy(a, state) for a in self.action_space]
        return np.random.choice(self.action_space, p=probs)

    def update_Q(self,ep):
        """Weighted-importance-sampling update of Q from one episode."""
        G,W = self.generate_returns(ep)
        for s in G:
            state,action = s
            self.count[state][action] += 1
            # Incremental update: Q += (W/C) * (G - Q).
            self.Q[state][action] += (W[(state,action)]/self.C[state][action])*(G[s]-self.Q[state][action])

    def score(self,env,policy,n_samples=100):
        """Average cumulative reward of *policy* over *n_samples* episodes
        in *env* (a gym-style environment with reset()/step())."""
        rewards = []
        for _ in range(n_samples):
            observation = env.reset()
            cum_rewards = 0
            while True:
                action = self.choose_action(policy, observation)
                observation, reward, done, _ = env.step(action)
                cum_rewards += reward
                if done:
                    rewards.append(cum_rewards)
                    break
        return np.mean(rewards)
| [
"1065504865@qq.com"
] | 1065504865@qq.com |
6388a3763a8737a12142d29fd6a3202fbd197144 | 31ff066c1e0b3d2e88326ca66584ef579484ba09 | /final/fractal/fractal/settings.py | ca82802c1d21423383704d39fceb886d1d6f8b8e | [] | no_license | Lightningbread76/YellowBrick | c82b757c4f2bf5fe6b6ce8c42efb231c1653575d | dd144da0e6676a0d6df669b8caeb7a9c5220c5e7 | refs/heads/main | 2023-04-15T15:30:48.179818 | 2021-04-23T16:39:07 | 2021-04-23T16:39:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """
Django settings for fractal project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to the repository; for any real
# deployment it should be loaded from the environment instead.
SECRET_KEY = 'le%+hw+4r3v-(x@x_$j!l!28w#xar4fbm1$b5=b!r%+s+u3f$='

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
# Django built-ins plus the local ``api`` and ``spotify`` apps and DRF.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api.apps.ApiConfig',
    'rest_framework',
    'spotify.apps.SpotifyConfig']

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'fractal.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'fractal.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: a file-based SQLite database next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"quinnrelyea@gmail.com"
] | quinnrelyea@gmail.com |
f267f2ee5db997cb0bd07c85bbca1589021cfe18 | 407d602bc989d0a7c49a441e75332c7f4d4db01d | /id3.py | 1c4d73e15e0caba9027a78fe65f17c9d08d7f7d1 | [] | no_license | thkm0620/goodWordAttack | 902e25080f844c6461bb5e4bcf803c1dd1e2b3eb | 9785290abae4cf60ab3652d7b00cb0471347b11e | refs/heads/master | 2023-02-01T05:47:49.381339 | 2020-12-19T05:59:06 | 2020-12-19T05:59:06 | 322,638,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | # coding=utf-8
import openpyxl
import numpy as np
from cleanText import cleanString
from sklearn import tree
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
class id3Classifier:
    """Spam classifier backed by an entropy (ID3-style) decision tree.

    All state is class-level and shared: the model is trained lazily on the
    first call to :meth:`predict` and reused afterwards; ``check`` is the
    "already trained" flag.
    """

    check = 0  # 0 = not trained yet, 1 = model/vectorizer are ready
    model = tree.DecisionTreeClassifier(criterion="entropy")
    vectorizer = TfidfVectorizer(stop_words='english', max_df=75)

    # Get the original dataset
    @staticmethod
    def store():
        """Load the labelled spreadsheet and return a train/test split.

        Bug fix: the methods of this class were plain functions (no ``self``)
        invoked on the class object; they are now explicit ``@staticmethod``s,
        which keeps ``id3Classifier.store()`` working and also makes instance
        access safe.

        Returns (xTrain, xTest, yTrain, yTest); label 1 marks spam, 0 ham.
        """
        workBookOld = openpyxl.load_workbook('datasets/trainData.xlsx')
        dataSheetOld = workBookOld['trainData']
        xData = []
        yData = []
        rows = dataSheetOld.max_row
        for i in range(2, rows + 1):
            # Skip rows whose label cell (column 2) is empty.
            if (str(dataSheetOld.cell(row=i, column=2).value) != 'None'):
                xData.append(str(cleanString(str(dataSheetOld.cell(row=i, column=1).value))))
                if (str(dataSheetOld.cell(row=i, column=2).value) == "1"):
                    yData.append(1)
                else:
                    yData.append(0)
        # NOTE: to train on the entire dataset, simply return xData and yData.
        # The split below exists only to obtain a held-out set for the F-score.
        xTrain, xTest, yTrain, yTest = train_test_split(xData, yData, test_size=0.2, random_state=0)
        return xTrain, xTest, yTrain, yTest

    # Calculating the F-score
    @staticmethod
    def calcFScore(xTest, yTest, model, vectorizer):
        """Evaluate *model* on the held-out set.

        Returns (f1, precision, recall, confusion_matrix); scores use
        pos_label=0 (the ham class).
        """
        xTestMatrix = vectorizer.transform(xTest)
        yTestMatrix = np.asarray(yTest)
        result = model.predict(xTestMatrix)
        matrix = confusion_matrix(yTestMatrix, result)
        fScore = f1_score(yTestMatrix, result, pos_label=0)
        precision = precision_score(yTestMatrix, result, pos_label=0)
        recall = recall_score(yTestMatrix, result, pos_label=0)
        return fScore, precision, recall, matrix

    @staticmethod
    def predict(msg):
        """Classify *msg*; trains the shared model on first use.

        Returns True when the message is classified as spam.
        """
        if id3Classifier.check == 0:
            # Create training data and fit the shared vectorizer/model once.
            xTrain, xTest, yTrain, yTest = id3Classifier.store()
            id3Classifier.vectorizer = TfidfVectorizer(stop_words='english', max_df=75)
            yTrainMatrix = np.asarray(yTrain)
            xTrainMatrix = id3Classifier.vectorizer.fit_transform(xTrain)
            # Training ID3 classifier
            id3Classifier.model.fit(xTrainMatrix, yTrainMatrix)
            fScore, precision, recall, matrix = id3Classifier.calcFScore(
                xTest, yTest, id3Classifier.model, id3Classifier.vectorizer)
            print("fScore, precision, recall :")
            print(fScore, precision, recall)
            id3Classifier.check = 1
        return id3Classifier.predict2(msg, id3Classifier.vectorizer, id3Classifier.model)

    # Test new data for Spam
    @staticmethod
    def predict2(emailBody, vectorizer, model):
        """Vectorise *emailBody* and return True iff the model flags it as spam."""
        featureMatrix = vectorizer.transform([cleanString(emailBody)])
        result = model.predict(featureMatrix)
        if (1 in result):
            # return "Spam"
            return True
        else:
            # return "Not Spam"
            return False
'''
print(id3Classifier.predict("FreeMsg: Claim ur 250 SMS messages-Text OK to 84025 now!Use web2mobile 2 ur mates etc. Join Txt250.com for 1.50p/wk. T&C BOX139, LA32WU. 16 . Remove txtX or stop"))
print(id3Classifier.predict("FREE for 1st week! No1 Nokia tone 4 ur mob every week just txt NOKIA to 87077 Get txting and tell ur mates. zed POBox 36504 W45WQ norm150p/tone 16+"))
print(id3Classifier.predict("I have a tad issue here about the thorough refining column"))
'''
| [
"thkm0620@naver.com"
] | thkm0620@naver.com |
9fc5c37346192644bc12a0b5a2a6817f98a26c5e | f384d0bd72edf56edc06b2bba84fc8ca8e73e6e5 | /calculos/salinidad.py | 61c1537b557a0c5704df400ab75a1f2a59373a73 | [] | no_license | jvaldesa/lia | ec44ed98f5efcd2cc1689040684a210dc6dadea1 | 7d362fb351142463f5e35bb0fbe8773efee5158c | refs/heads/master | 2020-04-04T20:33:56.740231 | 2019-05-07T14:53:01 | 2019-05-07T14:53:01 | 156,251,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | from decimal import Decimal
def cationes(titulacionCaMg, normalidadEDTA, titulacionCa, alicuota, Na, K):
    """Compute the cation concentrations (meq/L) of a salinity analysis.

    Formulas:
        Ca + Mg (meq/L) = (mL Ca+Mg titration * EDTA normality * 1000) / aliquot mL
        Ca      (meq/L) = mL Ca titration
        Mg      (meq/L) = (Ca + Mg) - Ca
        Na      (meq/L) = Na (ppm) / 23    (equivalent weight of Na)
        K       (meq/L) = K (ppm) / 39.1   (equivalent weight of K)

    Returns a dict with keys 'Ca', 'Mg', 'Na' and 'K', rounded to 2 decimals.
    """
    # Normalise every input to Decimal before doing any arithmetic.
    ca_mg_titration = Decimal(titulacionCaMg)
    edta_normality = Decimal(normalidadEDTA)
    ca_titration = Decimal(titulacionCa)
    aliquot = Decimal(alicuota)
    na_ppm = Decimal(Na)
    k_ppm = Decimal(K)

    combined_ca_mg = (ca_mg_titration * edta_normality * Decimal(1000)) / aliquot
    calcium = ca_titration
    magnesium = combined_ca_mg - calcium
    sodium = na_ppm / Decimal(23)
    potassium = k_ppm / Decimal(39.1)

    return {
        'Ca': round(calcium, 2),
        'Mg': round(magnesium, 2),
        'Na': round(sodium, 2),
        'K': round(potassium, 2),
    }
def aniones(titulacionCar, titulacionBlancoCar, normalidadH2SO4, alicuotaCar, titulacionBic, titulacionBlancoBic, alicuotaBic, titulacionClo, titulacionBlancoClo, normalidadAgNO3, alicuotaClo, conductividadEl, unidad):
    """Compute the anion concentrations (meq/L) of a salinity analysis.

    Formulas:
        x = carbonate titration - carbonate blank
        Carbonatos   = (2 * x * H2SO4 normality * 1000) / carbonate aliquot mL
        y = bicarbonate titration - bicarbonate blank
        Bicarbonatos = ((y - 2x) * H2SO4 normality * 1000) / bicarbonate aliquot mL
        z = chloride titration - chloride blank
        Cloruros     = (z * AgNO3 normality * 1000) / chloride aliquot mL
        Sulfatos     = EC (mS/cm) * 10 - (Carbonatos + Bicarbonatos + Cloruros)

    ``unidad`` must be 'µS/cm' or 'mS/cm'; any other value raises ValueError.
    Returns a dict with keys 'Carbonatos', 'Bicarbonatos', 'Cloruros' and
    'Sulfatos', rounded to 2 decimals.
    """
    # Normalise every numeric input to Decimal before doing any arithmetic.
    titulacionCar = Decimal(titulacionCar)
    titulacionBlancoCar = Decimal(titulacionBlancoCar)
    normalidadH2SO4 = Decimal(normalidadH2SO4)
    alicuotaCar = Decimal(alicuotaCar)
    titulacionBic = Decimal(titulacionBic)
    titulacionBlancoBic = Decimal(titulacionBlancoBic)
    alicuotaBic = Decimal(alicuotaBic)
    titulacionClo = Decimal(titulacionClo)
    titulacionBlancoClo = Decimal(titulacionBlancoClo)
    normalidadAgNO3 = Decimal(normalidadAgNO3)
    alicuotaClo = Decimal(alicuotaClo)
    conductividadEl = Decimal(conductividadEl)
    # Normalise the electrical conductivity to mS/cm.
    if unidad == 'µS/cm':
        Ce = conductividadEl / 1000
    elif unidad == 'mS/cm':
        Ce = conductividadEl
    else:
        # Bug fix: an unknown unit previously fell through and crashed later
        # with a NameError on ``Ce``; fail fast with a clear message instead.
        raise ValueError("unidad must be 'µS/cm' or 'mS/cm', got: {!r}".format(unidad))
    x = titulacionCar - titulacionBlancoCar
    y = titulacionBic - titulacionBlancoBic
    z = titulacionClo - titulacionBlancoClo
    Carbonatos = (2 * x * normalidadH2SO4 * 1000) / alicuotaCar
    Bicarbonatos = ((y - (2 * x)) * normalidadH2SO4 * 1000) / alicuotaBic
    Cloruros = (z * normalidadAgNO3 * 1000) / alicuotaClo
    Sulfatos = Ce * 10 - (Carbonatos + Bicarbonatos + Cloruros)
    return {
        'Carbonatos': round(Carbonatos, 2),
        'Bicarbonatos': round(Bicarbonatos, 2),
        'Cloruros': round(Cloruros, 2),
        'Sulfatos': round(Sulfatos, 2),
    }
| [
"jvaldes_a@hotmail.com"
] | jvaldes_a@hotmail.com |
a0f39619acc90f05c7ecc8ea66be9b18ee6058d7 | a63edabd559753582d464460afe0d8f2a3377b37 | /SpiderNode/preWork.py | 3ab2c0e734bfd7d77290f157c6363853e21188ad | [] | no_license | liangxs0/lagouCrawler | 6324d688a10fde86ecf813e696c164e4dbe72cf5 | d17d51e37f7078923e00baef40fc2440eeebf059 | refs/heads/master | 2020-03-13T17:32:31.645516 | 2018-04-15T14:09:15 | 2018-04-15T14:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | #-*-coding:utf-8-*-
import requests
from lxml import etree
import json
import urllib.parse
# Fetch the job categories from the front page.
def getIndexPageJob():
    """Scrape lagou.com's front page and return the job-category names.

    On a non-200 response an empty list is returned.  The fetch goes
    through a local proxy at 127.0.0.1:50157.
    """
    index_url = "https://www.lagou.com/"
    request_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36",
    }
    proxy_map = {"http": "http://127.0.0.1:50157"}
    response = requests.get(index_url, headers=request_headers, proxies=proxy_map)
    jobs = []
    if response.status_code == 200:
        page = etree.HTML(response.text)
        sidebar = page.xpath('//*[@id="sidebar"]/div/div[1]/div[2]')[0]
        # Each <dl> in the sidebar is one category group; its <dd>/<a>
        # texts are the individual job names.
        for category in sidebar.xpath('./dl'):
            jobs.extend(category.xpath('./dd/a/text()'))
    print(jobs)
    return jobs
# Fetch the complete list of hot cities.
def getAllCity():
    """Fetch the lagou.com job-listing page and return its city names,
    each URL-quoted.  Returns None on a non-200 response."""
    home_url = "https://www.lagou.com/"
    listing_url = "https://www.lagou.com/jobs/list_C%2B%2B?px=default&city=%E5%85%A8%E5%9B%BD#filterBox"
    request_headers = {
        'User-Agent': 'Mozilla/5.0(Macintosh;U;IntelMacOSX10_6_8;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50',
        'Host': 'www.lagou.com',
        'Referer': 'https://www.lagou.com/',
    }
    session = requests.Session()
    # Hit the home page first — presumably so the session picks up the
    # cookies lagou.com requires before serving the listing page (TODO confirm).
    session.get(home_url, headers=request_headers)
    response = session.get(listing_url, headers=request_headers,
                           proxies={"http": "http://127.0.0.1:50157"})
    if response.status_code == 200:
        page = etree.HTML(response.text)
        hot_cities = page.xpath('//li[@class="hot"]/a[@class="more-city-name"]/text()')
        other_cities = page.xpath('//li[@class="other"]/a[@class="more-city-name"]/text()')
        cities = hot_cities + other_cities
        print(cities)
        return [urllib.parse.quote(city) for city in cities]
"""if __name__=='__main__':
cities=getAllCity()
print(cities)"""
| [
"lrtxpra@163.com"
] | lrtxpra@163.com |
def count_evens(nums):
    """Return the number of even integers in *nums*."""
    return sum(1 for value in nums if value % 2 == 0)
if __name__ == "__main__":
print(count_evens([2, 1, 2, 3, 4])) # 3
print(count_evens([2, 2, 0])) # 3
print(count_evens([1, 3, 5])) # 0
| [
"dylanaperez@yahoo.com"
] | dylanaperez@yahoo.com |
be3f6184a13b0ec8ef97011496b815a287a03c3b | 6a76709e8a7fbcdfc3372120a4086718a07a270a | /dataset/dataset.py | 8517ccd17567d2c124b6211acfeb950ee765e64f | [] | no_license | jungdaechul-coderepo/EfficientDet | 17e831f5146934e276e243529709b94bbfbfa3f1 | ff132eb985676edc6df8e6c5a629f4974df47010 | refs/heads/main | 2023-05-12T08:28:57.840999 | 2020-11-26T08:54:11 | 2020-11-26T08:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | import torch
import torch.nn as nn
import os
import glob
from PIL import Image
import time
import cv2
class Retina_dataset(object):
    """PIL-based image dataset: collects image paths under *input_dir* and
    applies *transform* (a torchvision-style callable) on access.

    ``dataset[idx]`` returns ``(image, seconds_spent_in_transform)``.
    """

    def __init__(self, input_dir, transform, input_format='RGB', output_format='RGB'):
        """Gather all jpeg/jpg/png images (either case) under *input_dir*.

        *input_dir* may be None, in which case the dataset starts empty.
        """
        self.input_imgs = []
        self.input_format = input_format
        self.output_format = output_format
        self.transform = transform
        if input_dir is not None:
            # Sorted per extension so the ordering is deterministic.
            for ext in ['jpeg', 'jpg', 'png', 'JPEG', 'PNG', 'JPG']:
                self.input_imgs += sorted(glob.glob('{}/*.{}'.format(input_dir, ext)))

    def __getitem__(self, idx):
        """Load image *idx*, apply the transform, return (image, elapsed seconds)."""
        img = Image.open(self.input_imgs[idx])
        start_time = time.time()
        if self.transform:
            img = self.transform(img)
        total_time = time.time() - start_time
        return img, total_time

    def __len__(self):
        # Bug fix: the original did ``return print(...)``, which returns None
        # and makes ``len(dataset)`` raise TypeError; return the count itself.
        return len(self.input_imgs)
class Retina_dataset_albumentation(object):
    """OpenCV-based image dataset for albumentations-style transforms
    (callables invoked as ``transform(image=...)`` returning a dict with an
    ``'image'`` key).

    ``dataset[idx]`` returns ``(image, seconds_spent_in_transform)``.
    """

    def __init__(self, input_dir, transform, input_format='RGB', output_format='RGB'):
        """Gather all jpeg/jpg/png images (either case) under *input_dir*.

        *input_dir* may be None, in which case the dataset starts empty.
        """
        self.input_imgs = []
        self.input_format = input_format
        self.output_format = output_format
        self.transform = transform
        if input_dir is not None:
            # Sorted per extension so the ordering is deterministic.
            for ext in ['jpeg', 'jpg', 'png', 'JPEG', 'PNG', 'JPG']:
                self.input_imgs += sorted(glob.glob('{}/*.{}'.format(input_dir, ext)))

    def __getitem__(self, idx):
        """Load image *idx* (converted BGR->RGB), apply the transform,
        return (image, elapsed seconds)."""
        img = cv2.imread(self.input_imgs[idx])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        start_time = time.time()
        if self.transform:
            augmented = self.transform(image=img)
            img = augmented['image']
        total_time = time.time() - start_time
        return img, total_time

    def __len__(self):
        # Bug fix: the original did ``return print(...)``, which returns None
        # and makes ``len(dataset)`` raise TypeError; return the count itself.
        return len(self.input_imgs)
"bigpicture.jh@gmail.com"
] | bigpicture.jh@gmail.com |
3e50eb432278799573f1eb0faf52a1893f21fc48 | 3fe6529e3733a0d703e3ce12790e90195037ada0 | /interview/interview/urls.py | 5859e55f97526d0abfd0db7dc90aa30c0124b0e5 | [] | no_license | vkit/zypol | cc536b66b45333211f7be3ee131a4325665fd2d6 | db7306c32f473b6852b3561f97ca9c8c50be2709 | refs/heads/master | 2021-07-09T08:11:49.920719 | 2017-10-06T11:40:37 | 2017-10-06T11:40:37 | 105,997,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | """interview URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route table: Django admin plus the student app's own URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Everything under /student/ is delegated to student/urls.py.
    url(r'^student/',include('student.urls')),
] | [
"noreply@github.com"
] | vkit.noreply@github.com |
b48a2e29d81c5d7ddbf5cc76cd714fe6c1483872 | 9e27f91194541eb36da07420efa53c5c417e8999 | /twilio/twiml/messaging_response.py | abb58ff2c6d33ad1d66998d8f9520dd3786f329a | [] | no_license | iosmichael/flask-admin-dashboard | 0eeab96add99430828306b691e012ac9beb957ea | 396d687fd9144d3b0ac04d8047ecf726f7c18fbd | refs/heads/master | 2020-03-24T05:55:42.200377 | 2018-09-17T20:33:42 | 2018-09-17T20:33:42 | 142,508,888 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
import json
from admin.twilio.twiml import (
TwiML,
format_language,
)
class MessagingResponse(TwiML):
    """ <Response> TwiML for Messages.

    Root element of a messaging TwiML document; child verbs are attached
    with :meth:`message` and :meth:`redirect`.
    """
    def __init__(self, **kwargs):
        super(MessagingResponse, self).__init__(**kwargs)
        self.name = 'Response'
    def message(self, body=None, to=None, from_=None, action=None, method=None,
                status_callback=None, **kwargs):
        """
        Create a <Message> element and nest it under this response.

        :param body: Message Body
        :param to: Phone Number to send Message to
        :param from_: Phone Number to send Message from (trailing underscore
            avoids the Python keyword ``from``)
        :param action: Action URL
        :param method: Action URL Method
        :param status_callback: Status callback URL. Deprecated in favor of action.
        :param kwargs: additional attributes
        :returns: <Message> element
        """
        return self.nest(Message(
            body=body,
            to=to,
            from_=from_,
            action=action,
            method=method,
            status_callback=status_callback,
            **kwargs
        ))
    def redirect(self, url, method=None, **kwargs):
        """
        Create a <Redirect> element and nest it under this response.

        :param url: Redirect URL
        :param method: Redirect URL method
        :param kwargs: additional attributes
        :returns: <Redirect> element
        """
        return self.nest(Redirect(url, method=method, **kwargs))
class Redirect(TwiML):
""" <Redirect> TwiML Verb """
def __init__(self, url, **kwargs):
super(Redirect, self).__init__(**kwargs)
self.name = 'Redirect'
self.value = url
class Message(TwiML):
""" <Message> TwiML Verb """
def __init__(self, body=None, **kwargs):
super(Message, self).__init__(**kwargs)
self.name = 'Message'
if body:
self.value = body
def body(self, message, **kwargs):
"""
Create a <Body> element
:param message: Message Body
:param kwargs: additional attributes
:returns: <Body> element
"""
return self.nest(Body(message, **kwargs))
def media(self, url, **kwargs):
"""
Create a <Media> element
:param url: Media URL
:param kwargs: additional attributes
:returns: <Media> element
"""
return self.nest(Media(url, **kwargs))
class Media(TwiML):
""" <Media> TwiML Noun """
def __init__(self, url, **kwargs):
super(Media, self).__init__(**kwargs)
self.name = 'Media'
self.value = url
class Body(TwiML):
""" <Body> TwiML Noun """
def __init__(self, message, **kwargs):
super(Body, self).__init__(**kwargs)
self.name = 'Body'
self.value = message
| [
"michaelliu@iresearch.com.cn"
] | michaelliu@iresearch.com.cn |
cbd142b626698fe1debd6ecef0822cc0d7b13f7f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_150/ch50_2020_04_13_03_25_44_929209.py | a262c1522f55ac719f56e8c2e06b6e69fde73ed5 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def junta_nome_sobrenome(nome, sobrenome):
nome_e_sobrenome = []
i = 0
while i < len(nome) and i < len(sobrenome):
nome_e_sobrenome.append(nome[i] + ' ' +sobrenome[i])
i += 1
return nome_e_sobrenome | [
"you@example.com"
] | you@example.com |
ced5ac21aec05c62ab3a5d490799f6547ae76833 | 40686808bb915db93c03069bfe4ab9ec73663c19 | /classification_testing.py | dac47c7d32750fcf6ad52f3914b4abf012619c31 | [] | no_license | AzogDefiler/classification_wav | 340f2be9352c284fc23d6f3df3d38a15d30d97a6 | 3a36d4533f13afda181a8f6a442b879cb7a8fa72 | refs/heads/master | 2020-03-22T03:25:43.989167 | 2018-07-02T19:21:09 | 2018-07-02T19:21:09 | 139,431,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,696 | py |
# coding: utf-8
# In[1]:
# Aminov Rezo
import numpy as np
import wave
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from glob import glob
import random
import struct
from keras.models import *
from keras.layers import *
from keras.callbacks import *
import librosa
import soundfile as sf
from keras.models import load_model
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
# In[2]:
DATA_DIR = 'data_v_7_stc'
meta_file = "{}/meta/meta.txt".format(DATA_DIR)
df = pd.read_csv(meta_file, sep='\t',header=None)
labels_name = df[4].unique()
# In[3]:
# кодирование лейблов
onehot_dict = {}
for ii, lab in enumerate(labels_name):
y_ = np.zeros(len(labels_name))
y_[ii] = 1
onehot_dict.update({lab:ii})
# обратный хэш
hot_to_one = {}
for k,v in onehot_dict.items():
hot_to_one.update({v:k})
# In[4]:
# экстрактор фич: Мел-кепстральные коэффициенты (MFCC). https://habr.com/post/140828/
def extract_feature(file_name):
X, sample_rate = sf.read(file_name, dtype='float32')
if X.ndim > 1:
X = X[:,0]
X = X.T
# преобразование Фурье
stft = np.abs(librosa.stft(X))
# MFCC
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=128).T,axis=0)
# chroma
# chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
# мэл спектр
mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)
# спектр-ный контраст
# contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
# tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)
return mfccs,mel
# In[5]:
files_test = glob(DATA_DIR+'/test/*.wav')
# In[6]:
model = load_model('weights/model.hdf5')
model.load_weights('weights/model_weights.hdf5')
# In[7]:
CNT=0 # кол-во всех не 'unknown', подмножество 'A'
GOOD=0 # кол-во правильно опред-ых файлов в подмножестве 'A'
BAD=0 # кол-во не правильно опред-ых файлов в подмножестве 'A'
filew = open("result.txt","a")
features_test = np.empty((0,256))
for file in files_test:
try:
mfccs,mel = extract_feature(file)
except Exception as e:
print("[Error] extract feature error. %s" % (e))
continue
ext_features_test = np.hstack([mfccs,mel])
# features_test = np.vstack([features_test,ext_features_test])
pred = model.predict(np.expand_dims([ext_features_test],axis=2))
score = pred.max()
class_ = hot_to_one[np.argmax(pred)]
filename = file.split('/')[2]
filew.write(filename+'\t'+str(score)+'\t'+class_+'\n')
print(filename+' '+str(score)+' '+class_)
# если файл не 'unknown', делаю подсчет совпадений лейбла и наз. файла
# примерный подсчет, т.к. неизвестно к какому классу относятся файлы
# с наз. 'unknown'
if 'unknown' not in filename:
CNT+=1
if class_ in filename:
GOOD+=1
else:
BAD+=1
filew.close()
# In[8]:
CNT, GOOD, BAD
# In[9]:
GOOD/CNT
# In[ ]:
| [
"aminov@skytracking.ru"
] | aminov@skytracking.ru |
9379fb1cf8f8a0ce4ebdd89c1b183018f97816d5 | 7b82cd6d0c8d7eb968ec4eb2dc7020316880e690 | /aswissues/migrations/0013_auto_20191119_2241.py | e3afc2b41389cc02b6ce0652b70c209ab9ffecb5 | [] | no_license | kennyangelfib/ASW-Issue-Tracker | abbeed5e5517f74beb66a06225df19de926fd250 | d9087ad422b6f97de8854bc936943001387d168e | refs/heads/master | 2022-04-11T21:27:46.933937 | 2020-01-14T13:43:37 | 2020-01-14T13:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | # Generated by Django 2.2.7 on 2019-11-19 22:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('aswissues', '0012_merge_20191119_2215'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='issue',
name='assignee',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Assignee', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='issue',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Creator', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='vote',
name='voter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='watch',
name='watcher',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"andreugallofre@gmail.com"
] | andreugallofre@gmail.com |
4450a20d24354e763675dde15bcfdb903c38b2d4 | f2982bb689fa9eecbf1503d0db5688923802d507 | /KRTimedDraw.py | efdfe94a8b8fce5b537587dcff98902d9ccd6bc1 | [] | no_license | KareshiKraise/timeDrawingHelper | 448d0340e1103f0a52f075137001c89c029b1247 | 809f2fa136300b04d8c27250d7f2165dfd73dd7f | refs/heads/main | 2023-06-04T06:50:17.993974 | 2021-06-22T21:34:38 | 2021-06-22T21:34:38 | 358,704,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,626 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\timedHelper.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
#
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import os
import random
import cv2
import qdarkstyle
# supported file extensions
# [".jpg", ".jpeg", ".png", ".gif"]
os.environ['QT_API'] = 'pyqt5'
max_res = (640,640)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(480, 640)
# MainWindow.setStyleSheet("background-color: rgb(150, 150, 150);")
MainWindow.setLocale(
QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedKingdom)
)
MainWindow.setAnimated(True)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
# My vars----------------------------------------------
self.wndHandle = MainWindow
self.timer_ = 60
self.counter_ = self.timer_
self.dir_ = "C:/"
self.imgList_ = []
self.lbl = QtWidgets.QLabel(self.centralwidget)
self.lbl.setGeometry(QtCore.QRect(0, 0, 480, 640))
# -----------------------------------------------------
self.res_ = (480, 640)
self.widRect_ = QtCore.QRect()
self.spinBoxHidden = True
self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox.setGeometry(QtCore.QRect(410, 0, 71, 31))
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(24)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.spinBox.setFont(font)
self.spinBox.setStyleSheet('font: 24pt "Calibri";')
self.spinBox.setObjectName("spinBox")
self.spinBox.setValue(self.counter_)
self.spinBox.setMinimum(1)
self.spinBox.setMaximum(600)
self.spinBox.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
self.spinBox.show()
self.showTimeLabel = QtWidgets.QLabel(self.centralwidget)
self.showTimeLabel.setGeometry(QtCore.QRect(410, 0, 71, 31))
self.showTimeLabel.setFont(font)
self.showTimeLabel.setStyleSheet('font: 24pt "Calibri";')
self.showTimeLabel.setObjectName("showTimeLbl")
self.showTimeLabel.setText(str(self.counter_))
self.showTimeLabel.hide()
self.showTimeLabel.setStyleSheet("color: rgba(255, 255, 255, 255);")
# -----------------------------------------------------
MainWindow.setCentralWidget(self.centralwidget)
self.toolBar = QtWidgets.QToolBar(MainWindow)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(12)
self.toolBar.setFont(font)
self.toolBar.setObjectName("toolBar")
self.toolBar.setMovable(False)
self.toolBar.setFloatable(False)
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionPath = QtWidgets.QAction(MainWindow)
self.actionPath.setObjectName("actionPath")
self.actionStart_Stop = QtWidgets.QAction(MainWindow)
self.actionStart_Stop.setObjectName("actionStart_Stop")
self.actionSetTime = QtWidgets.QAction(MainWindow)
self.actionPath.setObjectName("setTimeEnable")
self.toolBar.addAction(self.actionPath)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionStart_Stop)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionSetTime)
self.toolBar.addSeparator()
# additional logic--------------------------------------
self.is_running_ = False
self.clock = QTimer()
self.clock.timeout.connect(self.onTimeout)
self.clock.start(self.timer_)
self.clock.setInterval((1000))
self.actionPath.triggered.connect(self.browseFolder)
self.actionStart_Stop.triggered.connect(self.start)
self.actionSetTime.triggered.connect(self.enableSpinBox)
self.spinBox.valueChanged.connect(self.setTime)
self.checkConfig()
# ------------------------------------------------------
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def computeWidRect(self):
width = self.wndHandle.geometry().width()
height = self.wndHandle.geometry().height()
xpos = (width - 71, width)
ypos = (0, 31)
return xpos, ypos
def checkConfig(self):
exists = False
if os.path.isfile("folders.config"):
print("Found folders.config, will load images from there")
with open("folders.config") as f:
content = f.readlines()
if len(content) > 0:
self.dir_ = content[0]
else:
self.dir_ = "C:/"
exists = True
self.imgList_ = [
self.dir_ + "/" + f
for f in os.listdir(self.dir_)
if f.endswith(".jpg") or f.endswith(".jpeg") or f.endswith(".png")
]
else:
print("File folders.config doesnt exist. Will create.")
return exists
def enableSpinBox(self):
self.is_running_ = False
if not self.spinBoxHidden:
self.spinBox.show()
self.showTimeLabel.hide()
else:
self.showTimeLabel.setText(str(self.timer_))
self.showTimeLabel.show()
self.spinBox.hide()
self.spinBoxHidden = not self.spinBoxHidden
def browseFolder(self):
self.dir_ = QFileDialog.getExistingDirectory(
None, "Select a folder:", "C:\\", QFileDialog.ShowDirsOnly
)
f = open("folders.config", "w")
f.write(self.dir_)
f.close()
if self.dir_ == "" or self.dir_ == None:
self.dir_ = "C:/"
else:
self.imgList_ = [
self.dir_ + "/" + f
for f in os.listdir(self.dir_)
if f.endswith(".jpg") or f.endswith(".jpeg") or f.endswith(".png")
]
self.is_running_ = False
return
def start(self):
self.counter_ = self.timer_
self.is_running_ = not self.is_running_
if self.is_running_ == True:
if len(self.imgList_) == 0:
print("couldnt find image list")
self.is_running_ = False
else:
self.showTimeLabel.setText(str(self.timer_))
print("drawing at start")
randomVal = random.randint(0, len(self.imgList_) - 1)
self.draw(randomVal)
return
def resize(self, w, h):
self.wndHandle.resize(w, h)
self.lbl.setGeometry(QtCore.QRect(0, 0, w, h))
xpos, ypos = self.computeWidRect()
self.spinBox.setGeometry(QtCore.QRect(xpos[0], ypos[0], xpos[1], ypos[1]))
self.showTimeLabel.setGeometry(QtCore.QRect(xpos[0], ypos[0], xpos[1], ypos[1]))
def setTime(self):
if self.is_running_:
self.is_running_ = False
self.timer_ = self.spinBox.value()
self.counter_ = self.timer_
self.showTimeLabel.setText(str(self.counter_))
def onTimeout(self):
if len(self.imgList_) > 0:
if self.is_running_ == True:
if self.counter_ <= 0:
randomVal = random.randint(0, len(self.imgList_) - 1)
self.draw(randomVal)
self.counter_ = self.timer_
self.showTimeLabel.setText(str(self.counter_))
self.counter_ = self.counter_ - 1
return
def draw(self, randomVal):
cvImg = cv2.imread(self.imgList_[randomVal])
height, width, _ = cvImg.shape
cvImg = cv2.cvtColor(cvImg, cv2.COLOR_RGBA2BGR)
bytesPerLine = 3 * width
aspect = float(height)/float(width)
resh = 0
resw = 0
resize = False
if height > max_res[1]:
resh = max_res[1]
resw = int(resh/aspect)
resized = True
if width > max_res[0]:
resw = max_res[0]
resh = int(resw*aspect)
resize= True
if resize:
cvImg = cv2.resize(cvImg, (resw,resh), interpolation = cv2.INTER_CUBIC)
height = resh
width = resw
bytesPerLine = 3 * width
resize = False
qImg = QImage(
cvImg.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888
)
self.resize(width, height)
self.lbl.setPixmap(QtGui.QPixmap.fromImage(qImg))
return
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.actionPath.setText(_translate("MainWindow", "Path"))
self.actionPath.setToolTip(
_translate(
"MainWindow", "Path to your image folder, shortcut: Ctrl+Shift+F"
)
)
self.actionPath.setShortcut(_translate("MainWindow", "Ctrl+Shift+F"))
self.actionStart_Stop.setText(_translate("MainWindow", "Start/Stop"))
self.actionStart_Stop.setToolTip(
_translate("MainWindow", "Start the Timed Drawing, shortcut: Ctrl+Space")
)
self.actionStart_Stop.setShortcut(_translate("MainWindow", "Ctrl+Space"))
self.actionSetTime.setText(_translate("MainWindow", "Set Time"))
self.actionSetTime.setToolTip(
_translate("MainWindow", "Enable Set Time spinbox")
)
self.actionSetTime.setShortcut(_translate("MainWindow", "Ctrl+T"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
app.setStyleSheet(qdarkstyle.load_stylesheet())
MainWindow = QtWidgets.QMainWindow()
MainWindow.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"vitormoraesaranha@gmail.com"
] | vitormoraesaranha@gmail.com |
2b9d7f843e20d0de53d6b81d06487d22edf5c41c | ea93c59912c4654a87c06eba3dd4b7eda989463b | /my_blog_project/my_blog_project/asgi.py | 85753b9ab25a95115448a630ebf196277b6e070a | [] | no_license | ruhulamin1998/django-blog-project | e88b3fd8ef457c7eb4ee3149fa0f1969f5489c73 | 1f11f9eb2015a0cfb98dfb720c0379eee107e51c | refs/heads/main | 2023-03-04T20:47:59.453834 | 2021-02-09T06:25:34 | 2021-02-09T06:25:34 | 337,301,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
ASGI config for my_blog_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_blog_project.settings')
application = get_asgi_application()
| [
"ruhulamin.raj1998@gmail.com"
] | ruhulamin.raj1998@gmail.com |
708ef071fd1faafca78472a84eda933ffbb64c07 | 1702a4ffbc68266e4f232389f7a5f9cc44cb489b | /hello.py | 6575b82784245fb5e88b9822dc71c860e58085ec | [] | no_license | Caspeezie/TDDAssignments | 6d0c988b458797bc0826d1e1cf338df15d337f4c | c8830df877b85025c4064dc1c8933b8915d88aad | refs/heads/master | 2022-12-22T08:16:23.884642 | 2020-09-15T11:29:49 | 2020-09-15T11:29:49 | 276,622,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #This program says hello and asks for your name
print('Hello World')
print('What is your name?')
myName = input()
print('It is good to meet you, ' +myName)
| [
"noreply@github.com"
] | Caspeezie.noreply@github.com |
e36fa96a6b594bb9c54e0f68eb1cdc6daad6b8e1 | 441508533c0f46814e5c5248ca31858afc3a2f7e | /a/b.py | ecf6c8fc51e30df4ea2a6b660a31b92d01ab8553 | [] | no_license | phiresky/poetry-install-self-issue | 7e72ef08821959ed346163756e2ed51f00fc0148 | 47b2e7842555c94f9f07ed80096b48010e6a9b1e | refs/heads/master | 2020-04-10T16:29:32.768434 | 2018-12-10T15:32:03 | 2018-12-10T15:32:03 | 161,147,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | import c.x
print(c.x.test)
| [
"robin.ruede@philips.com"
] | robin.ruede@philips.com |
a1816820f2b5b9ae3603a6eb633cc4c41cc5b75c | 4825a6b905d49c787996058e23e99f2d1785e3d4 | /Lab12.py | a7f8dbb2c50fe805cb29362c411bfac41519aff8 | [] | no_license | dylansidney25/Lab-12 | 510fe50ab3b67d06b5feb913c2cc4dd836524794 | 0d00ac2966afb53f4799af6fb0488beb4f834ebc | refs/heads/main | 2023-03-29T19:31:27.323802 | 2021-04-02T20:18:00 | 2021-04-02T20:18:00 | 354,121,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | import cv2 as cv
import sys
img = cv.imread('dragon2.png') #Selects the file to be used
img = cv.resize(img,None,fx=3, fy=3, interpolation = cv.INTER_CUBIC) #This resized the window
if img is None: #Checks if there is a file
sys.exit("The image could not be read.") #Closes the window
cv.imshow("OpenCV Image", img) #Shows the image with the title given
cv.waitKey(0) #Waits untill a key is pressed
cv.destroyAllWindows() #closed the window | [
"noreply@github.com"
] | dylansidney25.noreply@github.com |
5d301c3d4a4175d621b5d2c952a421bcdec1afd2 | 8683b28b47acceb6fc7677dcea3248a926fb95dd | /cities/views.py | 66bc591e3bb5cd2b59f0018e82f5e910ccdf6d71 | [] | no_license | pavhrablis/find_routes | e2463714a94ee90411634934064cd56402d247ce | 228cdef0c553bab9f4c1328c3263209c974b8c57 | refs/heads/master | 2023-02-17T18:22:00.505675 | 2021-01-21T10:39:23 | 2021-01-21T10:39:23 | 330,508,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView, ListView
from cities.forms import CityForm
from cities.models import City
__all__ = (
#'home',
'CityDetailView',
'CityCreateView',
'CityUpdateView',
'CityDeleteView',
'CityListView',
)
# def home(request, pk=None):
# if request.method == "POST":
# form = CityForm(request.POST)
# if form.is_valid():
# form.save()
# form = CityForm()
# cities = City.objects.all()
# paginator = Paginator(cities, 2)
# page_number = request.GET.get('page')
# page_obj = paginator.get_page(page_number)
# context = {'page_obj': page_obj, 'form': form}
# return render(request, 'cities/home.html', context)
class CityDetailView(DetailView):
queryset = City.objects.all()
template_name = 'cities/detail.html'
class CityCreateView(SuccessMessageMixin, CreateView):
model = City
form_class = CityForm
template_name = 'cities/create.html'
success_url = reverse_lazy('cities:home')
success_message = "City successfully created"
class CityUpdateView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
model = City
form_class = CityForm
template_name = 'cities/update.html'
success_message = "City successfully changed"
class CityDeleteView(SuccessMessageMixin, LoginRequiredMixin, DeleteView):
model = City
success_url = reverse_lazy('cities:home')
def get(self, request, *args, **kwargs):
messages.success(request, "City successfully deleated")
return self.post(request, *args, **kwargs)
class CityListView(ListView):
model = City
paginate_by = 5
template_name = 'cities/home.html'
form = CityForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
form = CityForm()
context['form'] = form
return context
| [
"palik1hrablis@gmail.com"
] | palik1hrablis@gmail.com |
45d758380acbf60b955a49e002bc95f70394a173 | d919fff21345c9553bd3095f639a8049be53c3d6 | /OrderPizza/migrations/0004_subtoppings.py | efb945020d66d556e8ae6ac99e546a4aca2c0f70 | [] | no_license | parathan/Pizza-Place | 36ae461e828c1191db9829cd5d870b820236acde | 56511f02df5d28196422f85f25a8230e3e730713 | refs/heads/master | 2022-07-24T20:40:26.603536 | 2020-05-23T16:24:36 | 2020-05-23T16:24:36 | 263,726,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | # Generated by Django 3.0.6 on 2020-05-15 02:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('OrderPizza', '0003_auto_20200514_2114'),
]
operations = [
migrations.CreateModel(
name='SubToppings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=50)),
('smallprice', models.DecimalField(decimal_places=2, max_digits=10)),
('largeprice', models.DecimalField(decimal_places=2, max_digits=10)),
],
options={
'verbose_name_plural': 'SubToppings',
},
),
]
| [
"parathan243@gmail.com"
] | parathan243@gmail.com |
69f0025f1980926d00241e4f3d009bc18b75f4f2 | dfca79dd44910df779eb33a5e9a5d2d6689eb23a | /gifs/migrations/0002_auto_20180818_2349.py | 993068e30a6674542c51f747984712ea60a1ad58 | [] | no_license | madevelascom/topgifs | 193094961fbdbc2773fa03cda1d0eade8e81b4dc | 94bfdc20eebda746f70148c0318f6ab23f31bbf6 | refs/heads/master | 2020-03-26T19:40:03.405553 | 2018-08-23T07:39:09 | 2018-08-23T07:39:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Generated by Django 2.1 on 2018-08-19 04:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gifs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='gif',
name='contador',
field=models.IntegerField(default='0'),
),
migrations.AddField(
model_name='gif',
name='descripcion',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='gif',
name='url',
field=models.TextField(default='#'),
),
migrations.AlterField(
model_name='gif',
name='id',
field=models.IntegerField(primary_key=True, serialize=False),
),
]
| [
"m_velasco93@hotmail.com"
] | m_velasco93@hotmail.com |
1bbf137aa6b4ac31b07c38f1907aa7110050b58e | 045385ba95f62658d15688e7dade4e7e618ebb08 | /report.py | c7c92504a04ca5fc40db5f9ceb120fc9823c7131 | [] | no_license | tarunbodapati/player.py | eb04739737223ed0e185f6cf62389a444e61875f | 97587573587dfec1a3b3f75b41bcb4cb4f3f741d | refs/heads/master | 2020-06-21T05:33:25.378068 | 2020-02-25T10:00:16 | 2020-02-25T10:00:16 | 197,356,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,250 | py | A NOVELL APPROACH FOR DETECTION OF LUNG TUMOUR
Abstract:
Delineation of lung tumor from adjacent tissue from different computed tomography images (CT) creates many difficulties due to the similarities in images. In the surrounding areas as well as it influence the respiration. so accurate segmentation and classification is required in order to detect the present condition of a person. So in order to apply the perfect radiation therapy to the person without effecting the remaining tissues besides the tumor part. we are going to use neural networks for classification so that to obtain the accuracy and to improve the parameters for the classification and proper diagnosing of tumor . this process uses the better classification of tumor to detect and to give the treatment. Therefore we uses the process of dwt for enhancing the image and median filter to remove any noise and to use the GLCM (gray level co occurrence matrix) for feature extraction and to use the process of neural network classification for proper classification to detect the tumor is in initial stage (benign) or it is in final stage (malignant).
Introduction:
There are many un necessary cells that are grown in our body the growing of these cells in an un even way in our body can cause many kind of problems. If these kind of cells grows unevenly will make risk to the life of an living being. In bad conditions the growth will be uncertain . these lung tumors are mainly formed by cigarettes and also by using different kind of eatables so that these are the main causes to grow these cells in an uncertain and also in abnormal manner. these tumors can also mainly contain of different stages . they make the living beings to get in to a situation that weakens the functions of lungs. Of mixing the functions of oxygen from blood.it is the main causes of cancer in many human beings. The percentage of cancer that most effected by human beings is lung cancer because of unnecessary activities like smoking. the percentage of the people died because of the lung cancer is very high. Approximately 154,050 people died due to lung cancer in 2018. And also the rate has been decreased in 2019 due to the awareness about cancer in some of rural areas 159,292 is in the year 2005. So many people uses the different techniques in order to avoid lung cancer. These cancer occurs due to the uneven growth of tumor in lungs. It is very required for any people to detect lung cancer in the initial stages to save one’s lives. The main purpose of this is to classify it as benign or malignant. The main purpose of this technique is to use the segmentation and classifying the tumor that it is more accurate and to detect the tumor fast and to diagnose in the initial stage to cure it. There is no cure for final stage of cancer. to diagnose it we need to identify it in the initial stage itself. We are going to use Matlab so that to process the image and to enhance the image and to convert the image from gray to binary and also to use the process of discrete wavelet transform for it and also to use fuzzy means clustering for segmenting the image . 
we have used feature extraction technique called GLCM to extract the features of the images . and we are using neural networks for classification. Here in matlab software we are going to create a GUI and by placing different blocks in GUI. So that by using code we need to extract different processes on the GUI. To process this we need a CT scan image that is computed tomography images to perform operations on the image using coding format. So we are going to use these CT scan images and they are essential in image modelling .
LITERAURE SURVEY:
This section represents the survey for classification of the tumor from previous references. There are many planning and previous references that are given in order to develop a different set of parameters and different types of algorithms are used to perform different types of techniques are followed in order to get the required output in different ways
.[1] Humera shaziya etal presented the automatic lung segmentation on thoracic CT scans using U NET Convolutional network they have used unet convolutional networks, deep learning, automatic lung segmentation. .
[2] ling zhang,etal used an algorithm such that self learning to detect and segment cysts in lung CT images without manual annotation it was also published in the year of 2018 using unsupervised segmentation,, segmentation networks.
[3] lilkAnifah,etal used a technique to detect cancer lungs detection and ct scan image using artificial neural network back propagation based gray level co occurence matrices feature. It was published in year 2017 artificial neural network back propagation based gray level co occurrence matrices.
[4] Tiany Zhao,et al uses a technique on lung segmentation in CT images using a fully convolutional neural networks Tiany Zhaol,etal it was published in the year 2018 . it was done with the help of convolutional neural network.
[5] sneha pothagam uses a technique named multilayer perception based lung tumor classification in the year 2018 algorithm used is k nearest neighbor algorithm
[6] k.gopi uses a technique named lung tumor area classification (recognition) using EKmean clustering and SVM and the technique used is SVM,EK thresholding.
[7] zigha zong uses a technique called 3D fully convolutional N/w’s for cosegmentation of tumors on PETCT images and algorithm used is 3DUnet and graph cut based cosegmentation.
[8] sheenan ratnam used a technique optimized lung cancer classification computed tomography the algorithm used is BAT algorithm and artificial neural networks.
Methodology:
We are going to use different set of images like normal,benign,malignant to check the image processing for image classification. We are going to use median filter for image preprocessing, and to remove any noise and we used glcm for feature extraction and also we are going to use DWT discrete wavelet transform is used for image enhancement. And we have used neural networks for classification.
| [
"noreply@github.com"
] | tarunbodapati.noreply@github.com |
739b69b48836c90b0897da6f89d31fa83298ca3a | ca683bdfd7e3a02056965f8f5452daa232455fe2 | /users/admin.py | e38ab5c53af47e20d9c8940e891f0af9da8c269a | [] | no_license | GuillermoRuizDev/DJango_RedSocial | 63e890b6ceae20ef78b382ab80850f94902ee67f | 19e77b3a2c924fec516426565843c87a6c3dc638 | refs/heads/main | 2023-06-27T00:21:13.059760 | 2021-08-01T12:17:46 | 2021-08-01T12:17:46 | 378,261,983 | 0 | 0 | null | 2021-07-26T09:05:02 | 2021-06-18T20:49:17 | Python | UTF-8 | Python | false | false | 1,964 | py | """ Admin Users classes. """
#Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
#Models
from django.contrib.auth.models import User
from users.models import Profile
from posts.models import Post
# Register your models here.
#admin.site.register(Profile)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Admin configuration for the Profile model.

    Registered via the ``@admin.register`` decorator; exposes the related
    User's identity fields for searching and keeps the audit timestamps
    read-only.
    """
    # Columns on the change list; 'pk' and 'user' double as edit links.
    list_display = ('pk','user', 'phone_number', 'website', 'picture')
    list_display_links = ('pk', 'user')
    # Fields editable directly from the change-list table.
    list_editable = ('phone_number','website','picture')
    # Search spans the related User's identifying fields plus contact info.
    search_fields = (
        'user__username',
        'user__email',
        'user__first_name',
        'user__last_name',
        'phone_number',
        'website'
    )
    list_filter = (
        'created',
        'modified',
        'user__is_active',
        'user__is_staff'
    )
    # Layout of the edit form, grouped into three titled sections.
    fieldsets = (
        ('Profile',{
            'fields': (
                ('user','picture'),
            ),
        }),
        ('Extra info',{
            'fields': (
                ('website', 'phone_number'),
                ('biography')
            )
        }),
        ('Metadata',{
            'fields': (
                ('created','modified'),
            )
        })
    )
    # Timestamps are managed by the model, so they are display-only here.
    readonly_fields = ('created','modified')
class ProfileInLine(admin.StackedInline):
    """Inline Profile editor embedded in the User admin page."""
    model = Profile
    can_delete = False  # a User's Profile cannot be removed from the inline
    verbose_name_plural = 'profiles'
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for the Post model (change-list columns only)."""
    list_display = ('user', 'title', 'photo', )
class UserAdmin(BaseUserAdmin):
    """Extend Django's built-in user admin with an inline Profile editor."""
    # Show the Profile fields inline on the User change page.
    inlines = (ProfileInLine,)
    list_display = (
        'username',
        'email',
        'first_name',
        'last_name',
        'is_active',
        'is_staff'
    )
# Swap the stock User admin for the Profile-aware one, then register Post.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Post, PostAdmin)
"gui.ruiz.alvarado@gmail.com"
] | gui.ruiz.alvarado@gmail.com |
be01e83be789080320012167b97227cca786a513 | 2488718297e47d726471f5aafb76369390912d5a | /Medicine predictor for The disease.py | 10ca4508a428e2c16874fae6221bb2084fe751b6 | [] | no_license | HarshaVardhanReddy18/HOUSE-MD | 9c9f887cdea34cf285e546ee01c9e60539bc2e6c | 659340a672ec9a89040ec614b8204b58c732339c | refs/heads/master | 2022-12-19T18:40:59.666603 | 2020-10-09T17:22:02 | 2020-10-09T17:22:02 | 296,778,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 19 12:26:24 2020
@author: shaur
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
os.chdir("D:\data sets")
dataset = pd.read_csv('web_sacrapped.csv', delimiter = ',', quoting = 3)
a=dataset.iloc[:,:2]
x=a.iloc[:,:1]
y=a.iloc[:,1:2]
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
for i in range(0, 213):
review = re.sub('[^a-zA-Z]', ' ', x['a'][i])
review = review.lower()
review = review.split()
ps = PorterStemmer()
all_stopwords = stopwords.words('english')
review = ' '.join(review)
corpus.append(review)
corpus1 = []
for i in range(0, 213):
review1 = re.sub('[^a-zA-Z]', ' ', y['b'][i])
review1 = review1.lower()
review1 = review1.split()
ps = PorterStemmer()
review1 = ' '.join(review1)
corpus1.append(review1)
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
| [
"noreply@github.com"
] | HarshaVardhanReddy18.noreply@github.com |
45f343096530fa01c5f2708f14403031fa6baa1f | 5332fef91e044555e605bb37cbef7c4afeaaadb0 | /hy-data-analysis-with-python-2020/part06-e07_binding_sites/src/binding_sites.py | 6baad43f425d059dd9d258e457e1d88a1b708b0e | [] | no_license | nopomi/hy-data-analysis-python-2019 | f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8 | 464685cb377cfdeee890a008fbfbd9ed6e3bcfd0 | refs/heads/master | 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
from sklearn.metrics import pairwise_distances
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
def find_permutation(n_clusters, real_labels, labels):
    """Map each cluster id to the most common true label among its points.

    Returns a list ``permutation`` where ``permutation[i]`` is the majority
    true label inside cluster ``i``, so predicted cluster ids can be
    relabelled for accuracy scoring.

    Implemented with ``np.unique`` instead of ``scipy.stats.mode``: the old
    ``mode(...)[0][0]`` indexing breaks on SciPy >= 1.11 (where ``keepdims``
    defaults to False) and relied on ``import scipy`` exposing
    ``scipy.stats``. Ties resolve to the smallest label, matching
    ``scipy.stats.mode``'s tie-breaking.
    """
    real_labels = np.asarray(real_labels)
    labels = np.asarray(labels)
    permutation = []
    for i in range(n_clusters):
        idx = labels == i
        # np.unique returns sorted values, so argmax picks the smallest
        # value among equally frequent candidates.
        values, counts = np.unique(real_labels[idx], return_counts=True)
        permutation.append(values[np.argmax(counts)])
    return permutation
def toint(x):
    """Encode a nucleotide character as its index in 'ACGT' (-1 if absent)."""
    encoding = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    return encoding.get(x, -1)
def get_features_and_labels(filename):
    """Read a tab-separated file with columns X (DNA string) and y (label).

    Each sequence in column X is encoded character-by-character via
    ``toint``; returns ``(features, labels)`` as NumPy arrays.
    """
    frame = pd.read_csv(filename, sep="\t")
    encoded = [[toint(ch) for ch in seq] for seq in frame["X"]]
    return (np.array(encoded), np.array(frame["y"]))
def plot(distances, method='average', affinity='euclidean'):
    """Show a seaborn clustermap of a square pairwise-distance matrix.

    The same hierarchical linkage (built from the condensed form of
    ``distances``) is used for both rows and columns; ``method``/``affinity``
    only appear in the figure title. Blocks on ``plt.show()``.
    """
    mylinkage = hc.linkage(sp.distance.squareform(distances), method=method)
    g=sns.clustermap(distances, row_linkage=mylinkage, col_linkage=mylinkage )
    g.fig.suptitle(f"Hierarchical clustering using {method} linkage and {affinity} affinity")
    plt.show()
def cluster_euclidean(filename):
    """Cluster the sequences in *filename* with average-linkage Euclidean
    agglomerative clustering and return the accuracy versus the true labels."""
    features, true_labels = get_features_and_labels(filename)
    clusterer = AgglomerativeClustering(linkage="average", affinity="euclidean")
    clusterer.fit(features)
    mapping = find_permutation(2, true_labels, clusterer.labels_)
    relabelled = [mapping[cluster] for cluster in clusterer.labels_]
    score = accuracy_score(true_labels, relabelled)
    distances = pairwise_distances(features, metric="euclidean")
    # plot(distances)
    return score
def cluster_hamming(filename):
    """Cluster the sequences in *filename* using a precomputed Hamming
    distance matrix and return the accuracy versus the true labels."""
    features, true_labels = get_features_and_labels(filename)
    hamming = pairwise_distances(features, metric="hamming")
    clusterer = AgglomerativeClustering(affinity="precomputed", linkage="average")
    clusterer.fit_predict(hamming)
    mapping = find_permutation(2, true_labels, clusterer.labels_)
    relabelled = [mapping[cluster] for cluster in clusterer.labels_]
    score = accuracy_score(true_labels, relabelled)
    # plot(hamming, method="average", affinity="hamming")
    return score
def main():
    """Run both clustering variants on the bundled dataset and print scores."""
    print("Accuracy score with Euclidean affinity is", cluster_euclidean("src/data.seq"))
    print("Accuracy score with Hamming affinity is", cluster_hamming("src/data.seq"))
if __name__ == "__main__":
    main()
| [
"miska.noponen@gmail.com"
] | miska.noponen@gmail.com |
2ee2ccec5dbf7843302c65bae409bb7fdcc29b2a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy3325.py | ba3c7f7f745af20e6283d8398fd4aeb577461651 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,323 | py | # qubit number=4
# total number=44
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position-by-position.

    The XOR of s[i] and t[i] is computed for every position of *s*, and the
    resulting bit string is returned reversed.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits)[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase/bit-flip oracle O_f over n control qubits and 1 target.

    For every n-bit input string on which f evaluates to "1", a
    multi-controlled Toffoli ('noancilla' mode) flips the target; qubits
    whose bit is '0' are sandwiched between X gates so the control fires on
    that exact bit pattern.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        zero_positions = [j for j in range(n) if bits[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit around the oracle for f.

    The gate list below is auto-generated (the ``# number=k`` tags are
    generation-order markers); the oracle acts on the first n-1 qubits plus
    the last qubit, and every qubit is measured into its classical bit at
    the end. The exact gate order is significant — do not reorder.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=23
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.y(input_qubit[1]) # number=37
    prog.h(input_qubit[3]) # number=25
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[3],input_qubit[1]) # number=40
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Apply the oracle to the n-1 "input" qubits and the final target qubit.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=32
    prog.h(input_qubit[0]) # number=41
    prog.cz(input_qubit[3],input_qubit[0]) # number=42
    prog.h(input_qubit[0]) # number=43
    prog.cx(input_qubit[3],input_qubit[0]) # number=26
    prog.z(input_qubit[3]) # number=27
    prog.h(input_qubit[0]) # number=29
    prog.cz(input_qubit[3],input_qubit[0]) # number=30
    prog.h(input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=33
    prog.cz(input_qubit[3],input_qubit[0]) # number=34
    prog.h(input_qubit[0]) # number=35
    prog.h(input_qubit[2]) # number=36
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.y(input_qubit[2]) # number=38
    prog.y(input_qubit[2]) # number=39
    # circuit end
    # Measure every qubit into its matching classical register bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle: f(rep) = (a . rep) XOR b, a Bernstein-Vazirani-style function.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the FakeVigo noisy-backend model and collect measurement counts.
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump counts, transpiled depth and the circuit itself to a CSV report.
    writefile = open("../data/startQiskit_noisy3325.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
deaa0857f040e4558c3a3aa27b0b1ff32bf995cc | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_1/16_1_3_ka_ya_c.py | 7735ad455887347c1c5a1e4c3582e3531bafa93a | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,141 | py | def solve(n, fs):
fs = [f-1 for f in fs]
lp = [None for p in xrange(n)]
for i in xrange(n):
chk = [False for p in xrange(n)]
p = i
cnt = 0
while not chk[p] and not lp[p]:
chk[p] = True
p = fs[p]
cnt += 1
if p == i:
while not lp[p]:
lp[p] = (cnt, 0)
p = fs[p]
for i in xrange(n):
p = i
cnt = 0
while not lp[p]:
p = fs[p]
cnt += 1
l, b = lp[p]
if cnt > b:
lp[p] = (l, cnt)
res = 0
tmp = 0
for i in xrange(n):
if lp[i]:
l, b = lp[i]
if l == 2:
j = fs[i]
_, bj = lp[j]
tmp += l + b + bj
else:
if l > res:
res = l
if tmp / 2 > res:
res = tmp / 2
return res
# Python 2 driver (print statement / raw_input): read T test cases in
# Code Jam format and emit "Case #i: answer" for each.
T = input()
for i in xrange(1, T+1):
    N = input()
    Fs = map(int, raw_input().split())
    print 'Case #{}: {}'.format(i, solve(N, Fs))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
e9bdcafd637d894057834d086307fedfa9f62b56 | 17a1529d0403a8f2b5b1080305a0b61de61c1477 | /api/radiam/api/tests/permissionstests/datasetdatacollectionmethodpermissionstests.py | 0617cc4fd8c24ef85695bc483c62d91b49779f31 | [
"MIT"
] | permissive | usask-rc/radiam | 903dc6f21d17e371141a642d94e877ec993c3a66 | 6db6794fd1811b316dee6f6661986e027d8a594b | refs/heads/master | 2022-05-15T07:48:48.183526 | 2022-04-19T15:01:48 | 2022-04-19T15:01:48 | 237,302,758 | 2 | 1 | MIT | 2022-04-19T14:58:27 | 2020-01-30T20:48:56 | Python | UTF-8 | Python | false | false | 14,941 | py | import json
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from django.urls import reverse
from radiam.api.models import (
User, Dataset, DataCollectionMethod, DatasetDataCollectionMethod
)
from radiam.api.views import DatasetDataCollectionMethodViewSet
class TestSuperuserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Superuser role: full read and write access is expected (200/201).
    """
    fixtures = ['userpermissions']
    def setUp(self):
        # Build raw DRF requests and authenticate them as the superuser.
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='admin')
    def test_superuser_read_datasetdatacollectionmethod_list(self):
        """
        Test Superuser can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_superuser_write_datasetdatacollectionmethod_list(self):
        """
        Test Superuser can write DatasetDataCollectionMethod list
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')
        body = {
            'dataset': str(dataset.id),
            'data_collection_method': str(datacollectionmethod.id)
        }
        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)
        self.assertContains(response=response, text="", status_code=201)
    def test_superuser_read_datasetdatacollectionmethod_detail(self):
        """
        Test Superuser can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_superuser_write_datasetdatacollectionmethod_detail(self):
        """
        Test Superuser can write a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')
        # NOTE(review): `body` is built but never passed to factory.patch,
        # so this PATCH carries no payload — confirm whether that is intended.
        body = {
            "data_collection_method": reverse('datacollectionmethod-detail', kwargs={'pk': data_collection_method.id})
        }
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
class TestAdminUserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    project Admin role: full read and write access is expected (200/201).
    """
    fixtures = ['userpermissions']
    def setUp(self):
        # testuser1 is the fixture's project-admin user.
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='testuser1')
    def test_admin_user_read_datasetdatacollectionmethod_list(self):
        """
        Test Admin User can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_admin_user_write_datasetdatacollectionmethod_list(self):
        """
        Test Admin user can write DatasetDataCollectionMethod list
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')
        body = {
            'dataset': str(dataset.id),
            'data_collection_method': str(datacollectionmethod.id)
        }
        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=201)
    def test_adminuser_read_datasetdatacollectionmethod_detail(self):
        """
        Test Admin user can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_admin_user_write_datasetdatacollectionmethod_detail(self):
        """
        Test Admin User can write a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')
        # NOTE(review): `body` is built but never passed to factory.patch,
        # so this PATCH carries no payload — confirm whether that is intended.
        body = {
            "data_collection_method": reverse('datacollectionmethod-detail', kwargs={'pk': data_collection_method.id})
        }
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
class TestManagerUserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Manager role: read access is allowed (200) but writes are denied (403).
    """
    fixtures = ['userpermissions']
    def setUp(self):
        # testuser2 is the fixture's manager user.
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='testuser2')
    def test_manager_user_read_datasetdatacollectionmethod_list(self):
        """
        Test Manager User can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_manager_user_write_datasetdatacollectionmethod_list(self):
        """
        Test Manager user cannot write DatasetDataCollectionMethod list
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')
        body = {
            'dataset': str(dataset.id),
            'data_collection_method': reverse('datacollectionmethod-detail', kwargs={'pk': datacollectionmethod.id})
        }
        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=403)
    def test_manager_user_read_datasetdatacollectionmethod_detail(self):
        """
        Test Manager user can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_manager_user_write_datasetdatacollectionmethod_detail(self):
        """
        Test Manager User cannot write a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')
        # NOTE(review): `body` is built but never passed to factory.patch,
        # so this PATCH carries no payload — confirm whether that is intended.
        body = {
            "data_collection_method": str(data_collection_method.id)
        }
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=403)
class TestMemberUserDatasetDataCollectionMethodPermissions(APITestCase):
    """
    Test response codes for DatasetDataCollectionMethod endpoints for the
    Member role: read access is allowed (200) but writes are denied (403).
    """
    fixtures = ['userpermissions']
    def setUp(self):
        # testuser3 is the fixture's plain member user.
        self.factory = APIRequestFactory()
        self.user = User.objects.get(username='testuser3')
    def test_member_user_read_datasetdatacollectionmethod_list(self):
        """
        Test Member User can read DatasetDataCollectionMethod list
        """
        request = self.factory.get(reverse('datasetdatacollectionmethod-list'))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'list'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_member_user_write_datasetdatacollectionmethod_list(self):
        """
        Test Member user cannot write DatasetDataCollectionMethod list
        """
        datacollectionmethod = DataCollectionMethod.objects.get(label='datacollection.method.other')
        dataset = Dataset.objects.get(title='Research Is Fun')
        body = {
            'dataset': str(dataset.id),
            'data_collection_method': str(datacollectionmethod.id)
        }
        request = self.factory.post(
            reverse('datasetdatacollectionmethod-list'),
            json.dumps(body),
            content_type='application/json'
        )
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'post': 'create'})(request)
        self.assertContains(
            response=response,
            text="",
            status_code=403)
    def test_member_user_read_datasetdatacollectionmethod_detail(self):
        """
        Test Member user can read a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        request = self.factory.get(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'get': 'retrieve'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=200)
    def test_member_user_write_datasetdatacollectionmethod_detail(self):
        """
        Test Member User cannot write a DatasetDataCollectionMethod detail
        """
        detail_datasetdatacollectionmethod = \
            DatasetDataCollectionMethod.objects.get(id='f9d1402a-2301-4bf8-b4cd-70590e3ca4b7')
        data_collection_method = DataCollectionMethod.objects.get(label='datacollection.method.other')
        # NOTE(review): `body` is built but never passed to factory.patch,
        # so this PATCH carries no payload — confirm whether that is intended.
        body = {
            "data_collection_method": str(data_collection_method.id)
        }
        request = self.factory.patch(
            reverse('datasetdatacollectionmethod-detail',
                    args=[detail_datasetdatacollectionmethod.id]))
        request.user = self.user
        force_authenticate(request, user=request.user)
        response = DatasetDataCollectionMethodViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_datasetdatacollectionmethod.id)
        self.assertContains(
            response=response,
            text="",
            status_code=403)
| [
"todd.trann@usask.ca"
] | todd.trann@usask.ca |
9646ac4cc55d9a5e30e41d7546f3ca1df7b888f9 | f0d9ba8456cdad2b2fa711fa8975b41da7af1784 | /worms/tests/__init__.py | 2b9503765bab2d60bb03f655ddf70c5209239ab5 | [
"Apache-2.0"
] | permissive | willsheffler/worms | f1d893d4f06b421abdd4d1e526b43c2e132e19a2 | 27993e33a43474d647ecd8277b210d4206858f0b | refs/heads/master | 2023-04-08T01:18:33.656774 | 2022-06-09T20:04:55 | 2022-06-09T20:04:55 | 118,678,808 | 6 | 5 | NOASSERTION | 2021-10-05T22:28:24 | 2018-01-23T22:30:45 | Python | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
"""Unit test package for worms."""
import os
import pytest
# Feature-availability flags and skip decorators for the worms test suite.
# only_if_* are identity decorators when the dependency is present and
# pytest.mark.skip when it is not.
try:
    import pyrosetta
    HAVE_PYROSETTA = True
    only_if_pyrosetta = lambda x: x
    try:
        import pyrosetta.distributed
        HAVE_PYROSETTA_DISTRIBUTED = True
        only_if_pyrosetta_distributed = lambda x: x
    except ImportError:
        HAVE_PYROSETTA_DISTRIBUTED = False
        only_if_pyrosetta_distributed = pytest.mark.skip
except ImportError:
    HAVE_PYROSETTA = HAVE_PYROSETTA_DISTRIBUTED = False
    only_if_pyrosetta = only_if_pyrosetta_distributed = pytest.mark.skip
# Skip JIT-dependent tests when numba's JIT is disabled via the environment.
only_if_jit = lambda x: x
if "NUMBA_DISABLE_JIT" in os.environ:
    only_if_jit = pytest.mark.skip
| [
"willsheffler@gmail.com"
] | willsheffler@gmail.com |
100bda6658216fe7f659d6bfc212e9f3abf66c93 | 40b407afc90402e8374f44a504c286c962f72f76 | /run.py | 763bbea9b9844c06c4c16006597f6ee15e168b7d | [] | no_license | ThatTechGuy/RF-DCA | bce60ca5de196fa4408ff229cefb01b1ec06746b | ccfd25f6e51515ed47127f95c3dc7ac61f5e1c4c | refs/heads/master | 2021-01-17T20:02:40.371378 | 2016-05-07T14:59:02 | 2016-05-07T14:59:02 | 45,054,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | from dca import app
if __name__ == '__main__':
    # Start the development server for the dca web application.
    app.run()
| [
"rmartin@fullsail.edu"
] | rmartin@fullsail.edu |
608b82ddbdfb60e287c7eefdc12c1720cb30fdaf | 497ead1ee1e09a2530aa771ae059989e341684d7 | /python/cuml/tests/test_preprocessing.py | 5f571d8fd3b65acdba8035c0565dded708eeb5ec | [
"Apache-2.0"
] | permissive | xieliaing/cuml | 193f5753696bbfd4de8e3eaef919c18da2fd1d1a | 78092ddde28d5a810e45d6186f049c1309121408 | refs/heads/master | 2022-11-10T16:45:38.818055 | 2022-11-03T23:12:07 | 2022-11-03T23:12:07 | 159,592,316 | 0 | 0 | Apache-2.0 | 2018-11-29T01:59:07 | 2018-11-29T01:59:07 | null | UTF-8 | Python | false | false | 36,389 | py | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.preprocessing import \
Binarizer as cuBinarizer, \
FunctionTransformer as cuFunctionTransformer, \
KBinsDiscretizer as cuKBinsDiscretizer, \
KernelCenterer as cuKernelCenterer, \
MaxAbsScaler as cuMaxAbsScaler, \
MinMaxScaler as cuMinMaxScaler, \
MissingIndicator as cuMissingIndicator, \
Normalizer as cuNormalizer, \
PolynomialFeatures as cuPolynomialFeatures, \
PowerTransformer as cuPowerTransformer, \
QuantileTransformer as cuQuantileTransformer, \
RobustScaler as cuRobustScaler, \
SimpleImputer as cuSimpleImputer, \
StandardScaler as cuStandardScaler
from cuml.preprocessing import \
add_dummy_feature as cu_add_dummy_feature, \
binarize as cu_binarize, \
maxabs_scale as cu_maxabs_scale, \
minmax_scale as cu_minmax_scale, \
normalize as cu_normalize, \
power_transform as cu_power_transform, \
quantile_transform as cu_quantile_transform, \
robust_scale as cu_robust_scale, \
scale as cu_scale
from sklearn.preprocessing import \
Binarizer as skBinarizer, \
FunctionTransformer as skFunctionTransformer, \
KBinsDiscretizer as skKBinsDiscretizer, \
KernelCenterer as skKernelCenterer, \
MaxAbsScaler as skMaxAbsScaler, \
MinMaxScaler as skMinMaxScaler, \
Normalizer as skNormalizer, \
PolynomialFeatures as skPolynomialFeatures, \
PowerTransformer as skPowerTransformer, \
QuantileTransformer as skQuantileTransformer, \
RobustScaler as skRobustScaler, \
StandardScaler as skStandardScaler
from sklearn.preprocessing import \
add_dummy_feature as sk_add_dummy_feature, \
binarize as sk_binarize, \
maxabs_scale as sk_maxabs_scale, \
minmax_scale as sk_minmax_scale, \
normalize as sk_normalize, \
power_transform as sk_power_transform, \
quantile_transform as sk_quantile_transform, \
robust_scale as sk_robust_scale, \
scale as sk_scale
from sklearn.impute import \
MissingIndicator as skMissingIndicator, \
SimpleImputer as skSimpleImputer
from cuml.testing.test_preproc_utils import \
clf_dataset, int_dataset, blobs_dataset, \
nan_filled_positive, \
sparse_nan_filled_positive, \
sparse_clf_dataset, \
sparse_blobs_dataset, \
sparse_int_dataset, \
sparse_imputer_dataset, \
sparse_dataset_with_coo # noqa: F401
from cuml.testing.test_preproc_utils import assert_allclose
from cuml.metrics import pairwise_kernels
import numpy as np
import cupy as cp
import cupyx as cpx
import scipy
@pytest.mark.parametrize("feature_range", [(0, 1), (.1, 0.8)])
def test_minmax_scaler(failure_logger, clf_dataset,  # noqa: F811
                       feature_range):
    """cuML MinMaxScaler matches sklearn on transform and inverse_transform,
    and preserves the input container type."""
    X_np, X = clf_dataset
    scaler = cuMinMaxScaler(feature_range=feature_range, copy=True)
    t_X = scaler.fit_transform(X)
    r_X = scaler.inverse_transform(t_X)
    assert type(t_X) == type(X)
    assert type(r_X) == type(t_X)
    scaler = skMinMaxScaler(feature_range=feature_range, copy=True)
    sk_t_X = scaler.fit_transform(X_np)
    sk_r_X = scaler.inverse_transform(sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("feature_range", [(0, 1), (.1, 0.8)])
def test_minmax_scale(failure_logger, clf_dataset,  # noqa: F811
                      axis, feature_range):
    """Functional minmax_scale matches sklearn along both axes."""
    X_np, X = clf_dataset
    t_X = cu_minmax_scale(X, feature_range=feature_range, axis=axis)
    assert type(t_X) == type(X)
    sk_t_X = sk_minmax_scale(X_np, feature_range=feature_range, axis=axis)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
def test_standard_scaler(failure_logger, clf_dataset,  # noqa: F811
                         with_mean, with_std):
    """cuML StandardScaler matches sklearn for all centering/scaling
    combinations and round-trips through inverse_transform."""
    X_np, X = clf_dataset
    scaler = cuStandardScaler(with_mean=with_mean,
                              with_std=with_std,
                              copy=True)
    t_X = scaler.fit_transform(X)
    r_X = scaler.inverse_transform(t_X)
    assert type(t_X) == type(X)
    assert type(r_X) == type(t_X)
    scaler = skStandardScaler(with_mean=with_mean,
                              with_std=with_std,
                              copy=True)
    sk_t_X = scaler.fit_transform(X_np)
    sk_r_X = scaler.inverse_transform(sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("with_std", [True, False])
def test_standard_scaler_sparse(failure_logger,
                                sparse_clf_dataset,  # noqa: F811
                                with_std):
    """StandardScaler on sparse input (with_mean=False, since centering
    would densify) matches sklearn and keeps sparsity."""
    X_np, X = sparse_clf_dataset
    scaler = cuStandardScaler(with_mean=False, with_std=with_std, copy=True)
    t_X = scaler.fit_transform(X)
    r_X = scaler.inverse_transform(t_X)
    # assert type(t_X) == type(X)
    # assert type(r_X) == type(t_X)
    # Sparsity must be preserved whether the input is a cupyx or scipy matrix.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(t_X)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(t_X)
    if cpx.scipy.sparse.issparse(t_X):
        assert cpx.scipy.sparse.issparse(r_X)
    if scipy.sparse.issparse(t_X):
        assert scipy.sparse.issparse(r_X)
    scaler = skStandardScaler(copy=True, with_mean=False, with_std=with_std)
    sk_t_X = scaler.fit_transform(X_np)
    sk_r_X = scaler.inverse_transform(sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
# The numerical warning is triggered when centering or scaling
# cannot be done as single steps. Its display can be safely disabled.
# For more information see : https://github.com/rapidsai/cuml/issues/4203
@pytest.mark.filterwarnings("ignore:Numerical issues::")
def test_scale(failure_logger, clf_dataset, axis,  # noqa: F811
               with_mean, with_std):
    """Functional scale matches sklearn across axes and flag combinations."""
    X_np, X = clf_dataset
    t_X = cu_scale(X, axis=axis, with_mean=with_mean,
                   with_std=with_std, copy=True)
    assert type(t_X) == type(X)
    sk_t_X = sk_scale(X_np, axis=axis, with_mean=with_mean,
                      with_std=with_std, copy=True)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("with_std", [True, False])
def test_scale_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
with_std):
X_np, X = sparse_clf_dataset
t_X = cu_scale(X, with_mean=False, with_std=with_std, copy=True)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_scale(X_np, with_mean=False, with_std=with_std, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("axis", [0, 1])
def test_maxabs_scale(failure_logger, clf_dataset, axis):  # noqa: F811
    """Functional maxabs_scale must agree with sklearn on dense inputs."""
    X_np, X = clf_dataset
    result = cu_maxabs_scale(X, axis=axis)
    assert type(result) == type(X)
    assert_allclose(result, sk_maxabs_scale(X_np, axis=axis))
def test_maxabs_scaler(failure_logger, clf_dataset): # noqa: F811
X_np, X = clf_dataset
scaler = cuMaxAbsScaler(copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
scaler = skMaxAbsScaler(copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
def test_maxabs_scaler_sparse(failure_logger,
sparse_clf_dataset): # noqa: F811
X_np, X = sparse_clf_dataset
scaler = cuMaxAbsScaler(copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
# assert type(t_X) == type(X)
# assert type(r_X) == type(t_X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
if cpx.scipy.sparse.issparse(t_X):
assert cpx.scipy.sparse.issparse(r_X)
if scipy.sparse.issparse(t_X):
assert scipy.sparse.issparse(r_X)
scaler = skMaxAbsScaler(copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
def test_normalizer(failure_logger, clf_dataset, norm): # noqa: F811
X_np, X = clf_dataset
normalizer = cuNormalizer(norm=norm, copy=True)
t_X = normalizer.fit_transform(X)
assert type(t_X) == type(X)
normalizer = skNormalizer(norm=norm, copy=True)
sk_t_X = normalizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
def test_normalizer_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
norm):
X_np, X = sparse_clf_dataset
if X.format == 'csc':
pytest.skip("Skipping CSC matrices")
normalizer = cuNormalizer(norm=norm, copy=True)
t_X = normalizer.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
normalizer = skNormalizer(norm=norm, copy=True)
sk_t_X = normalizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
@pytest.mark.parametrize("return_norm", [True, False])
def test_normalize(failure_logger, clf_dataset, axis, norm, # noqa: F811
return_norm):
X_np, X = clf_dataset
if return_norm:
t_X, t_norms = cu_normalize(X, axis=axis, norm=norm,
return_norm=return_norm)
sk_t_X, sk_t_norms = sk_normalize(X_np, axis=axis, norm=norm,
return_norm=return_norm)
assert_allclose(t_norms, sk_t_norms)
else:
t_X = cu_normalize(X, axis=axis, norm=norm, return_norm=return_norm)
sk_t_X = sk_normalize(X_np, axis=axis, norm=norm,
return_norm=return_norm)
assert type(t_X) == type(X)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("norm", ['l1', 'l2', 'max'])
def test_normalize_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
norm):
X_np, X = sparse_clf_dataset
axis = 0 if X.format == 'csc' else 1
t_X = cu_normalize(X, axis=axis, norm=norm)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_normalize(X_np, axis=axis, norm=norm)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent",
                                      "constant"])
@pytest.mark.parametrize("missing_values", [0, 1, np.nan])
@pytest.mark.parametrize("add_indicator", [False, True])
def test_imputer(failure_logger, random_seed, int_dataset,  # noqa: F811
                 strategy, missing_values, add_indicator):
    """SimpleImputer: cuML must fill missing values like sklearn for every
    strategy, missing-value marker, and with/without the missing indicator."""
    # The fixture supplies three variants of the same data, one per marker.
    zero_filled, one_filled, nan_filled = int_dataset
    if missing_values == 0:
        X_np, X = zero_filled
    elif missing_values == 1:
        X_np, X = one_filled
    else:
        X_np, X = nan_filled
    np.random.seed(random_seed)
    # fill_value only matters for strategy='constant'; pick one at random
    # (seeded) so both implementations receive the same value.
    fill_value = np.random.randint(10, size=1)[0]
    imputer = cuSimpleImputer(copy=True, missing_values=missing_values,
                              strategy=strategy, fill_value=fill_value,
                              add_indicator=add_indicator)
    t_X = imputer.fit_transform(X)
    assert type(t_X) == type(X)
    imputer = skSimpleImputer(copy=True, missing_values=missing_values,
                              strategy=strategy, fill_value=fill_value,
                              add_indicator=add_indicator)
    sk_t_X = imputer.fit_transform(X_np)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent",
"constant"])
def test_imputer_sparse(sparse_imputer_dataset, # noqa: F811
strategy):
missing_values, X_sp, X = sparse_imputer_dataset
if X.format == 'csr':
pytest.skip("Skipping CSR matrices")
fill_value = np.random.randint(10, size=1)[0]
imputer = cuSimpleImputer(copy=True, missing_values=missing_values,
strategy=strategy, fill_value=fill_value)
t_X = imputer.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
imputer = skSimpleImputer(copy=True, missing_values=missing_values,
strategy=strategy, fill_value=fill_value)
sk_t_X = imputer.fit_transform(X_sp)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("degree", [2, 3])
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("order", ['C', 'F'])
def test_poly_features(failure_logger, clf_dataset, degree, # noqa: F811
interaction_only, include_bias, order):
X_np, X = clf_dataset
polyfeatures = cuPolynomialFeatures(degree=degree, order=order,
interaction_only=interaction_only,
include_bias=include_bias)
t_X = polyfeatures.fit_transform(X)
assert type(X) == type(t_X)
cu_feature_names = polyfeatures.get_feature_names()
if isinstance(t_X, np.ndarray):
if order == 'C':
assert t_X.flags['C_CONTIGUOUS']
elif order == 'F':
assert t_X.flags['F_CONTIGUOUS']
polyfeatures = skPolynomialFeatures(degree=degree, order=order,
interaction_only=interaction_only,
include_bias=include_bias)
sk_t_X = polyfeatures.fit_transform(X_np)
sk_feature_names = polyfeatures.get_feature_names()
assert_allclose(t_X, sk_t_X, rtol=0.1, atol=0.1)
assert sk_feature_names == cu_feature_names
@pytest.mark.parametrize("degree", [2, 3])
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
def test_poly_features_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
degree, interaction_only, include_bias):
X_np, X = sparse_clf_dataset
polyfeatures = cuPolynomialFeatures(degree=degree,
interaction_only=interaction_only,
include_bias=include_bias)
t_X = polyfeatures.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
polyfeatures = skPolynomialFeatures(degree=degree,
interaction_only=interaction_only,
include_bias=include_bias)
sk_t_X = polyfeatures.fit_transform(X_np)
assert_allclose(t_X, sk_t_X, rtol=0.1, atol=0.1)
@pytest.mark.parametrize("value", [1.0, 42])
def test_add_dummy_feature(failure_logger, clf_dataset, value):  # noqa: F811
    """add_dummy_feature must match sklearn and keep the input array type."""
    X_np, X = clf_dataset
    augmented = cu_add_dummy_feature(X, value=value)
    assert type(augmented) == type(X)
    expected = sk_add_dummy_feature(X_np, value=value)
    assert_allclose(augmented, expected)
@pytest.mark.parametrize("value", [1.0, 42])
def test_add_dummy_feature_sparse(failure_logger,
sparse_dataset_with_coo, # noqa: F811
value):
X_np, X = sparse_dataset_with_coo
t_X = cu_add_dummy_feature(X, value=value)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_add_dummy_feature(X_np, value=value)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarize(failure_logger, clf_dataset, threshold):  # noqa: F811
    """Functional binarize must agree with sklearn for dense inputs."""
    X_np, X = clf_dataset
    binarized = cu_binarize(X, threshold=threshold, copy=True)
    assert type(binarized) == type(X)
    expected = sk_binarize(X_np, threshold=threshold, copy=True)
    assert_allclose(binarized, expected)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarize_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
threshold):
X_np, X = sparse_clf_dataset
t_X = cu_binarize(X, threshold=threshold, copy=True)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_binarize(X_np, threshold=threshold, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarizer(failure_logger, clf_dataset, threshold): # noqa: F811
X_np, X = clf_dataset
binarizer = cuBinarizer(threshold=threshold, copy=True)
t_X = binarizer.fit_transform(X)
assert type(t_X) == type(X)
binarizer = skBinarizer(threshold=threshold, copy=True)
sk_t_X = binarizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("threshold", [0., 1.])
def test_binarizer_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
threshold):
X_np, X = sparse_clf_dataset
binarizer = cuBinarizer(threshold=threshold, copy=True)
t_X = binarizer.fit_transform(X)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
binarizer = skBinarizer(threshold=threshold, copy=True)
sk_t_X = binarizer.fit_transform(X_np)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scaler(failure_logger, clf_dataset, # noqa: F811
with_centering, with_scaling, quantile_range):
X_np, X = clf_dataset
scaler = cuRobustScaler(with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
t_X = scaler.fit_transform(X)
r_X = scaler.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
scaler = skRobustScaler(with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
sk_t_X = scaler.fit_transform(X_np)
sk_r_X = scaler.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scaler_sparse(failure_logger, sparse_clf_dataset,  # noqa: F811
                              with_scaling, quantile_range):
    """RobustScaler on sparse data: centering is disabled (it would densify
    the matrix); cuML scaling and its inverse must match sklearn."""
    X_np, X = sparse_clf_dataset
    # The scaler works column-wise, so ensure CSC layout.
    if X.format != 'csc':
        X = X.tocsc()
    scaler = cuRobustScaler(with_centering=False,
                            with_scaling=with_scaling,
                            quantile_range=quantile_range,
                            copy=True)
    t_X = scaler.fit_transform(X)
    r_X = scaler.inverse_transform(t_X)
    # assert type(t_X) == type(X)
    # assert type(r_X) == type(t_X)
    # Outputs must stay sparse on the same backend (cupyx or scipy).
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(t_X)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(t_X)
    if cpx.scipy.sparse.issparse(t_X):
        assert cpx.scipy.sparse.issparse(r_X)
    if scipy.sparse.issparse(t_X):
        assert scipy.sparse.issparse(r_X)
    scaler = skRobustScaler(with_centering=False,
                            with_scaling=with_scaling,
                            quantile_range=quantile_range,
                            copy=True)
    sk_t_X = scaler.fit_transform(X_np)
    sk_r_X = scaler.inverse_transform(sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scale(failure_logger, clf_dataset, # noqa: F811
with_centering, axis, with_scaling, quantile_range):
X_np, X = clf_dataset
t_X = cu_robust_scale(X, axis=axis,
with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
assert type(t_X) == type(X)
sk_t_X = sk_robust_scale(X_np, axis=axis,
with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25., 75.), (10., 90.)])
def test_robust_scale_sparse(failure_logger, sparse_clf_dataset, # noqa: F811
axis, with_scaling, quantile_range):
X_np, X = sparse_clf_dataset
if X.format != 'csc' and axis == 0:
X = X.tocsc()
elif X.format != 'csr' and axis == 1:
X = X.tocsr()
t_X = cu_robust_scale(X, axis=axis,
with_centering=False,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
# assert type(t_X) == type(X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
sk_t_X = sk_robust_scale(X_np, axis=axis,
with_centering=False,
with_scaling=with_scaling,
quantile_range=quantile_range,
copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("n_bins", [5, 20])
@pytest.mark.parametrize("encode", ['ordinal', 'onehot-dense', 'onehot'])
@pytest.mark.parametrize("strategy", [
    pytest.param('uniform', marks=pytest.mark.xfail(
        strict=False,
        reason='Intermittent mismatch with sklearn'
               ' (https://github.com/rapidsai/cuml/issues/3481)'
    )),
    pytest.param('quantile', marks=pytest.mark.xfail(
        strict=False,
        reason='Intermittent mismatch with sklearn'
               ' (https://github.com/rapidsai/cuml/issues/2933)'
    )),
    'kmeans'
])
def test_kbinsdiscretizer(failure_logger, blobs_dataset, n_bins,  # noqa: F811
                          encode, strategy):
    """KBinsDiscretizer: cuML binning and its inverse must match sklearn
    across bin counts, encodings, and binning strategies."""
    X_np, X = blobs_dataset
    transformer = cuKBinsDiscretizer(n_bins=n_bins,
                                     encode=encode,
                                     strategy=strategy)
    t_X = transformer.fit_transform(X)
    r_X = transformer.inverse_transform(t_X)
    # 'onehot' returns a sparse matrix, so the type check only applies to
    # the dense encodings.
    if encode != 'onehot':
        assert type(t_X) == type(X)
        assert type(r_X) == type(t_X)
    transformer = skKBinsDiscretizer(n_bins=n_bins,
                                     encode=encode,
                                     strategy=strategy)
    sk_t_X = transformer.fit_transform(X_np)
    sk_r_X = transformer.inverse_transform(sk_t_X)
    if strategy == 'kmeans':
        # kmeans bin edges can differ slightly between implementations, so a
        # looser, ratio-based tolerance is used for this strategy.
        assert_allclose(t_X, sk_t_X, ratio_tol=0.2)
    else:
        assert_allclose(t_X, sk_t_X)
        assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("missing_values", [0, 1, np.nan])
@pytest.mark.parametrize("features", ['missing-only', 'all'])
def test_missing_indicator(failure_logger, int_dataset,  # noqa: F811
                           missing_values, features):
    """MissingIndicator: cuML must flag missing entries like sklearn via
    both fit_transform and the separate fit + transform path."""
    # Select the dataset variant that uses the requested missing marker.
    zero_filled, one_filled, nan_filled = int_dataset
    if missing_values == 0:
        X_np, X = zero_filled
    elif missing_values == 1:
        X_np, X = one_filled
    else:
        X_np, X = nan_filled
    indicator = cuMissingIndicator(missing_values=missing_values,
                                   features=features)
    ft_X = indicator.fit_transform(X)
    assert type(ft_X) == type(X)
    indicator.fit(X)
    t_X = indicator.transform(X)
    assert type(t_X) == type(X)
    indicator = skMissingIndicator(missing_values=missing_values,
                                   features=features)
    sk_ft_X = indicator.fit_transform(X_np)
    indicator.fit(X_np)
    sk_t_X = indicator.transform(X_np)
    assert_allclose(ft_X, sk_ft_X)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("features", ['missing-only', 'all'])
def test_missing_indicator_sparse(failure_logger,
sparse_int_dataset, # noqa: F811
features):
X_np, X = sparse_int_dataset
indicator = cuMissingIndicator(features=features,
missing_values=1)
ft_X = indicator.fit_transform(X)
# assert type(ft_X) == type(X)
assert cpx.scipy.sparse.issparse(ft_X) or scipy.sparse.issparse(ft_X)
indicator.fit(X)
t_X = indicator.transform(X)
# assert type(t_X) == type(X)
assert cpx.scipy.sparse.issparse(t_X) or scipy.sparse.issparse(t_X)
indicator = skMissingIndicator(features=features,
missing_values=1)
sk_ft_X = indicator.fit_transform(X_np)
indicator.fit(X_np)
sk_t_X = indicator.transform(X_np)
assert_allclose(ft_X, sk_ft_X)
assert_allclose(t_X, sk_t_X)
def test_function_transformer(clf_dataset): # noqa: F811
X_np, X = clf_dataset
transformer = cuFunctionTransformer(func=cp.exp,
inverse_func=cp.log,
check_inverse=False)
t_X = transformer.fit_transform(X)
r_X = transformer.inverse_transform(t_X)
assert type(t_X) == type(X)
assert type(r_X) == type(t_X)
transformer = skFunctionTransformer(func=np.exp,
inverse_func=np.log,
check_inverse=False)
sk_t_X = transformer.fit_transform(X_np)
sk_r_X = transformer.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
def test_function_transformer_sparse(sparse_clf_dataset): # noqa: F811
X_np, X = sparse_clf_dataset
transformer = cuFunctionTransformer(func=lambda x: x * 2,
inverse_func=lambda x: x / 2,
accept_sparse=True)
t_X = transformer.fit_transform(X)
r_X = transformer.inverse_transform(t_X)
assert cpx.scipy.sparse.issparse(t_X) or scipy.sparse.issparse(t_X)
assert cpx.scipy.sparse.issparse(r_X) or scipy.sparse.issparse(r_X)
transformer = skFunctionTransformer(func=lambda x: x * 2,
inverse_func=lambda x: x / 2,
accept_sparse=True)
sk_t_X = transformer.fit_transform(X_np)
sk_r_X = transformer.inverse_transform(sk_t_X)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ['uniform', 'normal'])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transformer(failure_logger,
                              nan_filled_positive,  # noqa: F811
                              n_quantiles, output_distribution,
                              ignore_implicit_zeros, subsample):
    """QuantileTransformer: cuML must match sklearn's transform, inverse,
    and the fitted quantiles_/references_ attributes (same random_state)."""
    X_np, X = nan_filled_positive
    transformer = \
        cuQuantileTransformer(n_quantiles=n_quantiles,
                              output_distribution=output_distribution,
                              ignore_implicit_zeros=ignore_implicit_zeros,
                              subsample=subsample, random_state=42, copy=True)
    t_X = transformer.fit_transform(X)
    assert type(t_X) == type(X)
    r_X = transformer.inverse_transform(t_X)
    assert type(r_X) == type(t_X)
    # Capture the fitted state so it can be compared against sklearn's.
    quantiles_ = transformer.quantiles_
    references_ = transformer.references_
    transformer = \
        skQuantileTransformer(n_quantiles=n_quantiles,
                              output_distribution=output_distribution,
                              ignore_implicit_zeros=ignore_implicit_zeros,
                              subsample=subsample, random_state=42, copy=True)
    sk_t_X = transformer.fit_transform(X_np)
    sk_r_X = transformer.inverse_transform(sk_t_X)
    sk_quantiles_ = transformer.quantiles_
    sk_references_ = transformer.references_
    assert_allclose(quantiles_, sk_quantiles_)
    assert_allclose(references_, sk_references_)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ['uniform', 'normal'])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transformer_sparse(failure_logger,
sparse_nan_filled_positive, # noqa: F811
n_quantiles, output_distribution,
ignore_implicit_zeros, subsample):
X_np, X = sparse_nan_filled_positive
X_np = X_np.tocsc()
X = X.tocsr().tocsc()
transformer = \
cuQuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
ignore_implicit_zeros=ignore_implicit_zeros,
subsample=subsample, random_state=42, copy=True)
t_X = transformer.fit_transform(X)
t_X = t_X.tocsc()
r_X = transformer.inverse_transform(t_X)
if cpx.scipy.sparse.issparse(X):
assert cpx.scipy.sparse.issparse(t_X)
if scipy.sparse.issparse(X):
assert scipy.sparse.issparse(t_X)
quantiles_ = transformer.quantiles_
references_ = transformer.references_
transformer = \
skQuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
ignore_implicit_zeros=ignore_implicit_zeros,
subsample=subsample, random_state=42, copy=True)
sk_t_X = transformer.fit_transform(X_np)
sk_r_X = transformer.inverse_transform(sk_t_X)
sk_quantiles_ = transformer.quantiles_
sk_references_ = transformer.references_
assert_allclose(quantiles_, sk_quantiles_)
assert_allclose(references_, sk_references_)
assert_allclose(t_X, sk_t_X)
assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ['uniform', 'normal'])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transform(failure_logger, nan_filled_positive, # noqa: F811
axis, n_quantiles, output_distribution,
ignore_implicit_zeros, subsample):
X_np, X = nan_filled_positive
t_X = cu_quantile_transform(X, axis=axis,
n_quantiles=n_quantiles,
output_distribution=output_distribution,
ignore_implicit_zeros=ignore_implicit_zeros,
subsample=subsample,
random_state=42, copy=True)
assert type(t_X) == type(X)
sk_t_X = sk_quantile_transform(X_np, axis=axis,
n_quantiles=n_quantiles,
output_distribution=output_distribution,
ignore_implicit_zeros=ignore_implicit_zeros,
subsample=subsample,
random_state=42, copy=True)
assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("method", ['yeo-johnson', 'box-cox'])
@pytest.mark.parametrize("standardize", [False, True])
def test_power_transformer(failure_logger, nan_filled_positive,  # noqa: F811
                           method, standardize):
    """PowerTransformer: cuML fit_transform, transform, and inverse must all
    match sklearn, and the input container type must be preserved.

    Bug fix: the sklearn reference round trip previously called
    ``transformer.inverse_transform`` (the *cuML* object, via a variable
    misleadingly named ``normalizer``), which made the inverse comparison
    circular. The reference is now computed entirely with sklearn.
    """
    X_np, X = nan_filled_positive
    transformer = cuPowerTransformer(method=method,
                                     standardize=standardize,
                                     copy=True)
    ft_X = transformer.fit_transform(X)
    assert type(ft_X) == type(X)
    t_X = transformer.transform(X)
    assert type(t_X) == type(X)
    r_X = transformer.inverse_transform(t_X)
    assert type(r_X) == type(t_X)
    sk_transformer = skPowerTransformer(method=method,
                                        standardize=standardize,
                                        copy=True)
    sk_t_X = sk_transformer.fit_transform(X_np)
    sk_r_X = sk_transformer.inverse_transform(sk_t_X)
    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("method", ['yeo-johnson', 'box-cox'])
@pytest.mark.parametrize("standardize", [False, True])
def test_power_transform(failure_logger, nan_filled_positive,  # noqa: F811
                         method, standardize):
    """Functional power_transform must agree with sklearn's implementation."""
    X_np, X = nan_filled_positive
    result = cu_power_transform(X, method=method, standardize=standardize)
    assert type(result) == type(X)
    reference = sk_power_transform(X_np, method=method, standardize=standardize)
    assert_allclose(result, reference)
def test_kernel_centerer():
    """KernelCenterer: cuML must center a precomputed Gram matrix like
    sklearn."""
    X = np.array([[1., -2., 2.],
                  [-2., 1., 3.],
                  [4., 1., -2.]])
    # Build a linear-kernel Gram matrix; the centerer operates on kernels,
    # not on raw feature matrices.
    K = pairwise_kernels(X, metric='linear')
    model = cuKernelCenterer()
    model.fit(K)
    t_X = model.transform(K, copy=True)
    assert type(t_X) == type(X)
    model = skKernelCenterer()
    sk_t_X = model.fit_transform(K)
    assert_allclose(sk_t_X, t_X)
def test__repr__():
    """Every cuML preprocessing estimator must repr() as '<ClassName>()'."""
    expected_reprs = {
        cuBinarizer: 'Binarizer()',
        cuFunctionTransformer: 'FunctionTransformer()',
        cuKBinsDiscretizer: 'KBinsDiscretizer()',
        cuKernelCenterer: 'KernelCenterer()',
        cuMaxAbsScaler: 'MaxAbsScaler()',
        cuMinMaxScaler: 'MinMaxScaler()',
        cuMissingIndicator: 'MissingIndicator()',
        cuNormalizer: 'Normalizer()',
        cuPolynomialFeatures: 'PolynomialFeatures()',
        cuQuantileTransformer: 'QuantileTransformer()',
        cuRobustScaler: 'RobustScaler()',
        cuSimpleImputer: 'SimpleImputer()',
        cuStandardScaler: 'StandardScaler()',
    }
    for estimator_cls, expected in expected_reprs.items():
        assert estimator_cls().__repr__() == expected
| [
"noreply@github.com"
] | xieliaing.noreply@github.com |
e25fd776db4cf8dfcdb7f6e854d3db92deb6dbc6 | 00da73f35308b860ef9a3c6eb6cdaf8c89608f57 | /deps/requests/adapters.py | cdaabdbee6f16c829f051891b4fe6ff7b718df96 | [
"MIT"
] | permissive | kylebebak/Requester | 32abf8a56ba0e9e42fdd25b13ce48d40a87f20e0 | 7f177bc417c45fd1792c6020543a4c6909e3ea21 | refs/heads/master | 2022-07-17T11:09:30.238568 | 2022-05-05T17:31:48 | 2022-05-05T17:38:56 | 89,746,594 | 333 | 16 | MIT | 2021-02-23T14:43:12 | 2017-04-28T21:37:08 | Python | UTF-8 | Python | false | false | 20,880 | py | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
    from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
    # PySocks (the 'socks' extra) is not installed. Keep the name defined so
    # the failure happens lazily, when a SOCKS proxy is actually requested,
    # with a requests-level error instead of an ImportError at import time.
    def SOCKSProxyManager(*args, **kwargs):
        raise InvalidSchema("Missing dependencies for SOCKS support.")
# Defaults for HTTPAdapter's connection pooling (passed to urllib3).
DEFAULT_POOLBLOCK = False    # don't block when no free pooled connection exists
DEFAULT_POOLSIZE = 10        # number of pools cached / connections kept per pool
DEFAULT_RETRIES = 0          # requests performs no retries by default
DEFAULT_POOL_TIMEOUT = None  # pool wait timeout; None presumably means wait forever — confirm in send()
class BaseAdapter(object):
    """The Base Transport Adapter.

    Abstract interface: concrete adapters must implement both ``send`` and
    ``close``.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        raise NotImplementedError

    def close(self):
        """Cleans up adapter specific items."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        if max_retries == DEFAULT_RETRIES:
            # Default: zero retries. read=False makes urllib3 never retry
            # requests where data may already have reached the server (see
            # the class docstring's note on failed DNS/connect vs. reads).
            self.max_retries = Retry(0, read=False)
        else:
            # Accept either an int or an already-built urllib3 Retry object.
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}

        super(HTTPAdapter, self).__init__()

        # Stored both as attributes (for pickling via __attrs__) and passed
        # straight to init_poolmanager below.
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block

        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
    def __setstate__(self, state):
        """Pickle support: restore attributes, then rebuild the pool manager."""
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}

        for attr, value in state.items():
            setattr(self, attr, value)

        # The urllib3 PoolManager is not pickled; recreate it from the
        # restored pool configuration.
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block

        # strict=True is forwarded to urllib3; presumably it enforces strict
        # HTTP response parsing — confirm against the urllib3 version in use.
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        :rtype: urllib3.ProxyManager
        """
        if proxy in self.proxy_manager:
            # Reuse the cached manager for this proxy URL.
            manager = self.proxy_manager[proxy]
        elif proxy.lower().startswith('socks'):
            # SOCKS proxies carry credentials in the URL itself; extract them
            # and hand them to the SOCKS manager explicitly.
            username, password = get_auth_from_url(proxy)
            manager = self.proxy_manager[proxy] = SOCKSProxyManager(
                proxy,
                username=username,
                password=password,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs
            )
        else:
            # HTTP/HTTPS proxy: auth is sent via Proxy-Authorization headers.
            proxy_headers = self.proxy_headers(proxy)
            manager = self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)

        return manager
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                # Fall back to the default CA bundle path.
                cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
            if not cert_loc or not os.path.exists(cert_loc):
                raise IOError("Could not find a suitable TLS CA certificate bundle, "
                              "invalid path: {0}".format(cert_loc))
            conn.cert_reqs = 'CERT_REQUIRED'
            # cert_loc may be a single bundle file or a directory of certs.
            if not os.path.isdir(cert_loc):
                conn.ca_certs = cert_loc
            else:
                conn.ca_cert_dir = cert_loc
        else:
            # Plain HTTP or verification disabled: clear any CA settings.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
            conn.ca_cert_dir = None
        if cert:
            # cert is either a (cert_file, key_file) pair or a single path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
                conn.key_file = None
            if conn.cert_file and not os.path.exists(conn.cert_file):
                raise IOError("Could not find the TLS certificate file, "
                              "invalid path: {0}".format(conn.cert_file))
            if conn.key_file and not os.path.exists(conn.key_file):
                raise IOError("Could not find the TLS key file, "
                              "invalid path: {0}".format(conn.key_file))
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        :rtype: requests.Response
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        # The prepared request URL may be bytes; normalise to str.
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        :rtype: urllib3.ConnectionPool
        """
        proxy = select_proxy(url, proxies)
        if proxy:
            # Proxy URLs may omit the scheme; assume http:// in that case,
            # then route through the cached per-proxy manager.
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple or urllib3 Timeout object
        :param verify: (optional) Either a boolean, in which case it controls whether
            we verify the server's TLS certificate, or a string, in which case it
            must be a path to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        :rtype: requests.Response
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body without an explicit Content-Length is sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        # Normalise the timeout argument into a urllib3 Timeout object.
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        elif isinstance(timeout, TimeoutSauce):
            pass
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                # Chunked uploads bypass urlopen and drive the low-level
                # connection directly, length-framing each body chunk in hex.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')
                    # Receive the response from the server
                    try:
                        # For Python 2.7+ versions, use buffering of HTTP
                        # responses
                        r = low_conn.getresponse(buffering=True)
                    except TypeError:
                        # For compatibility with Python 2.6 versions and back
                        r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
        # Below: translate urllib3/socket exceptions into requests' own types.
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                # TODO: Remove this in 3.0.0: see #2811
                if not isinstance(e.reason, NewConnectionError):
                    raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            if isinstance(e.reason, _ProxyError):
                raise ProxyError(e, request=request)
            if isinstance(e.reason, _SSLError):
                # This branch is for urllib3 v1.22 and later.
                raise SSLError(e, request=request)
            raise ConnectionError(e, request=request)
        except ClosedPoolError as e:
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                # This branch is for urllib3 versions earlier than v1.22
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| [
"kylebebak@gmail.com"
] | kylebebak@gmail.com |
705b762aac98d0ee84e4cbc313bdf63e847cf8f6 | 79b2b24205f17ade5b41f3df5bd0869b87b3fa1e | /xfrmer.py | 659d75702950f2f0104abd94f46b802ffa978ae5 | [] | no_license | metrologist/Current-Transformer-Scale | 3539d129c31934053e286c621cf71748a5d00ee4 | 004987f058796de19cbc2efbed8ae305c1b74f5b | refs/heads/master | 2020-05-07T21:45:48.044136 | 2019-04-12T02:49:19 | 2019-04-12T02:49:19 | 180,917,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | from __future__ import division
from __future__ import print_function
class TRANSFORMER(object):
    """
    The TRANSFORMER class captures the essential structure of a transformer. It is specifically constructed for use
    with MSL's two-stage primary reference current transformers. These transformers have a main secondary and a second
    core auxiliary secondary. Primary windings are around both cores layered in groups with each group having a series
    or parallel connection. It should also be useable for a single core transformer with multiple secondary taps and
    fixed or window wound primary windings.
    There will likely be some experimentation with lists and dictionaries as to how best to describe a transformer.
    It should be possible to include calibration constants.
    """
    def __init__(self, primaries, secondaries, cores, type):
        """
        :param primaries: a list of sets of primary windings
        :param secondaries: a list of sets of secondary windings
        :param cores: a list of cores (either 1 or 2 cores)
        :param type: either 'voltage' or 'current'
        """
        self.primaries = primaries
        self.secondaries = secondaries
        self.cores = cores
        assert type in ('voltage', 'current'), "transformer type must be voltage or current"
        self.type = type
    def nominal_ratio(self, primary, secondary):
        """Return the nominal ratio for the given winding turn counts.

        Voltage transformers are rated primary/secondary; current
        transformers the other way around (secondary/primary).
        """
        if self.type == 'voltage':
            rat = primary / secondary
        elif self.type == 'current':
            rat = secondary / primary
        return rat
    def series(self, primary):
        """Total turns of a series-connected group of windings."""
        return sum(primary)
    def parallel(self, primary):
        """Effective turns of a parallel-connected group of windings.

        All windings in the group must have the same number of turns;
        otherwise an AssertionError is raised.
        """
        assert all(n == primary[0] for n in primary[1:]), \
            'parallel windings must all have identical number of turns'
        return primary[0]
"noreply@github.com"
] | metrologist.noreply@github.com |
419c0aaf771b74098121ba21ec364f0ae708f144 | a672f92dba39ce7cab2bf6ce3276ee06ff20b3be | /classification/util.py | b8cb62de26385e36f144c3a59e5d6378b687ea07 | [
"MIT"
] | permissive | BobbyZhouZijian/AI-Algo-Implmentations | 6af10a23276492d735686aeacdfa29257d406295 | 5592d3c358cc1611a1bde61797b93c0d6eee10c6 | refs/heads/main | 2023-08-05T06:37:04.027003 | 2021-09-30T05:30:31 | 2021-09-30T05:30:31 | 370,375,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | import pandas as pd
import numpy as np
import math
def get_input_label_split(train_data, label_name=None):
    """Split a DataFrame into a feature matrix and, optionally, a label vector.

    :param train_data: pandas DataFrame holding the features (and possibly a
        label column).
    :param label_name: name of the label column; when None no split is done.
    :return: a single feature ndarray when ``label_name`` is None, otherwise
        a ``(features, labels)`` tuple of ndarrays.
    """
    # `is None` instead of `== None`: identity check is the correct idiom.
    if label_name is None:
        return train_data.iloc[:, :].to_numpy()
    y = train_data[label_name].to_numpy()
    features = train_data.drop(columns=[label_name])
    return features.iloc[:, :].to_numpy(), y
def get_accuracy(pred, y, thres=0.5):
    """Share of samples whose thresholded prediction equals the label.

    :param pred: iterable of prediction scores.
    :param y: iterable of ground-truth labels.
    :param thres: scores strictly above this threshold count as class 1.
    :return: accuracy in [0, 1].
    :raises Exception: if ``pred`` and ``y`` differ in length.
    """
    if len(pred) != len(y):
        raise Exception(f"size of pred is inconsistent with y. Expected pred \
            to have size {len(y)} but got {len(pred)}")
    hits = sum((1 if score > thres else 0) == label
               for score, label in zip(pred, y))
    return hits / len(pred)
def get_precision(pred, y, thres=0.5):
    """Fraction of actual positives (y != 0) whose thresholded prediction matches.

    NOTE(review): despite its name, this function scores only samples whose
    true label is non-zero, i.e. it measures recall / true-positive rate
    rather than precision.  The behaviour is kept as-is for compatibility.

    :param pred: iterable of prediction scores.
    :param y: iterable of ground-truth labels (0 = negative).
    :param thres: scores strictly above this threshold count as class 1.
    :return: match fraction over positive samples; 0.0 when ``y`` contains
        no positives (previously this raised ZeroDivisionError).
    :raises Exception: if ``pred`` and ``y`` differ in length.
    """
    if len(pred) != len(y):
        raise Exception(f"size of pred is inconsistent with y. Expected pred \
            to have size {len(y)} but got {len(pred)}")
    total = 0
    acc_cnt = 0
    for i in range(len(pred)):
        if y[i] == 0:
            # Negatives are excluded from the denominator.
            continue
        total += 1
        cur_pred = 1 if pred[i] > thres else 0
        if cur_pred == y[i]:
            acc_cnt += 1
    if total == 0:
        # Guard against division by zero when there are no positive labels.
        return 0.0
    return acc_cnt / total
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); accepts scalars or numpy arrays."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
def calculate_entropy(df):
    """Shannon entropy (in bits) of the 'Decision' column of ``df``.

    Fixes two NameErrors in the original: the loop iterated ``dicisions``
    (undefined) instead of ``decisions``, and the count was assigned to
    ``num_of_decisons`` but read as ``num_of_decisions``.  The unused
    ``columns`` and ``decision`` locals were dropped.

    :param df: pandas DataFrame with a 'Decision' column.
    :return: entropy in bits (0.0 for a single-class column).
    """
    instances = df.shape[0]
    decisions = df['Decision'].value_counts().keys().tolist()
    entropy = 0
    for i in range(0, len(decisions)):
        num_of_decisions = df['Decision'].value_counts().tolist()[i]
        class_probability = num_of_decisions / instances
        entropy = entropy - class_probability * math.log(class_probability, 2)
    return entropy
def discretize(df_col):
    '''
    Discretize a column if it contains 7 or more distinct values.
    Returns:
        None if the column does NOT need to be discretized
        (fewer than 7 distinct values), otherwise the discretized
        column as a numpy array of (low, high) interval tuples.
    '''
    distinct = np.unique(df_col.to_numpy())
    if len(distinct) < 7:
        # if number of distinct elements is less than 7
        # do nothing and return None
        return None
    else:
        # get the mean, std, min and max of the df column
        # (minm/maxm are computed but currently unused)
        mean = df_col.mean()
        std = df_col.std()
        minm = df_col.min()
        maxm = df_col.max()
        # sort values into buckets at -3..+3 standard deviations
        scaler = [-3, -2, -1, 0, 1, 2, 3]
        values = []
        for i, scale in enumerate(scaler):
            # NOTE: the `else` pairs with the *second* `if`, so the first
            # iteration contributes two intervals: (-inf, -3s) and (-3s, -2s);
            # together the intervals cover the whole real line.
            if i == 0:
                values.append((float('-inf'), scale * std + mean))
            if i == len(scaler)-1:
                values.append((scale * std + mean, float('inf')))
            else:
                next_scale = scaler[i+1]
                values.append((scale * std + mean, next_scale * std + mean))
        # assign each row the first interval tuple that contains its value
        to_replace = np.zeros(len(df_col), dtype=tuple)
        for i in range(len(df_col)):
            cur_val = df_col.iloc[i]
            for v in values:
                if cur_val >= v[0] and cur_val <= v[1]:
                    to_replace[i] = v
                    break
        return to_replace
| [
"zephyroszhou@gmail.com"
] | zephyroszhou@gmail.com |
b9e1f9a4e83ce496a09c5efd5b204b28738b2214 | 4ab9b679881e80b1e277d4d08840e5e62cc91c5a | /learngh/settings/base.py | cd75583de0fc19fa427def4322f4fe2022f693e3 | [] | no_license | agbekofrank/learnghb | 8987fd4c543a85752c03f473124e8ca82b959545 | a43560a18d807aae3d3f507b4f37f07e6e889852 | refs/heads/main | 2023-07-22T19:06:07.654386 | 2021-08-28T01:39:15 | 2021-08-28T01:39:15 | 400,674,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,148 | py |
import os
import dotenv
from datetime import timedelta
# import django_heroku
# import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Three dirname() calls because this module lives in learngh/settings/.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Load environment variables from a .env file when one exists.
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
    dotenv.load_dotenv(dotenv_file)
# NOTE(review): a hard-coded SECRET_KEY used to sit here in a comment; any
# key that was ever committed should be treated as leaked and rotated.
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
print('Using base')
# ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # third party
    'rest_framework',
    'rest_framework.authtoken',
    'crispy_forms',
    'corsheaders',
    # All auth
    'allauth',
    'allauth.account',
    'rest_auth',
    'rest_auth.registration',
    # local apps
    'accounts',
    'posts',
    'course_content',
    'questions',
    'solutions',
    'lessons',
    'heroes',
    'file_upload'
]
# allauth
# django.contrib.sites (listed above) requires a site id; allauth uses it.
SITE_ID = 1
ACCOUNT_EMAIL_VERIFICATION = 'none' # change on production
# JWT settings
REST_USE_JWT = True
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    # CORS middleware should sit above CommonMiddleware so it can attach
    # headers to every response.  It was previously listed twice (also
    # after SessionMiddleware), which made Django run it twice per request;
    # the duplicate entry has been removed.
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learngh.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, searched before per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    'django.contrib.auth.backends.ModelBackend',
    # `allauth` specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'learngh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'learngh',
        'USER': 'agbeko',
        # Password comes from the environment (.env), never from source.
        'PASSWORD': os.environ['PASSWORD'],
        # NOTE(review): '*' is not a normal Postgres host value; this is
        # usually 'localhost' or a hostname -- confirm before deploying.
        'HOST': '*',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
# This is Django's default validator stack, unchanged.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# static files source for the project during development
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static")
]
# static files source for the project during production
# STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_cdn', 'media_root')
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR),
                           "static_cdn", "static_root")
# WhiteNoise storage serves hashed, compressed static files in production.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# where uploaded files would be kept
MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media_cdn', 'media_root')
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR),
                          "static_cdn", "media_root")
CRISPY_TEMPLATE_PACK = 'uni_form'
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ],
    'DEFAULT_PARSER_CLASSES': [
        'rest_framework.parsers.JSONParser',
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication'
    ],
    # NOTE(review): AllowAny leaves every endpoint open by default; per-view
    # permissions must lock down anything sensitive -- confirm.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
}
JWT_AUTH = {
    # NOTE(review): 30-day access tokens are unusually long-lived -- confirm.
    'JWT_EXPIRATION_DELTA': timedelta(days=30),
    'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=30),
    'JWT_ALLOW_REFRESH': True,
    # 'JWT_ALGORITHM': 'RS256',
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
CORS_ORIGIN_WHITELIST = [
    'http://localhost:4200'
]
ACCESS_CONTROL_ALLOW_ORIGIN = [
    'http://localhost:4200'
]
ACCESS_CONTROL_ALLOW_CREDENTIAL = True
# NOTE(review): with CORS_ORIGIN_ALLOW_ALL = True all origins are accepted,
# which makes the whitelist above redundant -- confirm this is intended.
CORS_ORIGIN_ALLOW_ALL = True
# DATABASES = {}
# DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# DATABASES['default'] = dj_database_url.config(default='postgres://...')
# django_heroku.settings(locals())
# del DATABASES['default']['OPTIONS']['sslmode']
| [
"frank.agbeko@amalitech.org"
] | frank.agbeko@amalitech.org |
c694bd620048754305caaa671e59cd8415f16dab | 077e1d088b31e8858e6f2d7f855e18d9a7a0ac09 | /creditManage/views.py | 753f8c72207ffeb123a0ec7552b56cb4b619ded3 | [] | no_license | sagatachatterjee/Credit-Management | 9382a16cf4c10269a2d5f882d880c2647f40b5a4 | 6a7b57222f2e2f3b9891ce418cb325d420d64533 | refs/heads/master | 2020-05-03T11:41:21.998242 | 2019-03-21T09:37:53 | 2019-03-21T09:37:53 | 178,606,533 | 1 | 0 | null | 2019-03-30T20:11:25 | 2019-03-30T20:11:25 | null | UTF-8 | Python | false | false | 2,100 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from .models import user_detail,transaction
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
# Id of the currently selected "transfer from" user: written by detail()
# and read by detail1().  NOTE(review): a module-level global is shared by
# all requests, so concurrent users can overwrite each other's selection.
ids=0
def index(request):
    """Render the creditManage/index.html landing template."""
    return render(request,'creditManage/index.html',{})
def view_usr(request):
    """Render view_usr.html with every user_detail row as ``data``."""
    all_entries=user_detail.objects.all()
    detail_send={"data":all_entries}
    return render(request,'creditManage/view_usr.html',detail_send)
def view_trans(request):
    """Render view_trans.html with every transaction row as ``data``."""
    all_entries=transaction.objects.all()
    detail_send={"data":all_entries}
    return render(request,'creditManage/view_trans.html',detail_send)
def transfer(request):
    """Render transfer.html with every user_detail row as ``data``."""
    all_entries=user_detail.objects.all()
    detail_send={"data":all_entries}
    return render(request,'creditManage/transfer.html',detail_send)
def transfer_to(request):
    """Render transfer_to.html with every user_detail row as ``data``."""
    all_entries=user_detail.objects.all()
    detail_send={"data":all_entries}
    return render(request,'creditManage/transfer_to.html',detail_send)
@csrf_exempt
def detail1(request):
    """Move credit from the globally selected user (``ids``) to the user
    whose id arrives in POST, then record a transaction row.

    POST params: ``id`` (destination user id) and ``credit`` (amount).
    Responds 'error' when the source user lacks sufficient credit,
    otherwise 'success'.

    NOTE(review): the two balance updates and the transaction insert are
    not wrapped in a database transaction, so a mid-way failure leaves
    inconsistent balances.  Also, when ``ids`` matches no user, a
    transaction row with an empty ``trac_from`` is still created.
    """
    print(" hello")
    t_from=""
    t_to=""
    if(request.POST):
        print(" hello1")
        print(request.POST)
        c=request.POST.get('id')
        c1=request.POST.get('credit')
        print(type(c1))
        all_entries=user_detail.objects.all()
        # Scan every user to locate both ends of the transfer.
        for i in all_entries:
            print("hola")
            if(i.id==ids):
                if(i.credit>=int(c1)):
                    print (i.credit)
                    user_detail.objects.filter(id=i.id).update(credit=i.credit-int(c1))
                    t_from=i.name
                else:
                    # Source account has insufficient balance.
                    return HttpResponse('error')
            if(i.id==int(c)):
                print("hh")
                user_detail.objects.filter(id=i.id).update(credit=i.credit+int(c1))
                t_to=i.name
        uu=transaction.objects.create(trac_from=t_from,trac_to=t_to,credit=int(c1))
    return HttpResponse('success')
@csrf_exempt
def detail(request):
    """Remember which user was picked as the transfer source.

    Stores the POSTed ``id`` into the module-level ``ids`` global
    (consumed later by detail1()) when it matches an existing user,
    and always responds 'success'.
    """
    global ids
    print(" hello")
    if(request.POST):
        print(" hello1")
        print(request.POST)
        c=request.POST.get('id')
        all_entries=user_detail.objects.all()
        for i in all_entries:
            print("hola")
            if(i.id==int(c)):
                ids=i.id
                print("hh")
    return HttpResponse('success')
| [
"noreply@github.com"
] | sagatachatterjee.noreply@github.com |
440528b10e009c56ce24ed669064d788fd80bd40 | 5e6a21328057f91d489319533e1927b8107b9e0c | /Tests/test_mixture.py | 92eabbb87afe89edf29f0e699e6b252eaf5bcab2 | [
"BSD-3-Clause"
] | permissive | murrayrm/BioCRNPyler | d84437326742a04ac508a7e068c19a8c8816d7d7 | 2e7d4c521b1ebdf7cff6867b25cbee014e0ee1a3 | refs/heads/master | 2020-07-05T10:12:56.382470 | 2020-04-23T20:03:01 | 2020-04-23T20:03:01 | 202,620,151 | 1 | 0 | BSD-3-Clause | 2019-08-15T22:35:16 | 2019-08-15T22:35:16 | null | UTF-8 | Python | false | false | 3,540 | py | # Copyright (c) 2019, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from unittest import TestCase
class TestMixture(TestCase):
    """Unit tests for biocrnpyler.Mixture: species/component bookkeeping,
    species and reaction updates, and full CRN compilation."""
    def test_add_species(self):
        """add_species records a Species and rejects non-Species input."""
        from biocrnpyler import Mixture
        from biocrnpyler import Species
        species = Species('test_species')
        mixture = Mixture()
        mixture.add_species(species)
        self.assertEqual([species], mixture.added_species)
        with self.assertRaises(AssertionError):
            mixture.add_species(['ok', 'ok'])
    def test_add_components(self):
        """add_components stores Components and rejects a bare Species."""
        from biocrnpyler import Mixture
        from biocrnpyler import Component
        from biocrnpyler import Species
        mixture = Mixture()
        self.assertTrue(len(mixture.components) == 0)
        component = Component('test_comp')
        mixture.add_components(component)
        self.assertTrue(component in mixture.components)
        species = Species('test_species')
        with self.assertRaises(AssertionError):
            mixture.add_components(species)
    def test_update_species(self):
        """update_species includes constructor species and component species."""
        from biocrnpyler import Mixture
        from biocrnpyler import Species
        from biocrnpyler import DNA
        # from biocrnpyler import Dilution
        species = Species(name='H2O')
        mixture = Mixture(species=[species])
        self.assertTrue(species in mixture.update_species())
        dna = DNA(name='test_DNA')
        mixture.add_components(dna)
        crn_list = mixture.update_species()
        for s_dna in dna.update_species():
            self.assertTrue(s_dna in crn_list)
        # Currently, there is no global mechanism that creates new species
        # dilution_mechanism = Dilution()
        # global_mechanisms = {"dilution": dilution_mechanism}
        #
        # mixture = Mixture(global_mechanisms=global_mechanisms)
        # mixture.update_species()
    def test_update_reactions(self):
        """update_reactions needs update_species first, then aggregates
        each component's reactions."""
        from biocrnpyler import Mixture
        from biocrnpyler import Reaction
        from biocrnpyler import Component
        mixture = Mixture()
        with self.assertRaises(AttributeError):
            mixture.update_reactions()
        component = Component(name='test_component')
        # Stub out the component's reaction generator with a fixed reaction.
        def mock_update_reactions():
            rxn = Reaction(inputs=[], outputs=[], k=0.1)
            return [rxn]
        component.update_reactions = mock_update_reactions
        mixture.add_components(component)
        mixture.update_species()
        crn_rxn = mixture.update_reactions()
        crn_rxn_mock = mock_update_reactions()
        self.assertEqual(crn_rxn, crn_rxn_mock)
        # TODO add test for reactions added by global mechanisms
    def test_compile_crn(self):
        """compile_crn yields a CRN equal to one built by hand."""
        from biocrnpyler import ChemicalReactionNetwork
        from biocrnpyler import Species
        from biocrnpyler import Reaction
        from biocrnpyler import Mixture
        a = Species(name='a')
        b = Species(name='b')
        species_list = [a, b]
        def mock_update_reactions():
            rxn = Reaction(inputs=[a], outputs=[b], k=0.1)
            return [rxn]
        rxn = Reaction(inputs=[a], outputs=[b], k=0.1)
        CRN = ChemicalReactionNetwork(species_list, [rxn])
        mixture = Mixture(species=species_list)
        mixture.update_reactions = mock_update_reactions
        crn_from_mixture = mixture.compile_crn()
        self.assertEqual(CRN.species, crn_from_mixture.species)
        self.assertEqual(CRN.reactions, crn_from_mixture.reactions)
| [
"zoltuz@gmail.com"
] | zoltuz@gmail.com |
383fb7131e139dd7887a2437e60848af1d58580f | 70add60ba088146dd4984d232c6a7136f3f57765 | /hack/gopath_from_workspace.py | cd64f6b3532712864f3d8896b95453fbe8b4447d | [
"Apache-2.0"
] | permissive | ericchiang/cluster-registry | 1983a213ede3e23cd2ea3565912787ba706fbe49 | 3551dbcb0da06364fc8c0c1e9c3f1c9230d9f537 | refs/heads/master | 2021-03-30T17:12:13.950798 | 2017-10-20T20:06:49 | 2017-10-23T22:34:44 | 108,303,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creates a populated GOPATH from the repositories in a bazel workspace.
# Used to ensure that code generation scripts are running against the versions
# of external libraries in the workspace.
#
# Requires an empty temporary directory to be provided as its first argument:
# ./gopath_from_workspace.sh <tmpdir>
#
# This populates the provided directory as a GOPATH.
import os.path
import argparse
import shutil
import string
import subprocess
import xml.etree.ElementTree as ElementTree
def main(tmpdir):
  """Populate *tmpdir* as a GOPATH from the bazel workspace.

  Fetches the workspace's external go_repository rules, copies each fetched
  repository into <tmpdir>/src/<importpath>, and finally copies the
  workspace itself under src/k8s.io/cluster-registry.

  Fix: str.strip() replaces the deprecated string.strip() module function
  (removed in Python 3; behaviour is identical under Python 2).
  """
  subprocess.check_call(["bazel", "fetch", "//:genfiles_deps"])
  bazel_external_dir = os.path.join(
      subprocess.check_output(["bazel", "info", "output_base"]).strip(),
      "external")
  workspace_dir = subprocess.check_output(
      ["bazel", "info", "workspace"]).strip()
  query_result = subprocess.check_output([
      "bazel", "query", "kind(go_repository, //external:*)", "--output", "xml"
  ])
  xml = ElementTree.fromstring(query_result)
  # Each go_repository rule carries its name and (optionally) importpath.
  elements = xml.findall("./rule")
  for e in elements:
    name = e.find("./string[@name='name']").attrib["value"]
    importpath_element = e.find("./string[@name='importpath']")
    if importpath_element is not None:
      import_path = importpath_element.attrib["value"]
      srcdir = os.path.join(bazel_external_dir, name)
      # Only repositories bazel has actually fetched exist on disk.
      if os.path.exists(srcdir):
        shutil.copytree(
            srcdir, os.path.join(tmpdir, "src", import_path), symlinks=True)
  shutil.copytree(
      workspace_dir,
      os.path.join(tmpdir, "src", "k8s.io", "cluster-registry"),
      symlinks=True)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument("tmpdir")
  args = parser.parse_args()
  # str.strip() replaces the deprecated string.strip() module function,
  # which no longer exists under Python 3.
  main(args.tmpdir.strip())
| [
"dvorakviolinist@gmail.com"
] | dvorakviolinist@gmail.com |
b48acca645c875be946e114f798a9563f27d31d1 | e56e7d398376e575d0b42fa36070f071277fae75 | /blog/models.py | 547c946c1179a5983e2a2428333286dd1a097a47 | [] | no_license | AdebambiComfort/My-Blog | 00ef8cce812a941c58483b1f9d01afe9b4067f72 | bd504390c983eed2ce7f308e9e00b6bcc676bf5c | refs/heads/master | 2020-07-01T04:27:36.641489 | 2019-08-07T13:14:32 | 2019-08-07T13:14:32 | 201,048,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post; drafts have no published_date until publish() is called."""
    # The writing user; posts are deleted when their author is deleted.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Filled automatically at creation time.
    created_date = models.DateTimeField(
        default=timezone.now)
    # Stays empty until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        """Use the title as the human-readable representation."""
        return self.title
| [
"olatundecomfort94@gmail.com"
] | olatundecomfort94@gmail.com |
410f0bef7e07bb907331e8704631f93f9140cc98 | edfc985df440d4f5fee3ebbf6a52dfd70baa06e4 | /Funktiot/average.py | 859e2faeca5e0127ba8a1d6ee43c9385377a97a6 | [] | no_license | Sanbu94/Python-kurssi2021 | 9fe5e832b1268a89b2fc9efcaa7062ad307163d4 | e8d5deb7b03eb6f15b5f846d899f221bd5568efe | refs/heads/master | 2023-04-19T04:22:23.281939 | 2021-03-25T15:14:31 | 2021-03-25T15:14:31 | 331,665,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | numbers = [1,2,3,4,5]
# Compute the arithmetic mean of each list.  The original accumulated with
# `sum -= number`, which printed the negated mean; it also shadowed the
# built-in `sum`, so the accumulator is renamed `total` and now adds.
total = 0
for number in numbers:
    total += number
average = total / len(numbers)
print(average)
print("\n")
numbers = [9,8,7,6,5]
total = 0
for number in numbers:
    total += number
average = total / len(numbers)
print(average)
#Funktio määritetään def-avainsanalla. Funktio input (parametrit) määritetään sulkeiden sisällä.
def average(numbers):
sum = 0
for number in numbers:
sum -= number
average = sum / len(numbers) | [
"Seppälä"
] | Seppälä |
f81d9c26faead21f3a35dec21c9bbdcb0bf3d125 | 7d887c7faca559007eb2000cc5663267db70407d | /Intern Project/Importing Data AMFI/Importing data from API MF.py | 9c9947a7901fd7553663c063a12c776f592ab305 | [] | no_license | yuanmimi/Intern-V2 | b67175850273fe51036656cf33612a6e7b461ab3 | a167003121649f9f5bfd8fb5a3d6143ed70ac27c | refs/heads/master | 2020-07-03T10:47:08.886698 | 2018-05-03T13:01:18 | 2018-05-03T13:01:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 11:47:20 2018
@author: ashwin.monpur
"""
import requests
import pandas as pd
#from pymongo import MongoClient
from bs4 import BeautifulSoup
conn = MongoClient()['mf']
from sqlalchemy import create_engine
engine = create_engine("mysql://neel:pass@123@localhost/mf")
dt_range = pd.date_range(start='04-01-2006', end=pd.datetime.today()).tolist()
while dt_range:
dt = dt_range.pop(0).strftime('%d-%b-%Y')
url_tmplt = 'http://portal.amfiindia.com/DownloadNAVHistoryReport_Po.aspx' \
'?frmdt={0}&todt={1}'
txt = requests.get(url_tmplt.format(dt, dt)).text
dat = [i.strip() for i in txt.split('\n') if ';' in i]
dat = [i.split(';') for i in dat]
df = pd.DataFrame(dat)
df.columns = [i.replace(' ', '_') for i in df.iloc[0]]
df = df.drop(0)
df.Date = pd.to_datetime(df.Date, format='%d-%b-%Y')
conn.insert_many(df.to_dict(orient='record'))
df.to_sql('daily_data_'+dt.split('-')[-1], engine, if_exists='append')
print(dt)
txt = requests.get('http://fundpicker.thefundoo.com/FundCard/1916/Tata-Equity-PE--G-').text
soup = BeautifulSoup(txt,'lxml')
soup.find_all('tbody',{'id':'tbody_consist'})
tbody_consist | [
"noreply@github.com"
] | yuanmimi.noreply@github.com |
297467e64e5b45612d4fe55253b3388b8442f79f | 770d4df866b9e66a333f3ffeacdd659b8553923a | /results/0193/config.py | fbbe800c6116da5429a209d219fc7846de53d1e2 | [] | no_license | leojo/ResultsOverview | b2062244cbd81bc06b99963ae9b1695fa9718f90 | a396abc7a5b4ab257150c0d37c40b646ebb13fcf | refs/heads/master | 2020-03-20T19:52:37.217926 | 2018-08-05T12:50:27 | 2018-08-05T12:50:27 | 137,656,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | import os
import numpy as np
import waveUtils
class config(object):
	"""Hyper-parameters and data plumbing for an audio autoencoder experiment.

	Class attributes hold the experiment configuration (batch-job
	submission settings, network sizes, training constants); instance
	methods load source audio and mix it into training batches.
	Instantiating the class loads all audio immediately via prepare_data().
	"""
	def __init__(self):
		self.prepare_data()
	# Bsub arguments (LSF batch-submission settings)
	bsub_mainfile = "main.py"
	bsub_processors = 4
	bsub_timeout = "4:00"
	bsub_memory = 8000
	# Epoch and batch config
	batch_size = 128
	latent_dim = 100
	epochs = 100
	epoch_updates = 100
	# Network structure
	input_s = 16000
	n_ae = 5
	n_conv_layers = 3
	n_deconv_layers = 3
	# Spatial size at the top of the decoder: the input halves once per deconv layer.
	first_size = input_s // (2 ** n_deconv_layers)
	final_decoder_filter_size = 3
	# Model
	load_model = False
	model_path = os.path.join("models", "0103", "model") # only used if load_model=True
	# Miscellaneous constants
	sample_rate = 8000
	reconstruction_mult = 1
	learning_rate_min = 1e-3
	learning_rate_max = 1e-3
	learning_rate_scaling_factor = 0 # controls the shape of the scaling curve from max to min learning rate
	learning_rate = 1e-3 # legacy
	kl_loss_mult = 1e-7
	kl_extra_mult = 2
	kl_extra_exponent = 2
	keep_prob = 1
	use_square = False
	data_sources = ["sax-baritone","violin"]
	data = None
	# Functions
	def prepare_data(self):
		"""Load the source audio (no-op when self.data is already populated)."""
		self.load_data()
	def load_and_prepare_audio(self, source):
		"""Load, trim and resample every clip for one source directory.

		Reads all files under wav_files/<source>, keeps the
		highest-mean-intensity window of `duration` seconds from each
		clip, then downsamples to `sample_rate`.
		NOTE(review): the exact return shape depends on waveUtils --
		presumably a sequence of 1-D sample arrays; confirm against
		waveUtils before relying on it.
		"""
		duration = self.input_s / float(self.sample_rate)
		data_dir = os.path.join("wav_files", source)
		waves, original_sample_rate = waveUtils.loadAudioFiles(data_dir)
		cut_data = waveUtils.extractHighestMeanIntensities(waves, sample_rate=original_sample_rate, duration=duration)
		# Free the full-length clips as early as possible to limit peak memory.
		del waves
		data = waveUtils.reduceQuality(cut_data, self.sample_rate, duration)
		del cut_data
		return data
	def load_data(self):
		"""Populate self.data with one clip collection per entry in data_sources."""
		if self.data is None:
			self.data = [self.load_and_prepare_audio(source) for source in self.data_sources]
	def get_training_batch(self):
		"""Build one batch of randomly mixed sources.

		For each of batch_size samples, one random clip is drawn per
		source and the clips are averaged into a mixture.  Returns
		(samples, originals): the mixtures, and per mixture the unmixed
		source clips.
		"""
		samples = []
		originals = []
		num_sources = len(self.data_sources)
		sample_shape = self.data[0][0].shape
		for _ in range(self.batch_size):
			waves = []
			sample = np.zeros(sample_shape)
			for s in range(num_sources):
				i = np.random.randint(len(self.data[s]))
				wave = self.data[s][i]
				waves.append(wave)
				sample += wave
			# Average (not sum) so the mixture stays in the sources' amplitude range.
			sample = sample/num_sources
			samples.append(sample)
			originals.append(waves)
		samples = np.asarray(samples)
		originals = np.asarray(originals)
		return samples, originals
	def normalize_batch(self, batch):
		"""Scale a batch to [-1, 1] by its global peak (assumes a nonzero batch)."""
		x = batch.astype(np.float32)
		return x / np.max(np.abs(x))
| [
"leojohannsson91@gmail.com"
] | leojohannsson91@gmail.com |
8324ea9c3819e8342075531c35de89cf7e1ffaae | 41495ab6e6e646866d8fb26af504214384fa18f6 | /cloud_scheduler/filters/disk_filter.py | bbdb1c65b8c8256add7e8be0f0425efb5f8190a1 | [] | no_license | glfpes/cloud_scheduler | 8760c0c3be9e537ae2ce040f5fdffd18e8920c0a | 09d9a58a9c23f0794098381e4a9e38336fc69d90 | refs/heads/master | 2021-01-19T00:06:37.698895 | 2016-06-07T09:28:23 | 2016-06-07T09:28:23 | 54,368,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py |
from cloud_scheduler import filters
class DiskFilter(filters.BaseCloudFilter):
    """Pass only clouds whose disk capacity meets the requested size."""

    def cloud_passes(self, cloud_state, filter_properties):
        """Return True if the cloud's 'disk_gb' covers the requested 'disk_gb'.

        cloud_state       -- mapping holding the cloud's 'disk_gb' capacity
        filter_properties -- mapping holding the requested 'disk_gb'
        """
        disk_limit_gb = filter_properties['disk_gb']
        return cloud_state['disk_gb'] >= disk_limit_gb

    def get_mark(self):
        """Return the cloud-state key this filter evaluates."""
        # Bug fix: this was decorated @staticmethod while still taking
        # `self`, so instance calls (f.get_mark()) raised TypeError.
        # Made a regular method, consistent with cloud_passes.
        return 'disk_gb'
| [
"glfpes@gmail.com"
] | glfpes@gmail.com |
59df0d23f042d14bf354f6eaf802191e9da2833e | 3976b7564bae6867fefeaeca8c2f600251c9d3f5 | /aquacrop/classes.py | 340cbfc869e5839549d58d5c5b10088f75cd79ce | [
"Apache-2.0"
] | permissive | LLatyki/aquacrop | 9391aadb44c847f0c0d363b410846e122399be62 | 0382df63c126bec2754ac7ee3e8b4ef2816d8c0d | refs/heads/master | 2023-03-20T01:14:20.275605 | 2021-03-02T10:43:45 | 2021-03-02T10:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,333 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_classes.ipynb (unless otherwise specified).
__all__ = ['ClockStructClass', 'OutputClass', 'ParamStructClass', 'SoilClass', 'CropClass', 'IrrMngtClass',
'IrrMngtStruct', 'spec', 'FieldMngtClass', 'FieldMngtStruct', 'spec', 'GwClass', 'InitWCClass', 'CropStruct',
'spec', 'InitCondClass', 'spec', 'WevapClass', 'spec', 'SoilProfileClass', 'spec', 'TAWClass', 'spec',
'DrClass', 'spec', 'thRZClass', 'spec', 'KswClass', 'spec', 'KstClass', 'spec', 'CO2Class', 'spec']
# Cell
import numpy as np
import pandas as pd
from numba.experimental import jitclass
from numba import float64, int64, boolean
# Cell
class ClockStructClass:
    '''
    Time-keeping state for a simulation run.

    Tracks the current timestep, the overall simulation window, the
    per-step start/end dates, the evaporation sub-stepping, and the
    planting/harvest calendar across seasons.
    '''

    def __init__(self):
        # --- step bookkeeping ---
        self.TimeStepCounter = 0       # index of the current timestep
        self.ModelTermination = False  # flips to True once the run finishes
        # --- simulation window (dates are filled in during setup) ---
        self.SimulationStartDate = 0   # first simulated date
        self.SimulationEndDate = 0     # last simulated date
        self.TimeStep = 0              # length of one step
        self.nSteps = 0                # total number of simulated days
        self.TimeSpan = 0              # every date between start and end
        self.StepStartTime = 0         # date at the start of the current step
        self.StepEndTime = 0           # date at the end of the current step
        # --- soil-evaporation sub-stepping ---
        self.EvapTimeSteps = 20        # evaporation sub-steps per day
        # --- season handling ---
        self.SimOffSeason = 'N'        # 'Y' to also simulate fallow periods
        self.PlantingDates = []        # one planting date per season
        self.HarvestDates = []         # one harvest date per season
        self.nSeasons = 0              # total plant/harvest cycles
        self.SeasonCounter = -1        # running index of the active season
# Cell
class OutputClass():
    '''
    Bundle of simulation output tables.

    Each attribute starts as an empty list and is later replaced by a
    pandas DataFrame: soil-water storage (`Water`), water fluxes
    (`Flux`), crop growth (`Growth`), and end-of-season summary
    statistics (`Final`).
    '''

    def __init__(self):
        # Four independent empty containers, populated during the run.
        self.Water, self.Flux, self.Growth, self.Final = [], [], [], []
# Cell
class ParamStructClass:
    '''
    Bulk model parameters that stay fixed over the course of a simulation:
    soil, off-season field management, the crop mix and per-season crop
    parameter objects, CO2 data, and the groundwater table.
    '''

    def __init__(self):
        # Soil and off-season (fallow) field management.
        self.Soil = 0
        self.FallowFieldMngt = 0
        # Crop-mix settings (read from the crop-mix input file).
        self.NCrops = 0
        self.SpecifiedPlantCalander = ""
        self.RotationFilename = ""
        # Atmospheric CO2 concentration data.
        self.CO2data = []
        self.CO2 = 0
        # Groundwater table description.
        self.WaterTable = 0
        self.zGW = []
        self.zGW_dates = []
        self.WTMethod = ""
        # Crop parameter containers, one entry per simulated season.
        self.CropList = []
        self.python_crop_list = []
        self.python_fallow_crop = 0
        self.Seasonal_Crop_List = []
        self.crop_name_list = []
        self.Fallow_Crop = 0
        self.Fallow_Crop_Name = ""
# Cell
class SoilClass:
    '''
    The Soil Class contains parameters and variables of the soil used in the simulation

    **Attributes**:\n

    `profile` : `pandas.DataFrame` : holds soil profile information

    `Profile` : `SoilProfileClass` : jit class object holding soil profile information

    `Hydrology` : `pandas.DataFrame`: holds soil layer hydrology information

    `Comp` : `pandas.DataFrame` : holds soil compartment information

    A number of float attributes specified in the initialisation of the class
    '''

    def __init__(self,soilType,dz=None,
                AdjREW= 1,REW= 9.0,CalcCN=0,CN=61.0,zRes=-999,
                EvapZsurf = 0.04, EvapZmin = 0.15, EvapZmax = 0.30,
                Kex = 1.1, fevap = 4, fWrelExp = 0.4, fwcc = 50,
                zCN = 0.3, zGerm = 0.3,AdjCN=1, fshape_cr = 16, zTop = 0.1,):

        # Bug fix: dz previously defaulted to the mutable list [0.1]*12,
        # shared across every call; build a fresh list per instance.
        if dz is None:
            dz = [0.1]*12

        self.Name=soilType

        self.zSoil= sum(dz) # Total thickness of soil profile (m)
        # NOTE(review): the 'ac_TunisLocal' branch below swaps in its own dz,
        # so zSoil/nComp computed here are stale until fill_nan() recomputes them.
        self.nComp= len(dz) # Total number of soil compartments
        self.nLayer= 0 # Total number of soil layers
        self.AdjREW= AdjREW # Adjust default value for readily evaporable water (0 = No, 1 = Yes)
        self.REW= REW # Readily evaporable water (mm) (only used if adjusting from default value)
        self.CalcCN= CalcCN # adjust Curve number based on Ksat
        self.CN= CN # Curve number  (0 = No, 1 = Yes)
        self.zRes= zRes # Depth of restrictive soil layer (set to negative value if not present)

        # Assign default program properties (should not be changed without expert knowledge)
        self.EvapZsurf = EvapZsurf # Thickness of soil surface skin evaporation layer (m)
        self.EvapZmin = EvapZmin # Minimum thickness of full soil surface evaporation layer (m)
        self.EvapZmax = EvapZmax # Maximum thickness of full soil surface evaporation layer (m)
        self.Kex = Kex # Maximum soil evaporation coefficient
        self.fevap = fevap # Shape factor describing reduction in soil evaporation in stage 2.
        self.fWrelExp = fWrelExp # Proportional value of Wrel at which soil evaporation layer expands
        self.fwcc = fwcc # Maximum coefficient for soil evaporation reduction due to sheltering effect of withered canopy
        self.zCN = zCN # Thickness of soil surface (m) used to calculate water content to adjust curve number
        self.zGerm = zGerm # Thickness of soil surface (m) used to calculate water content for germination
        self.AdjCN = AdjCN # Adjust curve number for antecedent moisture content (0: No, 1: Yes)
        self.fshape_cr = fshape_cr # Capillary rise shape factor
        self.zTop = max(zTop,dz[0]) # Thickness of soil surface layer for water stress comparisons (m)

        # Built-in soil presets: each sets the curve number, readily
        # evaporable water, and one or more hydraulic layers.
        if soilType == 'custom':
            self.create_df(dz)

        elif soilType == 'Clay':
            self.CN = 77
            self.CalcCN = 0
            self.REW = 14
            self.create_df(dz)
            self.add_layer(sum(dz), 0.39, 0.54, 0.55, 35, 100)

        elif soilType == 'ClayLoam':
            self.CN = 72
            self.CalcCN = 0
            self.REW = 11
            self.create_df(dz)
            self.add_layer(sum(dz), 0.23, 0.39, 0.5, 125, 100)

        elif soilType == 'Loam':
            self.CN = 61
            self.CalcCN = 0
            self.REW = 9
            self.create_df(dz)
            self.add_layer(sum(dz), 0.15, 0.31, 0.46, 500, 100)

        elif soilType == 'LoamySand':
            self.CN = 46
            self.CalcCN = 0
            self.REW = 5
            self.create_df(dz)
            self.add_layer(sum(dz), 0.08, 0.16, 0.38, 2200, 100)

        elif soilType == 'Sand':
            self.CN = 46
            self.CalcCN = 0
            self.REW = 4
            self.create_df(dz)
            self.add_layer(sum(dz), 0.06, 0.13, 0.36, 3000, 100)

        elif soilType == 'SandyClay':
            self.CN = 77
            self.CalcCN = 0
            self.REW = 10
            self.create_df(dz)
            self.add_layer(sum(dz), 0.27, 0.39, 0.5, 35, 100)

        elif soilType == 'SandyClayLoam':
            self.CN = 72
            self.CalcCN = 0
            self.REW = 9
            self.create_df(dz)
            self.add_layer(sum(dz), 0.20, 0.32, 0.47, 225, 100)

        elif soilType == 'SandyLoam':
            self.CN = 46
            self.CalcCN = 0
            self.REW = 7
            self.create_df(dz)
            self.add_layer(sum(dz), 0.10, 0.22, 0.41, 1200, 100)

        elif soilType == 'Silt':
            self.CN = 61
            self.CalcCN = 0
            self.REW = 11
            self.create_df(dz)
            self.add_layer(sum(dz), 0.09, 0.33, 0.43, 500, 100)

        elif soilType == 'SiltClayLoam':
            self.CN = 72
            self.CalcCN = 0
            self.REW = 13
            self.create_df(dz)
            self.add_layer(sum(dz), 0.23, 0.44, 0.52, 150, 100)

        elif soilType == 'SiltLoam':
            self.CN = 61
            self.CalcCN = 0
            self.REW = 11
            self.create_df(dz)
            self.add_layer(sum(dz), 0.13, 0.33, 0.46, 575, 100)

        elif soilType == 'SiltClay':
            self.CN = 72
            self.CalcCN = 0
            self.REW = 14
            self.create_df(dz)
            self.add_layer(sum(dz), 0.32, 0.50, 0.54, 100, 100)

        elif soilType == 'Paddy':
            self.CN = 77
            self.CalcCN = 0
            self.REW = 10
            self.create_df(dz)
            self.add_layer(0.5, 0.32, 0.50, 0.54, 15, 100)
            self.add_layer(1.5, 0.39, 0.54, 0.55, 2, 100)

        elif soilType == 'ac_TunisLocal':
            self.CN = 46
            self.CalcCN = 0
            self.REW = 7
            dz = [0.1]*6 + [0.15]*5 + [0.2]
            self.create_df(dz)
            self.add_layer(0.3, 0.24, 0.40, 0.50, 155, 100)
            self.add_layer(1.7, 0.11, 0.33, 0.46, 500, 100)

        else:
            # Bug fix: was print('wrong soil type') + `assert 1==2`, which
            # is stripped under `python -O` and would leave a half-built
            # object; raise so an unknown soil type always fails loudly.
            raise ValueError(f"wrong soil type: '{soilType}'")

    def __repr__(self):
        # Bug fix: the original printed each attribute and returned ' ',
        # violating the __repr__ contract (must return the representation
        # string); build and return the summary instead.
        summary = [f"{key}: {getattr(self,key)}" for key in self.__dict__ if key != 'profile']
        return '\n'.join(summary)

    def create_df(self,dz):
        """Create the compartment table (`self.profile`) from thicknesses `dz`."""
        self.profile = pd.DataFrame(np.empty((len(dz),4)),columns=["Comp","Layer","dz","dzsum"])
        self.profile.dz = dz
        self.profile.dzsum = np.cumsum(self.profile.dz).round(2)
        self.profile.Comp = np.arange(len(dz))
        self.profile.Layer = np.nan

        # Top/bottom/mid depths (m) of each compartment.
        self.profile["zBot"] = self.profile.dzsum
        self.profile["zTop"] = self.profile["zBot"]-self.profile.dz
        self.profile["zMid"] = (self.profile["zTop"]+self.profile["zBot"])/2

    def calculate_soil_hydraulic_properties(self,Sand,Clay,OrgMat,DF=1):
        """
        Function to calculate soil hydraulic properties, given textural inputs.
        Calculations use pedotransfer function equations described in Saxton and Rawls (2006)

        Sand/Clay are fractions (0-1); OrgMat is organic matter (%); DF is a
        density adjustment factor.  Returns (th_wp, th_fc, th_s, Ksat).
        """

        # Water content at permanent wilting point
        Pred_thWP = ( -(0.024*Sand) + (0.487*Clay) + (0.006*OrgMat)
                    +(0.005*Sand*OrgMat) - (0.013*Clay*OrgMat)
                    +(0.068*Sand*Clay) + 0.031 )

        th_wp = Pred_thWP+(0.14*Pred_thWP)-0.02

        # Water content at field capacity and saturation
        Pred_thFC = ( -(0.0251*Sand) + (0.195*Clay) + (0.011*OrgMat)
                    +(0.006*Sand*OrgMat) - (0.027*Clay*OrgMat)
                    +(0.452*Sand*Clay) + 0.299 )

        PredAdj_thFC = Pred_thFC+((1.283*(np.power(Pred_thFC,2)))-(0.374*Pred_thFC)-0.015)

        Pred_thS33 = ( (0.0278*Sand) + (0.034*Clay) + (0.022*OrgMat)
                    -(0.018*Sand*OrgMat) - (0.027*Clay*OrgMat)
                    +(0.584*Sand*Clay) + 0.078 )

        PredAdj_thS33 = Pred_thS33+((0.636*Pred_thS33)-0.107)
        Pred_thS = (PredAdj_thFC+PredAdj_thS33)+((-0.097*Sand)+0.043)

        pN = (1-Pred_thS)*2.65
        pDF = pN*DF
        PorosComp = (1-(pDF/2.65))-(1-(pN/2.65))
        PorosCompOM = 1-(pDF/2.65)

        DensAdj_thFC = PredAdj_thFC+(0.2*PorosComp)
        DensAdj_thS = PorosCompOM

        th_fc = DensAdj_thFC
        th_s = DensAdj_thS

        # Saturated hydraulic conductivity (mm/day)
        lmbda = 1/((np.log(1500)-np.log(33))/(np.log(th_fc)-np.log(th_wp)))
        Ksat = (1930*(th_s-th_fc)**(3-lmbda))*24

        # Water content at air dry
        th_dry = th_wp / 2

        # round values
        th_dry = round(10_000*th_dry)/10_000
        th_wp = round(1000*th_wp)/1000
        th_fc = round(1000*th_fc)/1000
        th_s = round(1000*th_s)/1000
        Ksat = round(10*Ksat)/10

        return th_wp,th_fc,th_s,Ksat

    def add_layer_from_texture(self,thickness,Sand,Clay,OrgMat,penetrability):
        """Add a layer whose hydraulic properties are derived from texture."""
        th_wp,th_fc,th_s,Ksat=self.calculate_soil_hydraulic_properties(Sand/100,Clay/100,OrgMat)

        self.add_layer(thickness, th_wp, th_fc, th_s, Ksat, penetrability)

    def add_layer(self,thickness, thWP, thFC, thS, Ksat, penetrability):
        """Assign the next soil layer (top-down) to the compartments it spans
        and store its hydraulic properties on those rows."""

        self.nLayer +=1

        num_layers = len(self.profile.dropna().Layer.unique())

        new_layer = num_layers+1

        if new_layer==1:
            self.profile.loc[(round(thickness,2)>=round(self.profile.dzsum,2)),"Layer"] = new_layer
        else:
            last = self.profile[self.profile.Layer==new_layer-1].dzsum.values[-1]
            self.profile.loc[(thickness+last>=self.profile.dzsum) & (self.profile.Layer.isna()),"Layer"] = new_layer

        self.profile.loc[self.profile.Layer==new_layer,"th_dry"] = self.profile.Layer.map({new_layer:thWP/2})
        self.profile.loc[self.profile.Layer==new_layer,"th_wp"] = self.profile.Layer.map({new_layer:thWP})
        self.profile.loc[self.profile.Layer==new_layer,"th_fc"] = self.profile.Layer.map({new_layer:thFC})
        self.profile.loc[self.profile.Layer==new_layer,"th_s"] = self.profile.Layer.map({new_layer:thS})
        self.profile.loc[self.profile.Layer==new_layer,"Ksat"] = self.profile.Layer.map({new_layer:Ksat})
        self.profile.loc[self.profile.Layer==new_layer,"penetrability"] = self.profile.Layer.map({new_layer:penetrability})

        # Calculate drainage characteristic (tau)
        # Calculations use equation given by Raes et al. 2012
        tau = round(0.0866*(Ksat**0.35),2)
        if tau > 1:
            tau = 1
        elif tau < 0:
            tau = 0

        self.profile.loc[self.profile.Layer==new_layer,"tau"] = self.profile.Layer.map({new_layer:tau})

    def fill_nan(self,):
        """Forward-fill layer properties into unassigned compartments and
        refresh the derived totals (zSoil, nComp, dzsum)."""

        self.profile = self.profile.fillna(method='ffill')

        self.profile.dz = self.profile.dz.round(2)

        self.profile.dzsum = self.profile.dz.cumsum().round(2)

        self.zSoil = round(self.profile.dz.sum(),2)

        self.nComp = len(self.profile)

        self.profile.Layer = self.profile.Layer.astype(int)

    def add_capillary_rise_params(self,):
        """Compute capillary-rise coefficients (aCR, bCR) per soil layer.

        Only meaningful when a water table is present.  Calculations use the
        texture-class equations described in Raes et al. (2012).
        """
        # Calculate capillary rise parameters for all soil layers
        # Only do calculation if water table is present. Calculations use equations
        # described in Raes et al. (2012)
        prof = self.profile

        # Mean hydraulic properties per layer.
        hydf = prof.groupby('Layer').mean().drop(['dz','dzsum'],axis=1)
        hydf["aCR"] = 0
        hydf["bCR"] = 0

        for layer in hydf.index.unique():
            layer = int(layer)

            soil=hydf.loc[layer]

            thwp = soil.th_wp
            thfc = soil.th_fc
            ths = soil.th_s
            Ksat = soil.Ksat

            aCR = 0
            bCR = 0

            # Texture classes are identified by their th_wp/th_fc/th_s ranges;
            # Ksat is clamped to each class's valid range before use.
            if (thwp >= 0.04) and (thwp <= 0.15) and (thfc >= 0.09) and \
                (thfc <= 0.28) and (ths >= 0.32) and (ths <= 0.51):

                # Sandy soil class
                if (Ksat >= 200) and (Ksat <= 2000):
                    aCR = -0.3112-(Ksat*(1e-5))
                    bCR = -1.4936+(0.2416*np.log(Ksat))
                elif Ksat < 200:
                    aCR = -0.3112-(200*(1e-5));
                    bCR = -1.4936+(0.2416*np.log(200))
                elif Ksat > 2000:
                    aCR = -0.3112-(2000*(1e-5));
                    bCR = -1.4936+(0.2416*np.log(2000));

            elif (thwp >= 0.06) and (thwp <= 0.20) and (thfc >= 0.23) and \
                (thfc <= 0.42) and (ths >= 0.42) and (ths <= 0.55):

                # Loamy soil class
                if (Ksat >= 100) and (Ksat <= 750):
                    aCR = -0.4986+(9*(1e-5)*Ksat)
                    bCR = -2.132+(0.4778*np.log(Ksat))
                elif Ksat < 100:
                    aCR = -0.4986+(9*(1e-5)*100)
                    bCR = -2.132+(0.4778*np.log(100))
                elif Ksat > 750:
                    aCR = -0.4986+(9*(1e-5)*750)
                    bCR = -2.132+(0.4778*np.log(750))

            elif (thwp >= 0.16) and (thwp <= 0.34) and (thfc >= 0.25) and \
                (thfc <= 0.45) and (ths >= 0.40) and (ths <= 0.53):

                # Sandy clayey soil class
                if (Ksat >= 5) and (Ksat <= 150):
                    aCR = -0.5677-(4*(1e-5)*Ksat)
                    bCR = -3.7189+(0.5922*np.log(Ksat))
                elif Ksat < 5:
                    aCR = -0.5677-(4*(1e-5)*5)
                    bCR = -3.7189+(0.5922*np.log(5))
                elif Ksat > 150:
                    aCR = -0.5677-(4*(1e-5)*150)
                    bCR = -3.7189+(0.5922*np.log(150))

            elif (thwp >= 0.20) and (thwp <= 0.42) and (thfc >= 0.40) and \
                (thfc <= 0.58) and (ths >= 0.49) and (ths <= 0.58):

                # Silty clayey soil class
                if (Ksat >= 1) and (Ksat <= 150):
                    aCR = -0.6366+(8*(1e-4)*Ksat)
                    bCR = -1.9165+(0.7063*np.log(Ksat))
                elif Ksat < 1:
                    aCR = -0.6366+(8*(1e-4)*1)
                    bCR = -1.9165+(0.7063*np.log(1))
                elif Ksat > 150:
                    aCR = -0.6366+(8*(1e-4)*150)
                    bCR = -1.9165+(0.7063*np.log(150))

            assert aCR != 0
            assert bCR != 0

            prof.loc[prof.Layer==layer,"aCR"] = prof.Layer.map({layer:aCR})
            prof.loc[prof.Layer==layer,"bCR"] = prof.Layer.map({layer:bCR})

        self.profile=prof
# Cell
class CropClass:
'''
The Crop Class contains Paramaters and variables of the crop used in the simulation
**Attributes**:\n
`c_name`: `str`: crop name ('custom' or one of built in defaults e.g. 'Maize')
`PlantingDate` : `str` : Planting Date (mm/dd)
`HarvestDate` : `str` : Latest Harvest Date (mm/dd)
`CropType` : `int` : Crop Type (1 = Leafy vegetable, 2 = Root/tuber, 3 = Fruit/grain)
`PlantMethod` : `int` : Planting method (0 = Transplanted, 1 = Sown)
`CalendarType` : `int` : Calendar Type (1 = Calendar days, 2 = Growing degree days)
`SwitchGDD` : `int` : Convert calendar to GDD mode if inputs are given in calendar days (0 = No; 1 = Yes)
`IrrMngt`: `dict` : dictionary containting irrigation management information
`IrrSchd` : `pandas.DataFrame` : pandas DataFrame containing the Irrigation Schedule if predefined
`FieldMngt` : `dict` : Dictionary containing field management variables for the growing season of the crop
A number of default program properties of type float are also specified during initialisation
'''
def __init__(self,c_name,PlantingDate,HarvestDate=None,**kwargs):
self.Name = ''
# Assign default program properties (should not be changed without expert knowledge)
self.fshape_b = 13.8135 # Shape factor describing the reduction in biomass production for insufficient growing degree days
self.PctZmin = 70 # Initial percentage of minimum effective rooting depth
self.fshape_ex = -6 # Shape factor describing the effects of water stress on root expansion
self.ETadj = 1 # Adjustment to water stress thresholds depending on daily ET0 (0 = No, 1 = Yes)
self.Aer = 5 # Vol (%) below saturation at which stress begins to occur due to deficient aeration
self.LagAer = 3 # Number of days lag before aeration stress affects crop growth
self.beta = 12 # Reduction (%) to p_lo3 when early canopy senescence is triggered
self.a_Tr = 1 # Exponent parameter for adjustment of Kcx once senescence is triggered
self.GermThr = 0.2 # Proportion of total water storage needed for crop to germinate
self.CCmin = 0.05 # Minimum canopy size below which yield formation cannot occur
self.MaxFlowPct = 100/3 # Proportion of total flowering time (%) at which peak flowering occurs
self.HIini = 0.01 # Initial harvest index
self.bsted = 0.000138 # WP co2 adjustment parameter given by Steduto et al. 2007
self.bface = 0.001165 # WP co2 adjustment parameter given by FACE experiments
if c_name == 'Maize':
self.Name = 'Maize'
# added in Read_Model_Paramaters
self.CropType= 3 # Crop Type (1 = Leafy vegetable, 2 = Root/tuber, 3 = Fruit/grain)
self.PlantMethod= 1 # Planting method (0 = Transplanted, 1 = Sown)
self.CalendarType= 2 # Calendar Type (1 = Calendar days, 2 = Growing degree days)
self.SwitchGDD= 0 # Convert calendar to GDD mode if inputs are given in calendar days (0 = No; 1 = Yes)
self.PlantingDate= PlantingDate # Planting Date (mm/dd)
self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
self.Emergence = 80 # Growing degree/Calendar days from sowing to emergence/transplant recovery
self.MaxRooting = 1420 # Growing degree/Calendar days from sowing to maximum rooting
self.Senescence = 1420 # Growing degree/Calendar days from sowing to senescence
self.Maturity = 1670 # Growing degree/Calendar days from sowing to maturity
self.HIstart = 850 # Growing degree/Calendar days from sowing to start of yield formation
self.Flowering = 190 # Duration of flowering in growing degree/calendar days (-999 for non-fruit/grain crops)
self.YldForm = 775 # Duration of yield formation in growing degree/calendar days
self.GDDmethod = 2 # Growing degree day calculation method
self.Tbase = 8 # Base temperature (degC) below which growth does not progress
self.Tupp = 30 # Upper temperature (degC) above which crop development no longer increases
self.PolHeatStress = 1 # Pollination affected by heat stress (0 = No, 1 = Yes)
self.Tmax_up = 40 # Maximum air temperature (degC) above which pollination begins to fail
self.Tmax_lo = 45 # Maximum air temperature (degC) at which pollination completely fails
self.PolColdStress = 1 # Pollination affected by cold stress (0 = No, 1 = Yes)
self.Tmin_up = 10 # Minimum air temperature (degC) below which pollination begins to fail
self.Tmin_lo = 5 # Minimum air temperature (degC) at which pollination completely fails
self.TrColdStress = 1 # Transpiration affected by cold temperature stress (0 = No, 1 = Yes)
self.GDD_up = 12 # Minimum growing degree days (degC/day) required for full crop transpiration potential
self.GDD_lo = 0 # Growing degree days (degC/day) at which no crop transpiration occurs
self.Zmin = 0.3 # Minimum effective rooting depth (m)
self.Zmax = 1.7 # Maximum rooting depth (m)
self.fshape_r = 1.3 # Shape factor describing root expansion
self.SxTopQ = 0.0480 # Maximum root water extraction at top of the root zone (m3/m3/day)
self.SxBotQ = 0.0117 # Maximum root water extraction at the bottom of the root zone (m3/m3/day)
self.SeedSize = 6.5 # Soil surface area (cm2) covered by an individual seedling at 90% emergence
self.PlantPop = 75_000 # Number of plants per hectare
self.CCx = 0.96 # Maximum canopy cover (fraction of soil cover)
self.CDC = 0.01 # Canopy decline coefficient (fraction per GDD/calendar day)
self.CGC = 0.0125 # Canopy growth coefficient (fraction per GDD)
self.Kcb = 1.05 # Crop coefficient when canopy growth is complete but prior to senescence
self.fage = 0.3 # Decline of crop coefficient due to ageing (%/day)
self.WP = 33.7 # Water productivity normalized for ET0 and C02 (g/m2)
self.WPy = 100 # Adjustment of water productivity in yield formation stage (% of WP)
self.fsink = 0.5 # Crop performance under elevated atmospheric CO2 concentration (%/100)
self.HI0 = 0.48 # Reference harvest index
self.dHI_pre = 0 # Possible increase of harvest index due to water stress before flowering (%)
self.a_HI = 7 # Coefficient describing positive impact on harvest index of restricted vegetative growth during yield formation
self.b_HI = 3 # Coefficient describing negative impact on harvest index of stomatal closure during yield formation
self.dHI0 = 15 # Maximum allowable increase of harvest index above reference value
self.Determinant = 1 # Crop Determinancy (0 = Indeterminant, 1 = Determinant)
self.exc = 50 # Excess of potential fruits
self.p_up1 = 0.14 # Upper soil water depletion threshold for water stress effects on affect canopy expansion
self.p_up2 = 0.69 # Upper soil water depletion threshold for water stress effects on canopy stomatal control
self.p_up3 = 0.69 # Upper soil water depletion threshold for water stress effects on canopy senescence
self.p_up4 = 0.8 # Upper soil water depletion threshold for water stress effects on canopy pollination
self.p_lo1 = 0.72 # Lower soil water depletion threshold for water stress effects on canopy expansion
self.p_lo2 = 1 # Lower soil water depletion threshold for water stress effects on canopy stomatal control
self.p_lo3 = 1 # Lower soil water depletion threshold for water stress effects on canopy senescence
self.p_lo4 = 1 # Lower soil water depletion threshold for water stress effects on canopy pollination
self.fshape_w1 = 2.9 # Shape factor describing water stress effects on canopy expansion
self.fshape_w2 = 6 # Shape factor describing water stress effects on stomatal control
self.fshape_w3 = 2.7 # Shape factor describing water stress effects on canopy senescence
self.fshape_w4 = 1 # Shape factor describing water stress effects on pollination
elif c_name == 'Wheat':
self.Name = 'Wheat'
self.CropType= 3; self.PlantMethod= 1; self.CalendarType= 2
self.SwitchGDD= 0;
self.PlantingDate= PlantingDate # Planting Date (mm/dd)
self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
#self.PlantingDate= '10/15'; self.HarvestDate= '05/30'
self.Emergence = 150; self.MaxRooting = 864; self.Senescence = 1700
self.Maturity = 2400; self.HIstart = 1250; self.Flowering = 200
self.YldForm = 1100; self.GDDmethod = 3; self.Tbase = 0
self.Tupp = 26; self.PolHeatStress = 1; self.Tmax_up = 35
self.Tmax_lo = 40; self.PolColdStress = 1; self.Tmin_up = 5
self.Tmin_lo = 0; self.TrColdStress = 1; self.GDD_up = 14
self.GDD_lo = 0; self.Zmin = 0.3; self.Zmax = 1.5
self.fshape_r = 1.5; self.SxTopQ = 0.0480; self.SxBotQ = 0.012
self.SeedSize = 1.5; self.PlantPop = 4_500_000; self.CCx = 0.96
self.CDC = 0.004; self.CGC = 0.005001; self.Kcb = 1.1
self.fage = 0.15; self.WP = 15; self.WPy = 100
self.fsink = 0.5; self.HI0 = 0.48; self.dHI_pre = 5
self.a_HI = 10; self.b_HI = 7; self.dHI0 = 15
self.Determinant = 1; self.exc = 100; self.p_up1 = 0.2
self.p_up2 = 0.65; self.p_up3 = 0.7; self.p_up4 = 0.85
self.p_lo1 = 0.65; self.p_lo2 = 1; self.p_lo3 = 1
self.p_lo4 = 1; self.fshape_w1 = 5.; self.fshape_w2 = 2.5
self.fshape_w3 = 2.5; self.fshape_w4 = 1.
elif c_name == 'Potato':
self.Name = 'Potato'
self.CropType= 2; self.PlantMethod= 0; self.CalendarType= 1
self.SwitchGDD= 0;
self.PlantingDate= PlantingDate # Planting Date (mm/dd)
self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
#self.PlantingDate= '04/25'; self.HarvestDate= '08/30'
self.Emergence = 15; self.MaxRooting = 50; self.Senescence = 105
self.Maturity = 125; self.HIstart = 46; self.Flowering = -999
self.YldForm = 77; self.GDDmethod = 3; self.Tbase = 2
self.Tupp = 26; self.PolHeatStress = 0; self.Tmax_up = -999
self.Tmax_lo = -999; self.PolColdStress = 0; self.Tmin_up = -999
self.Tmin_lo = -999; self.TrColdStress = 1; self.GDD_up = 7
self.GDD_lo = 0; self.Zmin = 0.3; self.Zmax = 0.6
self.fshape_r = 1.5; self.SxTopQ = 0.0480; self.SxBotQ = 0.012
self.SeedSize = 15; self.PlantPop = 40_000; self.CCx = 0.92
self.CDC = 0.01884; self.CGC = 0.126; self.Kcb = 1.1
self.fage = 0.15; self.WP = 18; self.WPy = 100
self.fsink = 0.5; self.HI0 = 0.85; self.dHI_pre = 2
self.a_HI = 0; self.b_HI = 10; self.dHI0 = 5
self.Determinant = 0; self.exc = 0; self.p_up1 = 0.2
self.p_up2 = 0.6; self.p_up3 = 0.7; self.p_up4 = 0.8
self.p_lo1 = 0.6; self.p_lo2 = 1; self.p_lo3 = 1
self.p_lo4 = 1; self.fshape_w1 = 3.; self.fshape_w2 = 3
self.fshape_w3 = 3; self.fshape_w4 = 0
elif c_name == 'Rice':
self.Name = 'Rice'
self.CropType= 3; self.PlantMethod= 0; self.CalendarType= 2
self.SwitchGDD= 0;
self.PlantingDate= PlantingDate # Planting Date (mm/dd)
self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
#self.PlantingDate= '08/01'; self.HarvestDate= '11/30'
self.Emergence = 102; self.MaxRooting = 381; self.Senescence = 1450
self.Maturity = 1707; self.HIstart = 1088; self.Flowering = 318
self.YldForm = 577; self.GDDmethod = 3; self.Tbase = 8
self.Tupp = 30; self.PolHeatStress = 1; self.Tmax_up = 35
self.Tmax_lo = 40; self.PolColdStress = 1; self.Tmin_up = 8
self.Tmin_lo = 3; self.TrColdStress = 1; self.GDD_up = 10
self.GDD_lo = 0; self.Zmin = 0.3; self.Zmax = 0.5
self.fshape_r = 2.5; self.SxTopQ = 0.0480; self.SxBotQ = 0.012
self.SeedSize = 6; self.PlantPop = 1_000_000; self.CCx = 0.95
self.CDC = 0.006172; self.CGC = 0.006163; self.Kcb = 1.1
self.fage = 0.15; self.WP = 19; self.WPy = 100
self.fsink = 0.5; self.HI0 = 0.43; self.dHI_pre = 0
self.a_HI = 10; self.b_HI = 7; self.dHI0 = 15
self.Determinant = 1; self.exc = 100; self.p_up1 = 0
self.p_up2 = 0.5; self.p_up3 = 0.55; self.p_up4 = 0.75
self.p_lo1 = 0.4; self.p_lo2 = 1; self.p_lo3 = 1
self.p_lo4 = 1; self.fshape_w1 = 3.; self.fshape_w2 = 3
self.fshape_w3 = 3; self.fshape_w4 = 2.7
# no aeration stress for rice
self.Aer = -1e10; self.LagAer = 1e10
elif c_name == 'custom':
# temporary solution for new crops
# if using this ensure that all paramaters in 'allowed_keys'
# are passed in as arguments at initialization
self.PlantingDate= PlantingDate # Planting Date (mm/dd)
self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
self.Name = 'custom'
else:
assert 1==2, 'wrong crop name'
# set any paramaters specified by user
allowed_keys = {'fshape_b','PctZmin','fshape_ex','ETadj','Aer','LagAer',
'beta','a_Tr','GermThr','CCmin','MaxFlowPct','HIini',
'bsted','bface','CropType','PlantMethod','CalendarType','SwitchGDD','PlantingDate',
'HarvestDate','Emergence','MaxRooting','Senescence','Maturity',
'HIstart','Flowering','YldForm','GDDmethod','Tbase','Tupp',
'PolHeatStress','Tmax_up','Tmax_lo','PolColdStress','Tmin_up',
'Tmin_lo','TrColdStress','GDD_up','GDD_lo','Zmin','Zmax',
'fshape_r','SxTopQ','SxBotQ','SeedSize','PlantPop','CCx','CDC',
'CGC','Kcb','fage','WP','WPy','fsink','HI0','dHI_pre','a_HI','b_HI',
'dHI0','Determinant','exc','p_up1','p_up2','p_up3','p_up4',
'p_lo1','p_lo2','p_lo3','p_lo4','fshape_w1','fshape_w2','fshape_w3',
'fshape_w4'}
self.__dict__.update((k, v) for k, v in kwargs.items() if k in allowed_keys)
self.calculate_additional_params()
    def calculate_additional_params(self,):
        """Derive secondary crop parameters from the primary inputs.

        Computes and stores on ``self``:
          * ``CC0``           -- initial fractional canopy cover at emergence
          * ``SxTop``/``SxBot`` -- adjusted maximum root water extraction at
            the top / bottom of the root zone
          * ``p_up``/``p_lo``/``fshape_w`` -- water-stress parameter arrays
        """
        # Calculate additional parameters for all self types in mix

        # Fractional canopy cover size at emergence:
        # plants/ha * cm2 per seedling * 1e-8 -> fraction of soil covered.
        self.CC0 = self.PlantPop*self.SeedSize*1e-8

        # Root extraction terms
        SxTopQ = self.SxTopQ
        SxBotQ = self.SxBotQ
        S1 = self.SxTopQ
        S2 = self.SxBotQ
        if S1 == S2:
            # Uniform extraction profile: no adjustment required.
            SxTop = S1
            SxBot = S2
        else:
            # Normalise so that S1 >= S2 before computing the adjustment.
            if SxTopQ < SxBotQ:
                S1 = SxBotQ
                S2 = SxTopQ
            xx = 3*(S2/(S1-S2))
            if xx < 0.5:
                SS1 = (4/3.5)*S1
                SS2 = 0
            else:
                SS1 = (xx+3.5)*(S1/(xx+3))
                SS2 = (xx-0.5)*(S2/xx)
            # NOTE: the *original* (unswapped) SxTopQ/SxBotQ decide which of
            # the adjusted values is assigned to the top vs. the bottom --
            # S1/S2 may have been swapped above, so they cannot be used here.
            if SxTopQ > SxBotQ:
                SxTop = SS1
                SxBot = SS2
            else:
                SxTop = SS2
                SxBot = SS1
        self.SxTop = SxTop
        self.SxBot = SxBot

        # Water stress thresholds, gathered into per-category arrays
        # (four stress categories, index 0..3).
        self.p_up = np.array([self.p_up1,self.p_up2,self.p_up3,self.p_up4])
        self.p_lo = np.array([self.p_lo1,self.p_lo2,self.p_lo3,self.p_lo4])
        self.fshape_w = np.array([self.fshape_w1,self.fshape_w2,self.fshape_w3,self.fshape_w4])
# def flowerfun(self,xx):
# assert self.CropType == 3
# return (0.00558*(xx**0.63))-(0.000969*xx)-0.00383
# Cell
class IrrMngtClass:
    """
    Farmer Class defines irrigation strategy

    **Attributes:**\n

    `Name` : `str` : name

    `IrrMethod` : `int` : Irrigation method {0: rainfed, 1: soil moisture targets, 2: set time interval,
    3: predifined schedule, 4: net irrigation, 5: constant depth }

    `WetSurf` : `int` : Soil surface wetted by irrigation (%)

    `AppEff` : `int` : Irrigation application efficiency (%)

    `MaxIrr` : `float` : Maximum depth (mm) that can be applied each day

    `SMT` : `list` : Soil moisture targets (%TAW) to maintain in each growth stage (only used if irrigation method is equal to 1)

    `IrrInterval` : `int` : Irrigation interval in days (only used if irrigation method is equal to 2)

    `Schedule` : `pandas.DataFrame` : DataFrame containing dates and depths

    `NetIrrSMT` : `float` : Net irrigation threshold moisture level (% of TAW that will be maintained, for IrrMethod=4)

    `depth` : `float` : constant depth to apply on each day (only used if irrigation method is equal to 5)
    """

    def __init__(self, IrrMethod, **kwargs):
        self.IrrMethod = IrrMethod

        # Defaults shared by every strategy.
        self.WetSurf = 100.
        self.AppEff = 100.
        self.MaxIrr = 25.
        self.MaxIrrSeason = 10_000.
        self.SMT = np.zeros(4)
        self.IrrInterval = 0
        self.Schedule = []
        self.NetIrrSMT = 80.
        self.depth = 0.

        if IrrMethod == 1:
            # Soil-moisture-target irrigation: default to 100 %TAW per stage.
            self.SMT = [100]*4

        if IrrMethod == 2:
            # Fixed-interval irrigation: default to every 3 days.
            self.IrrInterval = 3

        if IrrMethod == 3:
            # Predefined schedule: a pandas DataFrame with Date and Depth,
            # e.g.
            #   dates = pd.DatetimeIndex(['20/10/1979','20/11/1979','20/12/1979'])
            #   depths = [25,25,25]
            #   irr = pd.DataFrame([dates,depths]).T
            #   irr.columns = ['Date','Depth']
            self.Schedule = pd.DataFrame(columns=['Date','Depth'])

        if IrrMethod == 4:
            # Net irrigation: maintain this %TAW.
            self.NetIrrSMT = 80

        if IrrMethod == 5:
            # Constant application depth (mm/day); supplied via kwargs.
            self.depth = 0

        # FIX: 'depth' added to the kwargs whitelist so the constant-depth
        # option documented for IrrMethod=5 can actually be configured;
        # previously a depth=... keyword was silently ignored.
        allowed_keys = {'name', 'WetSurf', 'AppEff', 'MaxIrr', 'MaxIrrSeason',
                        'SMT', 'IrrInterval', 'NetIrrSMT', 'Schedule', 'depth'}

        self.__dict__.update((k, v) for k, v in kwargs.items() if k in allowed_keys)
# Cell
# numba jitclass attribute specification for IrrMngtStruct:
# (attribute name, numba dtype) pairs.
spec = [
    ('IrrMethod', int64),
    ('WetSurf', float64),
    ('AppEff', float64),
    ('MaxIrr', float64),
    ('MaxIrrSeason', float64),
    ('SMT', float64[:]),
    ('IrrInterval', int64),
    ('Schedule', float64[:]),
    ('NetIrrSMT', float64),
    ('depth', float64),
]

@jitclass(spec)
class IrrMngtStruct:
    """
    Compiled (numba jitclass) mirror of `IrrMngtClass`, holding the
    irrigation-management state used inside the nopython simulation loop.
    The schedule is flattened to one depth value per simulation day.
    """

    def __init__(self,sim_len):
        self.IrrMethod=0

        self.WetSurf = 100.
        self.AppEff = 100.
        self.MaxIrr = 25.
        # NOTE(review): int literal where the spec declares float64 --
        # numba presumably coerces on assignment; confirm.
        self.MaxIrrSeason = 10_000
        self.SMT=np.zeros(4)
        self.IrrInterval = 0
        # One scheduled depth (mm) per day of the simulation.
        self.Schedule=np.zeros(sim_len)
        self.NetIrrSMT = 80.
        self.depth = 0.
# Cell
class FieldMngtClass:
    '''
    Field Management Class

    **Attributes:**\n

    `Mulches` : `bool` : Soil surface covered by mulches (Y or N)

    `Bunds` : `bool` : Surface bunds present (Y or N)

    `CNadj` : `bool` : Field conditions affect curve number (Y or N)

    `SRinhb` : `bool` : Management practices fully inhibit surface runoff (Y or N)

    `MulchPct` : `float` : Area of soil surface covered by mulches (%)

    `fMulch` : `float` : Soil evaporation adjustment factor due to effect of mulches

    `zBund` : `float` : Bund height (m)

    `BundWater` : `float` : Initial water height in surface bunds (mm)

    `CNadjPct` : `float` : Percentage change in curve number (positive or negative)
    '''

    def __init__(self, Mulches=False, Bunds=False, CNadj=False, SRinhb=False,
                 MulchPct=50, fMulch=0.5, zBund=0, BundWater=0,
                 CNadjPct=0):
        # Every constructor argument is stored verbatim on the instance;
        # the mapping below keeps the name/value pairs in one place.
        options = {
            'Mulches': Mulches,      # soil surface covered by mulches
            'Bunds': Bunds,          # surface bunds present
            'CNadj': CNadj,          # field conditions affect curve number
            'SRinhb': SRinhb,        # practices fully inhibit surface runoff
            'MulchPct': MulchPct,    # % of soil surface covered by mulches
            'fMulch': fMulch,        # soil-evaporation adjustment factor
            'zBund': zBund,          # bund height (m)
            'BundWater': BundWater,  # initial water height in bunds (mm)
            'CNadjPct': CNadjPct,    # % change in curve number (+/-)
        }
        for attr_name, attr_value in options.items():
            setattr(self, attr_name, attr_value)
# Cell
# numba jitclass attribute specification for FieldMngtStruct:
# (attribute name, numba dtype) pairs.
spec = [
    ('Mulches', boolean),
    ('Bunds', boolean),
    ('CNadj', boolean),
    ('SRinhb', boolean),
    ('MulchPct', float64),
    ('fMulch', float64),
    ('zBund', float64),
    ('BundWater', float64),
    ('CNadjPct', float64),
]

@jitclass(spec)
class FieldMngtStruct:
    """
    Compiled (numba jitclass) mirror of `FieldMngtClass`, holding the
    field-management state used inside the nopython simulation loop.
    All fields start zeroed/False and are filled in from the Python-side
    configuration object.
    """

    def __init__(self):
        self.Mulches=False
        self.Bunds=False
        self.CNadj=False
        self.SRinhb=False

        self.MulchPct = 0.
        self.fMulch = 0.
        self.zBund = 0.
        self.BundWater = 0.
        self.CNadjPct = 0.
# Cell
class GwClass:
    '''
    Ground Water Class stores information on water table params

    **Attributes:**\n

    `WaterTable` : `str` : Water table considered (Y or N)

    `Method` : `str` : Water table input data ('Constant' or 'Variable')

    `dates` : `list` : water table observation dates

    `values` : `list` : water table observation depths
    '''

    def __init__(self, WaterTable='N', Method='Constant', dates=None, values=None):
        # FIX: `dates`/`values` previously defaulted to `[]` directly, so a
        # single shared list was mutated across *every* GwClass instance
        # (classic mutable-default-argument pitfall). Use None sentinels and
        # create a fresh list per instance instead.
        self.WaterTable = WaterTable
        self.Method = Method
        self.dates = [] if dates is None else dates
        self.values = [] if values is None else values
# Cell
class InitWCClass:
    '''
    Initial water content Class defines water content at start of sim

    **Attributes:**\n

    `wc_type` : `str` : Type of value ('Prop' = 'WP'/'FC'/'SAT'; 'Num' = XXX m3/m3; 'Pct' = % TAW))

    `Method` : `str` : Method ('Depth' = Interpolate depth points; 'Layer' = Constant value for each soil layer)

    `depth_layer` : `list` : location in soil profile (soil layer or depth)

    `value` : `list` : value at that location
    '''

    def __init__(self, wc_type='Prop', Method='Layer',
                 depth_layer=None, value=None):
        # FIX: `depth_layer`/`value` previously defaulted to the mutable
        # literals `[1]` / `['FC']`, which were shared across every
        # InitWCClass instance. None sentinels keep the same defaults while
        # giving each instance a fresh list.
        depth_layer = [1] if depth_layer is None else depth_layer
        value = ['FC'] if value is None else value

        # Each location must have exactly one value.
        assert len(depth_layer) == len(value)

        self.wc_type = wc_type
        self.Method = Method
        self.depth_layer = depth_layer
        self.value = value
# Cell
# numba jitclass attribute specification for CropStruct:
# (attribute name, numba dtype) pairs. Scalar crop parameters are float64
# or int64; the water-stress parameter sets are 1-D float64 arrays.
spec=[
    ('fshape_b',float64),
    ('PctZmin',float64),
    ('fshape_ex',float64),
    ('ETadj',float64),
    ('Aer',float64),
    ('LagAer',int64),
    ('beta',float64),
    ('a_Tr',float64),
    ('GermThr',float64),
    ('CCmin',float64),
    ('MaxFlowPct',float64),
    ('HIini',float64),
    ('bsted',float64),
    ('bface',float64),
    ('CropType',int64),
    ('PlantMethod',int64),
    ('CalendarType',int64),
    ('SwitchGDD',int64),
    ('EmergenceCD', int64),
    ('Canopy10PctCD', int64),
    ('MaxRootingCD', int64),
    ('SenescenceCD', int64),
    ('MaturityCD', int64),
    ('MaxCanopyCD', int64),
    ('CanopyDevEndCD', int64),
    ('HIstartCD', int64),
    ('HIendCD', int64),
    ('YldFormCD', int64),
    ('Emergence',float64),
    ('MaxRooting',float64),
    ('Senescence',float64),
    ('Maturity',float64),
    ('HIstart',float64),
    ('Flowering',float64),
    ('YldForm',float64),
    ('HIend',float64),
    ('CanopyDevEnd',float64),
    ('MaxCanopy',float64),
    ('GDDmethod',int64),
    ('Tbase',float64),
    ('Tupp',float64),
    ('PolHeatStress',int64),
    ('Tmax_up',float64),
    ('Tmax_lo',float64),
    ('PolColdStress',int64),
    ('Tmin_up',float64),
    ('Tmin_lo',float64),
    ('TrColdStress',int64),
    ('GDD_up',float64),
    ('GDD_lo',float64),
    ('Zmin',float64),
    ('Zmax',float64),
    ('fshape_r',float64),
    ('SxTopQ',float64),
    ('SxBotQ',float64),
    ('SxTop',float64),
    ('SxBot',float64),
    ('SeedSize',float64),
    ('PlantPop',int64),
    ('CCx',float64),
    ('CDC',float64),
    ('CGC',float64),
    ('Kcb',float64),
    ('fage',float64),
    ('WP',float64),
    ('WPy',float64),
    ('fsink',float64),
    ('HI0',float64),
    ('dHI_pre',float64),
    ('a_HI',float64),
    ('b_HI',float64),
    ('dHI0',float64),
    ('Determinant',int64),
    ('exc',float64),
    ('p_up',float64[:]),
    ('p_lo',float64[:]),
    ('fshape_w',float64[:]),
    ('Canopy10Pct',int64),
    ('CC0',float64),
    ('HIGC',float64),
    ('tLinSwitch',int64),
    ('dHILinear',float64),
    ('fCO2',float64),
    ('FloweringCD',int64),
    ('FloweringEnd',float64),
]

@jitclass(spec)
class CropStruct(object):
    '''
    The Crop Class contains Paramaters and variables of the crop used in the simulation

    Compiled (numba jitclass) crop-parameter container; values here are
    program defaults and are normally overwritten from the Python-side
    crop configuration before the simulation starts.

    **Attributes**:\n
    '''

    def __init__(self,):
        # Assign default program properties (should not be changed without expert knowledge)

        self.fshape_b = 13.8135  # Shape factor describing the reduction in biomass production for insufficient growing degree days
        self.PctZmin = 70  # Initial percentage of minimum effective rooting depth
        self.fshape_ex = -6  # Shape factor describing the effects of water stress on root expansion
        self.ETadj = 1  # Adjustment to water stress thresholds depending on daily ET0 (0 = No, 1 = Yes)
        self.Aer = 5  # Vol (%) below saturation at which stress begins to occur due to deficient aeration
        self.LagAer = 3  # Number of days lag before aeration stress affects crop growth
        self.beta = 12  # Reduction (%) to p_lo3 when early canopy senescence is triggered
        self.a_Tr = 1  # Exponent parameter for adjustment of Kcx once senescence is triggered
        self.GermThr = 0.2  # Proportion of total water storage needed for crop to germinate
        self.CCmin = 0.05  # Minimum canopy size below which yield formation cannot occur
        self.MaxFlowPct = 100/3  # Proportion of total flowering time (%) at which peak flowering occurs
        self.HIini = 0.01  # Initial harvest index
        self.bsted = 0.000138  # WP co2 adjustment parameter given by Steduto et al. 2007
        self.bface = 0.001165  # WP co2 adjustment parameter given by FACE experiments

        # added in Read_Model_Paramaters
        self.CropType= 3  # Crop Type (1 = Leafy vegetable, 2 = Root/tuber, 3 = Fruit/grain)
        self.PlantMethod= 1  # Planting method (0 = Transplanted, 1 = Sown)
        self.CalendarType= 2  # Calendar Type (1 = Calendar days, 2 = Growing degree days)
        self.SwitchGDD= 0  # Convert calendar to GDD mode if inputs are given in calendar days (0 = No; 1 = Yes)

        # Calendar-day (CD) equivalents of the growth-stage durations;
        # filled in later when the GDD calendar is converted.
        self.EmergenceCD = 0
        self.Canopy10PctCD = 0
        self.MaxRootingCD = 0
        self.SenescenceCD = 0
        self.MaturityCD = 0
        self.MaxCanopyCD = 0
        self.CanopyDevEndCD = 0
        self.HIstartCD = 0
        self.HIendCD = 0
        self.YldFormCD = 0

        self.Emergence = 80  # Growing degree/Calendar days from sowing to emergence/transplant recovery
        self.MaxRooting = 1420  # Growing degree/Calendar days from sowing to maximum rooting
        self.Senescence = 1420  # Growing degree/Calendar days from sowing to senescence
        self.Maturity = 1670  # Growing degree/Calendar days from sowing to maturity
        self.HIstart = 850  # Growing degree/Calendar days from sowing to start of yield formation
        self.Flowering = 190  # Duration of flowering in growing degree/calendar days (-999 for non-fruit/grain crops)
        self.YldForm = 775  # Duration of yield formation in growing degree/calendar days
        # Derived calendar markers, computed later from the durations above.
        self.HIend = 0
        self.MaxCanopy = 0
        self.CanopyDevEnd = 0
        self.Canopy10Pct = 0

        self.GDDmethod = 2  # Growing degree day calculation method
        self.Tbase = 8  # Base temperature (degC) below which growth does not progress
        self.Tupp = 30  # Upper temperature (degC) above which crop development no longer increases
        self.PolHeatStress = 1  # Pollination affected by heat stress (0 = No, 1 = Yes)
        self.Tmax_up = 40  # Maximum air temperature (degC) above which pollination begins to fail
        self.Tmax_lo = 45  # Maximum air temperature (degC) at which pollination completely fails
        self.PolColdStress = 1  # Pollination affected by cold stress (0 = No, 1 = Yes)
        self.Tmin_up = 10  # Minimum air temperature (degC) below which pollination begins to fail
        self.Tmin_lo = 5  # Minimum air temperature (degC) at which pollination completely fails
        self.TrColdStress = 1  # Transpiration affected by cold temperature stress (0 = No, 1 = Yes)
        self.GDD_up = 12  # Minimum growing degree days (degC/day) required for full crop transpiration potential
        self.GDD_lo = 0  # Growing degree days (degC/day) at which no crop transpiration occurs
        self.Zmin = 0.3  # Minimum effective rooting depth (m)
        self.Zmax = 1.7  # Maximum rooting depth (m)
        self.fshape_r = 1.3  # Shape factor describing root expansion
        self.SxTopQ = 0.0480  # Maximum root water extraction at top of the root zone (m3/m3/day)
        self.SxBotQ = 0.0117  # Maximum root water extraction at the bottom of the root zone (m3/m3/day)
        # Adjusted top/bottom extraction, computed by
        # calculate_additional_params on the Python-side crop object.
        self.SxTop = 0.
        self.SxBot = 0.

        self.SeedSize = 6.5  # Soil surface area (cm2) covered by an individual seedling at 90% emergence
        self.PlantPop = 75_000  # Number of plants per hectare
        self.CCx = 0.96  # Maximum canopy cover (fraction of soil cover)
        self.CDC = 0.01  # Canopy decline coefficient (fraction per GDD/calendar day)
        self.CGC = 0.0125  # Canopy growth coefficient (fraction per GDD)
        self.Kcb = 1.05  # Crop coefficient when canopy growth is complete but prior to senescence
        self.fage = 0.3  # Decline of crop coefficient due to ageing (%/day)
        self.WP = 33.7  # Water productivity normalized for ET0 and C02 (g/m2)
        self.WPy = 100  # Adjustment of water productivity in yield formation stage (% of WP)
        self.fsink = 0.5  # Crop performance under elevated atmospheric CO2 concentration (%/100)
        self.HI0 = 0.48  # Reference harvest index
        self.dHI_pre = 0  # Possible increase of harvest index due to water stress before flowering (%)
        self.a_HI = 7  # Coefficient describing positive impact on harvest index of restricted vegetative growth during yield formation
        self.b_HI = 3  # Coefficient describing negative impact on harvest index of stomatal closure during yield formation
        self.dHI0 = 15  # Maximum allowable increase of harvest index above reference value
        self.Determinant = 1  # Crop Determinancy (0 = Indeterminant, 1 = Determinant)
        self.exc = 50  # Excess of potential fruits
        self.p_up = np.zeros(4)  # Upper soil water depletion threshold for water stress effects on affect canopy expansion
        self.p_lo = np.zeros(4)  # Lower soil water depletion threshold for water stress effects on canopy expansion
        self.fshape_w = np.ones(4)  # Shape factor describing water stress effects on canopy expansion

        # Derived quantities, filled in during parameter preparation.
        self.CC0 = 0.
        self.HIGC = 0.
        self.tLinSwitch = 0
        self.dHILinear = 0.
        self.fCO2 = 0.
        self.FloweringCD = 0
        self.FloweringEnd=0.
# Cell
# numba jitclass attribute specification for InitCondClass:
# (attribute name, numba dtype) pairs for every piece of per-day
# simulation state.
spec=[
    ('AgeDays', float64),
    ('AgeDays_NS', float64),
    ('AerDays', float64),
    ('AerDaysComp', float64[:]),
    ('IrrCum', float64),
    ('DelayedGDDs', float64),
    ('DelayedCDs', float64),
    ('PctLagPhase', float64),
    ('tEarlySen', float64),
    ('GDDcum', float64),
    ('DaySubmerged', float64),
    ('IrrNetCum', float64),
    ('DAP', int64),
    ('Epot', float64),
    ('Tpot', float64),
    ('PreAdj', boolean),
    ('CropMature', boolean),
    ('CropDead', boolean),
    ('Germination', boolean),
    ('PrematSenes', boolean),
    ('HarvestFlag', boolean),
    ('GrowingSeason', boolean),
    ('YieldForm', boolean),
    ('Stage2', boolean),
    ('WTinSoil', boolean),
    ('Stage', float64),
    ('Fpre', float64),
    ('Fpost', float64),
    ('fpost_dwn', float64),
    ('fpost_upp', float64),
    ('HIcor_Asum', float64),
    ('HIcor_Bsum', float64),
    ('Fpol', float64),
    ('sCor1', float64),
    ('sCor2', float64),
    ('HIref', float64),
    ('GrowthStage', float64),
    ('TrRatio', float64),
    ('rCor', float64),
    ('CC', float64),
    ('CCadj', float64),
    ('CC_NS', float64),
    ('CCadj_NS', float64),
    ('B', float64),
    ('B_NS', float64),
    ('HI', float64),
    ('HIadj', float64),
    ('CCxAct', float64),
    ('CCxAct_NS', float64),
    ('CCxW', float64),
    ('CCxW_NS', float64),
    ('CCxEarlySen', float64),
    ('CCprev', float64),
    ('ProtectedSeed', int64),
    ('Y', float64),
    ('Zroot', float64),
    ('CC0adj', float64),
    ('SurfaceStorage', float64),
    ('zGW', float64),
    ('th_fc_Adj', float64[:]),
    ('th', float64[:]),
    ('thini', float64[:]),
    ('TimeStepCounter', int64),
    ('P', float64),
    ('Tmax', float64),
    ('Tmin', float64),
    ('Et0', float64),
    ('GDD', float64),
    ('Wsurf', float64),
    ('EvapZ', float64),
    ('Wstage2', float64),
    ('Depletion', float64),
    ('TAW', float64),
]

@jitclass(spec)
class InitCondClass:
    '''
    The InitCond Class contains all Paramaters and variables used in the simulation

    updated each timestep with the name NewCond

    `num_comp` is the number of soil compartments; the per-compartment
    arrays (`AerDaysComp`, `th_fc_Adj`, `th`, `thini`) are sized by it.
    '''

    def __init__(self,num_comp):

        # counters (cumulative day/degree-day tallies)
        self.AgeDays = 0
        self.AgeDays_NS = 0
        self.AerDays = 0
        self.AerDaysComp = np.zeros(num_comp)
        self.IrrCum = 0
        self.DelayedGDDs = 0
        self.DelayedCDs = 0
        self.PctLagPhase = 0
        self.tEarlySen = 0
        self.GDDcum = 0
        self.DaySubmerged = 0
        self.IrrNetCum = 0
        self.DAP = 0
        self.Epot = 0
        self.Tpot = 0

        # States (boolean simulation flags)
        self.PreAdj = False
        self.CropMature = False
        self.CropDead = False
        self.Germination = False
        self.PrematSenes = False
        self.HarvestFlag = False
        self.GrowingSeason = False
        self.YieldForm = False
        self.Stage2 = False

        self.WTinSoil = False

        # HI (harvest-index adjustment state)
        self.Stage = 1
        self.Fpre = 1
        self.Fpost = 1
        self.fpost_dwn = 1
        self.fpost_upp = 1

        self.HIcor_Asum = 0
        self.HIcor_Bsum = 0
        self.Fpol = 0
        self.sCor1 = 0
        self.sCor2 = 0
        self.HIref = 0.

        # GS (current growth stage)
        self.GrowthStage = 0

        # Transpiration
        self.TrRatio = 1

        # crop growth (canopy cover, biomass, harvest index; *_NS are the
        # no-stress reference values)
        self.rCor = 1

        self.CC = 0
        self.CCadj = 0
        self.CC_NS = 0
        self.CCadj_NS = 0
        self.B = 0
        self.B_NS = 0
        self.HI = 0
        self.HIadj = 0
        self.CCxAct = 0
        self.CCxAct_NS = 0
        self.CCxW = 0
        self.CCxW_NS = 0
        self.CCxEarlySen = 0
        self.CCprev = 0
        self.ProtectedSeed = 0
        self.Y = 0

        self.Zroot = 0
        self.CC0adj = 0
        self.SurfaceStorage = 0
        # Groundwater depth; -999 marks "no water table".
        self.zGW = -999

        self.th_fc_Adj = np.zeros(num_comp)
        self.th = np.zeros(num_comp)
        self.thini = np.zeros(num_comp)

        self.TimeStepCounter=0

        # Daily weather inputs and derived evaporation state.
        self.P=0
        self.Tmax=0
        self.Tmin=0
        self.Et0=0
        self.GDD=0

        self.Wsurf=0
        self.EvapZ=0
        self.Wstage2=0

        self.Depletion=0
        self.TAW=0
# Cell
# numba jitclass attribute specification for WevapClass.
spec = [
    ('Act', float64),
    ('Sat', float64),
    ('Fc', float64),
    ('Wp', float64),
    ('Dry', float64),
]

@jitclass(spec)
class WevapClass(object):
    """
    stores soil water contents in the evaporation layer

    **Attributes:**\n

    `Sat` : `float` : Water storage in evaporation layer at saturation (mm)

    `Fc` : `float` : Water storage in evaporation layer at Field Capacity (mm)

    `Wp` : `float`: Water storage in evaporation layer at Wilting Point (mm)

    `Dry` : `float` : Water storage in evaporation layer at air dry (mm)

    `Act` : `float` : Actual Water storage in evaporation layer (mm)
    """

    def __init__(self):
        self.Sat = 0.
        self.Fc = 0.
        self.Wp = 0.
        self.Dry = 0.
        self.Act = 0.
# Cell
# numba jitclass attribute specification for SoilProfileClass: every field
# is a per-compartment 1-D array.
spec = [
    ('Comp', int64[:]),
    ('dz', float64[:]),
    ('Layer', int64[:]),
    ('dzsum', float64[:]),
    ('th_fc', float64[:]),
    ('Layer_dz', float64[:]),
    ('th_s', float64[:]),
    ('th_wp', float64[:]),
    ('Ksat', float64[:]),
    ('Penetrability', float64[:]),
    ('th_dry', float64[:]),
    ('tau', float64[:]),
    ('zBot', float64[:]),
    ('zTop', float64[:]),
    ('zMid', float64[:]),
    ('th_fc_Adj', float64[:]),
    ('aCR', float64[:]),
    ('bCR', float64[:]),
]

@jitclass(spec)
class SoilProfileClass:
    """
    Compiled (numba jitclass) soil profile: one entry per soil
    compartment, all arrays zero-initialised to `length` compartments
    and filled in by the profile-construction code.

    **Attributes:**\n

    `Comp` : `list` :

    `Layer` : `list` :

    `dz` : `list` :

    `dzsum` : `list` :

    `zBot` : `list` :

    `zTop` : `list` :

    `zMid` : `list` :
    """

    def __init__(self, length):
        self.Comp = np.zeros(length,dtype=int64)
        self.dz = np.zeros(length,dtype=float64)
        self.Layer = np.zeros(length,dtype=int64)
        self.dzsum = np.zeros(length,dtype=float64)
        self.th_fc = np.zeros(length,dtype=float64)
        self.Layer_dz = np.zeros(length,dtype=float64)
        self.th_s = np.zeros(length,dtype=float64)
        self.th_wp = np.zeros(length,dtype=float64)
        self.Ksat = np.zeros(length,dtype=float64)
        self.Penetrability = np.zeros(length,dtype=float64)
        self.th_dry = np.zeros(length,dtype=float64)
        self.tau = np.zeros(length,dtype=float64)
        self.zBot = np.zeros(length,dtype=float64)
        self.zTop = np.zeros(length,dtype=float64)
        self.zMid = np.zeros(length,dtype=float64)
        self.th_fc_Adj = np.zeros(length,dtype=float64)
        self.aCR = np.zeros(length,dtype=float64)
        self.bCR = np.zeros(length,dtype=float64)
# Cell
# numba jitclass attribute specification for TAWClass.
spec = [
    ('Rz', float64),
    ('Zt', float64),
]

@jitclass(spec)
class TAWClass:
    """
    Total available water, split between the root zone and the top layer.

    **Attributes:**\n

    `Rz` : `float` : root-zone value.

    `Zt` : `float` : top-layer value.
    """

    def __init__(self):
        self.Rz = 0.
        self.Zt = 0.
# Cell
# numba jitclass attribute specification for DrClass.
spec = [
    ('Rz', float64),
    ('Zt', float64),
]

@jitclass(spec)
class DrClass:
    """
    Depletion, split between the root zone and the top layer
    (companion of `TAWClass`).

    **Attributes:**\n

    `Rz` : `float` : root-zone value.

    `Zt` : `float` : top-layer value.
    """

    def __init__(self):
        self.Rz = 0.
        self.Zt = 0.
# Cell
# numba jitclass attribute specification for thRZClass.
spec = [
    ('Act', float64),
    ('S', float64),
    ('FC', float64),
    ('WP', float64),
    ('Dry', float64),
    ('Aer', float64),
]

@jitclass(spec)
class thRZClass(object):
    """
    root zone water content

    Actual content plus the reference contents at saturation, field
    capacity, wilting point, air-dry and the aeration threshold.

    **Attributes:**\n

    `Act` : `float` : actual water content.

    `S` : `float` : content at saturation.

    `FC` : `float` : content at field capacity.

    `WP` : `float` : content at wilting point.

    `Dry` : `float` : content at air dry.

    `Aer` : `float` : content at the aeration-stress threshold.
    """

    def __init__(self):
        self.Act = 0.
        self.S = 0.
        self.FC = 0.
        self.WP = 0.
        self.Dry = 0.
        self.Aer = 0.
# Cell
# numba jitclass attribute specification for KswClass.
spec = [
    ('Exp', float64),
    ('Sto', float64),
    ('Sen', float64),
    ('Pol', float64),
    ('StoLin', float64),
]

@jitclass(spec)
class KswClass(object):
    """
    water stress coefficients

    All coefficients start at 1 (no stress) and are reduced by the
    water-stress routines.

    **Attributes:**\n

    `Exp` : `float` : canopy expansion.

    `Sto` : `float` : stomatal closure.

    `Sen` : `float` : early senescence.

    `Pol` : `float` : pollination.

    `StoLin` : `float` : linearised stomatal-closure coefficient.
    """

    def __init__(self):
        self.Exp = 1.
        self.Sto = 1.
        self.Sen = 1.
        self.Pol = 1.
        self.StoLin = 1.
# Cell
# numba jitclass attribute specification for KstClass.
spec = [
    ('PolH', float64),
    ('PolC', float64),
]

@jitclass(spec)
class KstClass(object):
    """
    temperature stress coefficients

    Both start at 1 (no stress).

    **Attributes:**\n

    `PolH` : `float` : heat stress

    `PolC` : `float` : cold stress
    """

    def __init__(self):
        self.PolH = 1.
        self.PolC = 1.
# Cell
spec = [
('RefConc', float64),
('CurrentConc', float64),
]
@jitclass(spec)
class CO2Class(object):
"""
**Attributes:**\n
`RefConc` : `float` : reference CO2 concentration
`CurrentConc` : `float` : current CO2 concentration
"""
def __init__(self):
self.RefConc = 369.41
self.CurrentConc = 0. | [
"tomk10tk@gmail.com"
] | tomk10tk@gmail.com |
661cac8acf0eadfcb8a1d63605e97bdbdb2e9740 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/TestScript/UI/suite_UI_62/tst_UI_62_Cellular_design/test.py | 4b116d08c137cfe84f4e37aea4edc7de3cf116e4 | [] | no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,323 | py | ######################
#Author: Alex Leung ##
######################
from API.Utility import UtilConst
from API.Utility.Util import Util
from API.ComponentBox import ComponentBoxConst
from API.Device.EndDevice.PC.PC import PC
from API.Device.CellTower.CellTower import CellTower
from API.Device.COServer.COServer import COServer
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbarConst import GoldenPhysicalToolbarConst
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbar import GoldenPhysicalToolbar
from API.SimulationPanel.EventList.EventList import EventList
from API.SimulationPanel.PlayControls.PlayControls import PlayControls
from API.functions import check
from API.Workspace.Physical import Physical
from API.Device.DeviceBase.ServicesBase.ServicesBaseConst import ServicesConst
#function initialization
# Shared fixtures for this Squish UI test: workspace utilities, two PDAs,
# a cell tower, a central-office server, and the golden physical-toolbar
# helpers. Device constructors take (model, x, y, display name).
util = Util()
pda0 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 100, "Pda0")
pda1 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 200, "Pda1")
ct = CellTower(ComponentBoxConst.DeviceModel.CELL_TOWER, 100, 100, "Cell Tower0")
cos = COServer(ComponentBoxConst.DeviceModel.CO_SERVER, 100, 200, "Central OfficeServer0")
gpt = GoldenPhysicalToolbar()
gptc = GoldenPhysicalToolbarConst()
def main():
    """Entry point invoked by the Squish runner: build the topology,
    verify device settings, then exercise the physical-view moves.
    The three steps depend on each other and must run in this order."""
    util.init()
    maketop()
    checksettings()
    movephysical()
def maketop():
    """Place the devices on the logical workspace, cable the cell tower
    to the central-office server over coaxial, and let the simulated
    network converge before any checks run."""
    pda0.create()
    pda1.create()
    ct.create()
    cos.create()
    ct.connect(cos, ComponentBoxConst.Connection.CONN_COAXIAL, "Coaxial0", "Coaxial0/0")
    util.speedUpConvergence()
def checksettings():
    """Verify the default cell-tower and central-office-server settings:
    interface addressing, DHCP/DHCPv6 service defaults, and the cell-tower
    service tables."""
    # Cell tower: only open and close the config tab.
    ct.select()
    ct.clickConfigTab()
    ct.close()

    # Central-office server: default addressing on the cell-tower interface.
    cos.select()
    cos.clickConfigTab()
    cos.config.selectInterface('Cell Tower')
    cos.config.interface.cellTower.check.ip("172.16.1.1")
    cos.config.interface.cellTower.check.subnet('255.255.255.0')
    cos.config.interface.cellTower.check.ipv6("2001::1")
    cos.config.interface.cellTower.check.subnetv6("64")
    # Link-local address is device-generated, so it is matched by pattern.
    cos.config.interface.cellTower.check.linkLocal("FE80::[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}")

    # DHCP service defaults.
    cos.clickServicesTab()
    cos.services.selectInterface('DHCP')
    cos.services.dhcp.check.ip("172.16.1.1")
    cos.services.dhcp.check.subnet("255.255.255.0")
    cos.services.dhcp.check.startIp1("172")
    cos.services.dhcp.check.startIp2('16')
    cos.services.dhcp.check.startIp3('1')
    cos.services.dhcp.check.startIp4('100')
    cos.services.dhcp.check.maxUsers('50')

    # DHCPv6: service on, with one prefix and one local-pool row.
    cos.services.selectInterface('DHCPv6')
    #cos.services.dhcpv6.on()
    cos.services.dhcpv6.check.on(True)
    test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.PREFIX_TABLE).rowCount, 1)
    test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.LOCAL_TABLE).rowCount, 1)

    # Cell-tower service tables: one tower listed; selecting interface
    # "0/0" shows two attached devices (the two PDAs).
    # NOTE(review): interface name is 'CELL TOWER' here but 'Cell Tower'
    # above -- presumably both resolve in the UI; confirm.
    cos.services.selectInterface("CELL TOWER")
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
    cos.services.cellTower.refreshButton()
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
    cos.services.cellTower.clickItem("0/0")
    test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_DEVICE_LIST).rowCount, 2)
    cos.services.selectInterface("PAP/CHAP")
    cos.close()
def movephysical():
    """Move a smartphone around the physical workspace and verify cell
    coverage: out of tower range the ping fails; back in range the ICMP
    broadcast reaches the tower, both phones and the server."""
    # Navigate the physical view to the smartphone and move it to Intercity.
    util.clickOnPhysical()
    gpt.clickButton(gptc.NAVIGATION)
    gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1.Home City.Corporate Office.Smartphone0")
    gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
    # gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 409)
    # gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 818)
    gpt.clickButton(gptc.MOVE_OBJECT)
    util.clickOnPhysicalWorkspace(172, 215)
    #mouseClick(waitForObject(gptc.TABLE1_DEVICE1), 39, 848, 0, Qt.LeftButton)
    #sendEvent("QMouseEvent", waitForObject(gptc.TABLE1_DEVICE1), QEvent.MouseButtonRelease, 38, 95, Qt.LeftButton, 0, 0)
    activateItem(waitForObjectItem(gptc.MOVE_DROPDOWN, "Move to Intercity"))
    snooze(5)
    #gpt.clickButton(gptc.NAVIGATION)
    gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1")
    gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)

    # Drag the phone far from the tower: the ping must fail completely.
    smartphone = Physical().getObject('Smartphone0')
    util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 500, 300)
    util.clickOnLogical()
    pda0.select()
    pda0.clickDesktopTab()
    pda0.desktop.applications.commandPrompt()
    pda0.desktop.commandPrompt.setText("ping 172.16.1.1")
    util.fastForwardTime()
    pda0.desktop.commandPrompt.textCheckPoint("Received = 0", 1)
    #checkpoint phone outside range
    #checkpoint phone not getting reception
    pda0.close()

    # Drag the phone back in range, then broadcast-ping in simulation mode
    # and confirm ICMP events reach every cellular participant.
    util.clickOnPhysical()
    smartphone = Physical().getObject('Smartphone0')
    util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 200, 200)
    util.clickOnLogical()
    util.clickOnSimulation()
    pda0.select()
    pda0.clickTab('Desktop')
    pda0.desktop.applications.commandPrompt()
    pda0.desktop.commandPrompt.setText('ping 172.16.255.255')
    PlayControls().captureForward(10)
    foundEvent = []
    foundEvent.append(EventList().findEventAt('Smartphone0', 'Cell Tower0', 'ICMP'))
    foundEvent.append(EventList().findEventAt('Smartphone1', 'Cell Tower0', 'ICMP'))
    foundEvent.append(EventList().findEventAt('Central Office Server0', 'Cell Tower0', 'ICMP'))
    check(not False in foundEvent)
| [
"ptqatester1@gmail.com"
] | ptqatester1@gmail.com |
a270947c1b4f962a0d9e5be8ec990bbefd2b4a32 | 3a39ddc4a8600ffc5110453867370c1d8e2da121 | /x11-libs/libXcomposite/libXcomposite-0.4.3.py | 8ce4b041dc0124e9f86b8c9c3514052f3dd809a7 | [] | no_license | seqizz/hadron64 | f2276133786c62f490bdc0cbb6801491c788520f | ca6ef5df3972b925f38e3666ccdc20f2d0bfe87e | refs/heads/master | 2021-01-18T04:53:09.597388 | 2013-02-25T21:25:32 | 2013-02-25T21:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | metadata = """
summary @ X11 Composite extension library
homepage @ http://xorg.freedesktop.org/
license @ MIT
src_url @ http://xorg.freedesktop.org/releases/individual/lib/libXcomposite-$version.tar.bz2
arch @ ~x86
"""
depends = """
runtime @ x11-libs/libXfixes x11-proto/compositeproto
"""
#srcdir = "libXcomposite-%s" % version
def configure():
conf(
"--disable-static")
def install():
raw_install("DESTDIR=%s" % install_dir)
insdoc("COPYING")
| [
"bburaksezer@gmail.com"
] | bburaksezer@gmail.com |
3f61ca63f7dff361e526f2cf6d6398aa22d96de4 | 398815d5f6afc295a09badb4ea12f4911aaf39ba | /flasky/migrations/versions/f4efa5ae67c7_.py | 46d8ea1f5ec1fa3f06925c65221bb3646cf3138e | [] | no_license | mookrs/laboratory | 920cc92e116a26d632ec3e8d79438bdd80c4595c | 797191caaf4b15b8e87ec7bc103e25c7244c6b05 | refs/heads/master | 2021-01-17T01:57:10.123565 | 2018-10-02T09:55:51 | 2018-10-02T09:55:51 | 31,777,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | """empty message
Revision ID: f4efa5ae67c7
Revises: f4fb3aa6b327
Create Date: 2016-08-19 11:11:31.888282
"""
# revision identifiers, used by Alembic.
revision = 'f4efa5ae67c7'
down_revision = 'f4fb3aa6b327'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Auto-generated Alembic upgrade: create the `posts` table (body,
    timestamp, author FK to users.id) plus an index on `timestamp`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.Text(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Auto-generated Alembic downgrade: reverse `upgrade` by dropping
    the timestamp index and then the `posts` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
    op.drop_table('posts')
    ### end Alembic commands ###
| [
"mookrs@gmail.com"
] | mookrs@gmail.com |
d850a1c802074e8537dcabef5abf6157fab5c3a0 | db5e84b22b184242b406bb689b84dba4813b179d | /package_example/example_package/__init__.py | ea725f6c05a849f2cdf0a9597195459448fe1733 | [] | no_license | ErikBjare/python-examples | 46b23629d380efe94ae965256083060f616031c1 | 0f030392bfdf17b93dd14844cb5c35c09f6e8840 | refs/heads/master | 2021-01-16T18:19:14.676416 | 2015-05-08T11:28:40 | 2015-05-08T11:28:40 | 35,238,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # If you want to import all variables in the functions.py file directly into the example_package namespace, use this
from .functions import *
# If you want to import the functions.py file into its own sub-namespace (example_package.functions), use this
from . import functions
| [
"erik.bjareholt@gmail.com"
] | erik.bjareholt@gmail.com |
fe1fc30924e2f2f7aaddfc9e65cfe82ff258f58f | b27f5bff09fab83d2a5970034cd2c2351a8346f2 | /outsource/migrations/0007_auto_20170726_1838.py | 37c8a92c36f8a3c4cf274b86c7bb7e7850228f05 | [] | no_license | cafemoa/takeit_server | 079b4561c1c970a6fa5f508a54fb84d6c8d63610 | fa674ae25d8eb3671f2f73ef43fee7744d257814 | refs/heads/master | 2022-12-10T20:41:17.716496 | 2019-01-14T17:47:50 | 2019-01-14T17:47:50 | 100,565,936 | 1 | 0 | null | 2022-11-22T01:45:43 | 2017-08-17T05:44:01 | JavaScript | UTF-8 | Python | false | false | 592 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-26 09:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-declare ``MyDevice.user`` as a CASCADE FK with reverse accessor ``device``."""

    # Must run after the migration that introduced the MyDevice model.
    dependencies = [
        ('outsource', '0006_mydevice'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mydevice',
            name='user',
            # Deleting a user removes their device rows; related_name exposes user.device.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='device', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"canon0115@naver.com"
] | canon0115@naver.com |
3ce8513dc49bbd7f39174d24d24c9ef059686e0d | 805fe894bbe1d4072a2b083c4d874d0566cd69d0 | /woocommerce.py | 36e1aeb5ac57b29aa5754c1182b1e8941922b6c0 | [] | no_license | Popss2701/Lite | dce6d354008d69c37f2fb20689315a5356f8b0a9 | 817a322bdcca80bc1f9ce0b25e3aa6dd97e70007 | refs/heads/master | 2023-03-27T11:46:55.768279 | 2021-03-23T18:48:37 | 2021-03-23T18:48:37 | 290,103,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271,230 | py | import copy
import csv
import re
from collections import defaultdict
from urllib.parse import unquote
import phpserialize
import chardet
from cartmigration.libs.utils import *
from cartmigration.models.cart.wordpress import LeCartWordpress
# tested with woocommerce335
class LeCartWoocommerce(LeCartWordpress):
WARNING_VARIANT_LIMIT = 100
def __init__(self, data = None):
super().__init__(data)
self.product_types = dict()
self.is_variant_limit = False
    def display_config_source(self):
        """Probe the SOURCE WooCommerce store and fill self._notice['src'] with
        its capabilities: detected plugins (WPML, brands, Yoast, bundles, ...),
        languages, order statuses and permalink bases.

        Returns a success response; the result is the mutated self._notice.
        """
        # NOTE(review): the parent result is never inspected — confirm it is only
        # called for its side effects.
        parent = super().display_config_source()
        url_query = self.get_connector_url('query')
        # Defaults before detection; overwritten below when WPML is present.
        self._notice['src']['language_default'] = 1
        self._notice['src']['category_root'] = 1
        storage_cat_data = dict()
        storage_cat_data[self._notice['src']['language_default']] = 0
        self._notice['src']['store_category'] = storage_cat_data
        self._notice['src']['support']['site_map'] = False
        self._notice['src']['support']['category_map'] = False
        self._notice['src']['support']['attribute_map'] = False
        self._notice['src']['support']['wpml'] = False
        self._notice['src']['support']['yoast_seo'] = False
        self._notice['src']['support']['manufacturers'] = False
        self._notice['src']['support']['product_bundle'] = False
        self._notice['src']['support']['customer_point_rewards'] = False
        self._notice['src']['support']['addons'] = False
        self._notice['src']['support']['plugin_pre_ord'] = False
        self._notice['src']['support']['plugin_order_status'] = False
        self._notice['src']['support']['custom_order_status'] = False
        # Read the PHP-serialized active_plugins option to detect installed plugins.
        query_active_plugins = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
        }
        active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
        active_langs = list()
        if active_plugins and active_plugins['data']:
            active_plugin = active_plugins['data'][0]
            active_plugin_v = active_plugin['option_value']
            if active_plugin_v:
                active_plugin_v_data = php_unserialize(active_plugin_v)
                if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
                    active_plugin_v_data = list(active_plugin_v_data.values())
                if active_plugin_v_data:
                    # WPML: fetch its settings to learn the default and active languages.
                    if "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
                        self._notice['src']['support']['wpml'] = True
                        query_active_languages = {
                            'type': 'select',
                            'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
                        }
                        options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
                        if options_data and options_data['data']:
                            option_value = php_unserialize(options_data['data'][0]['option_value'])
                            if option_value and 'default_language' in option_value:
                                self._notice['src']['language_default'] = option_value['default_language']
                                active_langs = option_value['active_languages'].values()
                    else:
                        self._notice['src']['support']['wpml'] = False
                    # Known brand/manufacturer plugins.
                    if 'woocommerce-brand/main.php' in active_plugin_v_data or "wc-brand/woocommerce-brand.php" in active_plugin_v_data or 'woocommerce-brands/woocommerce-brands.php' in active_plugin_v_data or 'perfect-woocommerce-brands/perfect-woocommerce-brands.php' in active_plugin_v_data:
                        self._notice['src']['support']['manufacturers'] = True
                    if "wordpress-seo/wp-seo.php" in active_plugin_v_data:
                        self._notice['src']['support']['yoast_seo'] = True
                    if "woo-product-bundle-premium/index.php" in active_plugin_v_data or 'woo-product-bundle/index.php' in active_plugin_v_data:
                        self._notice['src']['support']['product_bundle'] = True
                    if "woocommerce-points-and-rewards/woocommerce-points-and-rewards.php" in active_plugin_v_data:
                        self._notice['src']['support']['customer_point_rewards'] = True
                    if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
                        self._notice['src']['support']['addons'] = True
                    # Sequential/custom order-number plugins.
                    if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers-pro.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers.php" in active_plugin_v_data)):
                        self._notice['src']['support']['plugin_pre_ord'] = True
                    if active_plugin_v_data and 'woocommerce-order-status-manager/woocommerce-order-status-manager.php' in active_plugin_v_data:
                        self._notice['src']['support']['plugin_order_status'] = True
                    if active_plugin_v_data and 'woocommerce-status-actions/woocommerce-status-actions.php' in active_plugin_v_data:
                        self._notice['src']['support']['custom_order_status'] = True
        # Batch the remaining config queries; order-status source depends on plugins found above.
        queries_config = {
            'orders_status': {
                'type': 'select',
                # 'query': "SELECT * FROM `_DBPRF_term_taxonomy` AS term_taxonomy LEFT JOIN _DBPRF_terms AS terms ON term_taxonomy.term_id = terms.term_id WHERE term_taxonomy.taxonomy = 'shop_order_status'",
                'query': "SELECT DISTINCT(`post_status`) FROM `_DBPRF_posts` WHERE `post_type` = 'shop_order'",
            },
            'permalink_structure': {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE option_name = 'woocommerce_permalinks' OR option_name = 'category_base'",
            }
        }
        if self._notice['src']['support']['wpml']:
            queries_config['wpml'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_icl_languages` WHERE code IN " + self.list_to_in_condition(active_langs)
            }
            queries_config['default_lang'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` o LEFT JOIN _DBPRF_icl_languages il ON o.option_value = il.default_locale WHERE o.`option_name` = 'WPLANG'"
            }
        if self._notice['src']['support']['plugin_order_status']:
            queries_config['orders_status']['query'] = "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_order_status'"
        if self._notice['src']['support']['custom_order_status']:
            queries_config['orders_status']['query'] = "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_custom_statuses' AND `post_status` = 'publish'"
        config = self.get_connector_data(url_query, {
            'serialize': True,
            'query': json.dumps(queries_config)
        })
        language_data = dict()
        order_status_data = dict()
        product_base = 'product'
        product_category_base = 'product-category'
        category_base = ''
        if config and config['result'] == 'success':
            # Build the order-status map; key shape depends on which plugin supplied the rows.
            if config['data']['orders_status']:
                for order_status_row in config['data']['orders_status']:
                    # order_status_id = 'wc-' + order_status_row['name'].lower()
                    # order_status_data[order_status_id] = order_status_row['name']
                    if self._notice['src']['support']['custom_order_status']:
                        order_status_id = 'wc-' + to_str(order_status_row['post_name'])
                        order_status_data[order_status_id] = order_status_row['post_title']
                    elif self._notice['src']['support']['plugin_order_status']:
                        order_status_id = order_status_row['post_name']
                        order_status_data[order_status_id] = order_status_row['post_title']
                    else:
                        order_status_id = order_status_row['post_status']
                        order_status_data[order_status_id] = self.get_order_status_label(order_status_row['post_status'])
            else:
                # No orders yet: fall back to the stock WooCommerce statuses.
                order_status_data = {
                    'wc-pending': 'Pending payment',
                    'wc-processing': 'Processing',
                    'wc-on-hold': 'On hold',
                    'wc-completed': 'Completed',
                    'wc-cancelled': 'Cancelled',
                    'wc-refunded': 'Refunded',
                    'wc-failed': 'Failed'
                }
            if self._notice['src']['support']['wpml']:
                if not self._notice['src']['language_default'] and 'default_lang' in config['data'] and config['data'][
                    'default_lang']:
                    for lang_default_row in config['data']['default_lang']:
                        if lang_default_row['code']:
                            self._notice['src']['language_default'] = lang_default_row['code']
                if 'wpml' in config['data']:
                    if config['data']['wpml']:
                        for lang_row in config['data']['wpml']:
                            lang_id = lang_row["code"]
                            language_data[lang_id] = lang_row['english_name']
                    else:
                        lang_id = 'en'
                        language_data[lang_id] = "Default language"
                else:
                    lang_id = 1
                    language_data[lang_id] = "Default language"
            # Extract product/category URL bases from the permalink options.
            if config['data']['permalink_structure']:
                product_base_data = get_row_from_list_by_field(config['data']['permalink_structure'], 'option_name', 'woocommerce_permalinks')
                category_base_data = get_row_from_list_by_field(config['data']['permalink_structure'], 'option_name', 'category_base')
                if product_base_data:
                    option_value_data = php_unserialize(product_base_data['option_value'])
                    if option_value_data:
                        product_base = get_value_by_key_in_dict(option_value_data, 'product_base', 'product')
                        product_category_base = get_value_by_key_in_dict(option_value_data, 'category_base', 'product-category')
                if category_base_data:
                    # NOTE(review): category_base is assigned but never read below — confirm intent.
                    category_base = category_base_data['option_value']
        self._notice['src']['config']['category_base'] = product_category_base
        self._notice['src']['config']['product_category_base'] = product_category_base
        self._notice['src']['config']['product_base'] = product_base
        # Publish detected capabilities for the migration UI.
        self._notice['src']['support']['language_map'] = True
        self._notice['src']['languages'] = language_data
        self._notice['src']['order_status'] = order_status_data
        self._notice['src']['support']['order_status_map'] = True
        self._notice['src']['support']['country_map'] = False
        self._notice['src']['support']['add_new'] = True
        self._notice['src']['support']['site_map'] = False
        self._notice['src']['support']['customer_group_map'] = False
        self._notice['src']['support']['languages_select'] = True
        self._notice['src']['support']['order_state_map'] = True
        self._notice['src']['support']['seo'] = True
        # Passwords migrate as-is only for Woo-to-Woo (same hash scheme).
        if self.is_woo2woo():
            self._notice['src']['support']['cus_pass'] = False
        else:
            self._notice['src']['support']['cus_pass'] = True
        self._notice['src']['support']['coupons'] = True
        self._notice['src']['support']['pages'] = True
        self._notice['src']['support']['seo_301'] = True
        self._notice['src']['config']['seo_module'] = self.get_list_seo()
        return response_success()
    def display_config_target(self):
        """Probe the TARGET WooCommerce store and fill self._notice['target']
        with its capabilities: detected plugins (WPML + multi-currency,
        Polylang, brands, Yoast, custom order statuses, ...), languages and
        order statuses.

        Returns a success response; the result is the mutated self._notice.
        """
        url_query = self.get_connector_url('query')
        # Defaults before detection; overwritten below when WPML/Polylang is present.
        self._notice['target']['language_default'] = 1
        self._notice['target']['category_root'] = 1
        storage_cat_data = dict()
        storage_cat_data[self._notice['target']['language_default']] = 0
        self._notice['target']['store_category'] = storage_cat_data
        self._notice['target']['support']['site_map'] = False
        self._notice['target']['support']['category_map'] = False
        self._notice['target']['support']['attribute_map'] = False
        self._notice['target']['support']['wpml'] = False
        self._notice['target']['support']['wpml_currency'] = False
        self._notice['target']['support']['product_bundle'] = False
        self._notice['target']['support']['yoast_seo'] = False
        self._notice['target']['support']['addons'] = False
        self._notice['target']['support']['customer_point_rewards'] = False
        self._notice['target']['support']['polylang'] = False
        self._notice['target']['support']['polylang_product'] = False
        self._notice['target']['support']['polylang_category'] = False
        self._notice['target']['support']['plugin_woo_admin'] = False
        self._notice['target']['support']['custom_order_status'] = False
        self._notice['target']['currency_map'] = dict()
        # Read the PHP-serialized active_plugins option to detect installed plugins.
        query_active_plugins = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
        }
        active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
        active_langs = list()
        if active_plugins and active_plugins['data']:
            active_plugin = active_plugins['data'][0]
            active_plugin_v = active_plugin['option_value']
            if active_plugin_v:
                active_plugin_v_data = php_unserialize(active_plugin_v)
                if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
                    active_plugin_v_data = list(active_plugin_v_data.values())
                # WPML: learn default/active languages plus multi-currency settings.
                if active_plugin_v_data and "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
                    self._notice['target']['support']['wpml'] = True
                    query_active_languages = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
                    }
                    options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
                    if options_data and options_data['data']:
                        option_value = php_unserialize(options_data['data'][0]['option_value'])
                        if option_value and 'default_language' in option_value:
                            self._notice['target']['language_default'] = option_value['default_language']
                            active_langs = option_value['active_languages'].values()
                    query_active_currency = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = '_wcml_settings'"
                    }
                    options_currency_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_currency)})
                    if options_currency_data and options_currency_data['data']:
                        currency_value = php_unserialize(options_currency_data['data'][0]['option_value'])
                        # enable_multi_currency >= 2 appears to mean "enabled" in WCML settings — TODO confirm.
                        if currency_value and 'enable_multi_currency' in currency_value and to_int(currency_value['enable_multi_currency']) >= 2:
                            self._notice['target']['support']['wpml_currency'] = True
                            if 'default_currencies' in currency_value and currency_value['default_currencies']:
                                self._notice['target']['currency_map'] = currency_value['default_currencies']
                        else:
                            self._notice['target']['support']['wpml_currency'] = False
                # Known brand plugins; some use a non-default brand taxonomy.
                woo_brands = [
                    {'name': 'woocommerce-brand/main.php'},
                    {'name': 'wc-brand/woocommerce-brand.php'},
                    {'name': 'martfury-addons/martfury-addons.php', 'taxonomy': 'product_brand'},
                    {'name': 'woocommerce-brands/woocommerce-brands.php', 'taxonomy': 'product_brand'},
                    {'name': 'brands-for-woocommerce/woocommerce-brand.php', 'taxonomy': 'berocket_brand'},
                    {'name': 'perfect-woocommerce-brands/main.php', 'taxonomy': 'pwb-brand'},
                    {'name': 'perfect-woocommerce-brands/perfect-woocommerce-brands.php', 'taxonomy': 'pwb-brand'},
                ]
                self._notice['target']['config']['brand_taxonomy'] = 'product_brand'
                for brand in woo_brands:
                    if brand['name'] in active_plugin_v_data:
                        self._notice['target']['support']['plugin_manufacturers'] = True
                        if brand.get('taxonomy'):
                            self._notice['target']['config']['brand_taxonomy'] = brand['taxonomy']
                        break
                # if ('woocommerce-brand/main.php' in active_plugin_v_data) or ("wc-brand/woocommerce-brand.php" in active_plugin_v_data) or ('woocommerce-brands/woocommerce-brands.php' in active_plugin_v_data) or ('brands-for-woocommerce/woocommerce-brand.php' in active_plugin_v_data):
                #     self._notice['target']['support']['manufacturers'] = True
                # Sequential/custom order-number plugins.
                if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers-pro.php" in active_plugin_v_data)):
                    self._notice['target']['support']['plugin_pre_ord'] = True
                if active_plugin_v_data and "wordpress-seo/wp-seo.php" in active_plugin_v_data:
                    self._notice['target']['support']['yoast_seo'] = True
                if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
                    self._notice['target']['support']['addons'] = True
                # LitExtension helper plugins on the target store.
                if "leurlrewrite/leurlrewrite.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_seo'] = True
                    self._notice['target']['support']['plugin_seo_301'] = True
                if "leprespass/leprespass.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_cus_pass'] = True
                if "woocommerce-admin/woocommerce-admin.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_woo_admin'] = True
                # query_check_seo = {
                #     'type': 'select',
                #     'query': "SHOW TABLES LIKE '_DBPRF_lecm_rewrite';"
                # }
                # check_table_exit = self.select_data_connector(query_check_seo, 'seo')
                # if check_table_exit['result'] == 'success' and to_len(check_table_exit['data']) > 0:
                #     self._notice['target']['support']['seo_301'] = True
                if "woo-product-bundle-premium/index.php" in active_plugin_v_data or 'woo-product-bundle/index.php' in active_plugin_v_data:
                    self._notice['target']['support']['product_bundle'] = True
                if "woocommerce-points-and-rewards/woocommerce-points-and-rewards.php" in active_plugin_v_data:
                    self._notice['target']['support']['customer_point_rewards'] = True
                # if 'polylang/polylang.php' in active_plugin_v_data and 'polylang-wc/polylang-wc.php' in active_plugin_v_data:
                if 'polylang/polylang.php' in active_plugin_v_data:
                    self._notice['target']['support']['polylang'] = True
                if 'woocommerce-status-actions/woocommerce-status-actions.php' in active_plugin_v_data:
                    self._notice['target']['support']['custom_order_status'] = True
        # Batch the remaining config queries depending on detected plugins.
        queries_config = {
            'orders_status': {
                'type': 'select',
                # 'query': "SELECT DISTINCT(`post_status`) FROM `_DBPRF_posts` WHERE `post_type` = 'shop_order'",
                'query': "SELECT * FROM `_DBPRF_term_taxonomy` AS term_taxonomy LEFT JOIN _DBPRF_terms AS terms ON term_taxonomy.term_id = terms.term_id WHERE term_taxonomy.taxonomy = 'shop_order_status'",
            },
        }
        if self._notice['target']['support']['wpml']:
            queries_config['wpml'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_icl_languages` WHERE code IN " + self.list_to_in_condition(active_langs)
            }
            queries_config['default_lang'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` o LEFT JOIN _DBPRF_icl_languages il ON o.option_value = il.default_locale WHERE o.`option_name` = 'WPLANG' and o.`option_value` != '' "
            }
        if self._notice['target']['support']['polylang']:
            queries_config['polylang'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_terms` as t LEFT JOIN `_DBPRF_term_taxonomy` as tx ON t.term_id = tx.term_id WHERE tx.taxonomy = 'language'"
            }
            queries_config['polylang_categories'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_terms` as t LEFT JOIN `_DBPRF_term_taxonomy` as tx ON t.term_id = tx.term_id WHERE tx.taxonomy = 'term_language'"
            }
        if self._notice['target']['support']['custom_order_status']:
            queries_config['custom_order_status'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_custom_statuses' AND `post_status` = 'publish'"
            }
        config = self.select_multiple_data_connector(queries_config)
        # NOTE(review): config['data'] is accessed before checking config['result'] — confirm
        # the connector always returns a 'data' key even on failure.
        if 'polylang' in config['data'] and not config['data']['polylang']:
            self._notice['target']['support']['polylang'] = False
        language_data = dict()
        order_status_data = dict()
        polylang_products = dict()
        polylang_categories = dict()
        if config and config['result'] == 'success':
            # Build the order-status map; custom statuses win over taxonomy rows.
            if self._notice['target']['support']['custom_order_status'] and config['data']['custom_order_status'] and to_len(config['data']['custom_order_status']) > 0:
                for order_status_row in config['data']['custom_order_status']:
                    order_status_id = 'wc-' + to_str(order_status_row['post_name'])
                    order_status_data[order_status_id] = order_status_row['post_title']
            elif config['data']['orders_status'] and to_len(config['data']['orders_status']) > 0:
                for order_status_row in config['data']['orders_status']:
                    order_status_id = 'wc-' + to_str(order_status_row['name']).lower()
                    order_status_data[order_status_id] = order_status_row['name']
                    # order_status_id = order_status_row['post_status']
                    # order_status_data[order_status_id] = self.get_order_status_label(order_status_row['post_status'])
            else:
                # Fall back to the stock WooCommerce statuses.
                order_status_data = {
                    'wc-pending': 'Pending payment',
                    'wc-processing': 'Processing',
                    'wc-on-hold': 'On hold',
                    'wc-completed': 'Completed',
                    'wc-cancelled': 'Cancelled',
                    'wc-refunded': 'Refunded',
                    'wc-failed': 'Failed'
                }
            if self._notice['target']['support']['wpml']:
                if not self._notice['target']['language_default'] and 'default_lang' in config['data'] and config['data']['default_lang']:
                    for lang_default_row in config['data']['default_lang']:
                        if lang_default_row['code']:
                            self._notice['target']['language_default'] = lang_default_row['code']
                if 'wpml' in config['data']:
                    if config['data']['wpml']:
                        for lang_row in config['data']['wpml']:
                            lang_id = lang_row["code"]
                            language_data[lang_id] = lang_row['english_name']
                    else:
                        lang_id = 'en'
                        language_data[lang_id] = "Default language"
                else:
                    lang_id = 1
                    language_data[lang_id] = "Default language"
            elif self._notice['target']['support']['polylang']:
                if not self._notice['target']['language_default'] and 'default_lang' in config['data'] and config['data']['default_lang']:
                    for lang_default_row in config['data']['default_lang']:
                        if lang_default_row['code']:
                            self._notice['target']['language_default'] = lang_default_row['code']
                if 'polylang' in config['data']:
                    if config['data']['polylang']:
                        self._notice['target']['language_default'] = 'en'
                        # Map language slug -> term_taxonomy_id for products and categories.
                        for lang_row in config['data']['polylang']:
                            lang_id = lang_row['slug']
                            language_data[lang_id] = lang_row['name']
                            lang_product = lang_row['slug']
                            polylang_products[lang_product] = lang_row['term_taxonomy_id']
                        if config['data']['polylang_categories']:
                            for lang_row in config['data']['polylang_categories']:
                                lang_category = lang_row['slug'].replace('pll_', '')
                                polylang_categories[lang_category] = lang_row['term_taxonomy_id']
                    else:
                        lang_id = 'en'
                        language_data[lang_id] = "Default language"
                else:
                    lang_id = 1
                    language_data[lang_id] = "Default language"
        else:
            # Connector query failed: use stock statuses and a single default language.
            order_status_data = {
                'wc-pending': 'Pending payment',
                'wc-processing': 'Processing',
                'wc-on-hold': 'On hold',
                'wc-completed': 'Completed',
                'wc-cancelled': 'Cancelled',
                'wc-refunded': 'Refunded',
                'wc-failed': 'Failed'
            }
            lang_id = 1
            language_data[lang_id] = "Default language"
        # Publish detected capabilities for the migration UI.
        self._notice['target']['support']['manufacturers'] = True
        self._notice['target']['support']['check_manufacturers'] = True
        # self._notice['target']['support']['yoast_seo'] = False
        self._notice['target']['support']['pre_ord'] = True
        self._notice['target']['support']['check_pre_ord'] = True
        self._notice['target']['support']['seo'] = True
        self._notice['target']['support']['check_seo'] = True
        self._notice['target']['support']['seo_301'] = True
        self._notice['target']['support']['check_seo_301'] = True
        self._notice['target']['support']['cus_pass'] = True
        self._notice['target']['support']['check_cus_pass'] = True
        self._notice['target']['support']['language_map'] = True
        self._notice['target']['languages'] = language_data
        self._notice['target']['order_status'] = order_status_data
        self._notice['target']['support']['order_status_map'] = True
        self._notice['target']['support']['country_map'] = False
        self._notice['target']['support']['add_new'] = True
        self._notice['target']['support']['coupons'] = True
        self._notice['target']['support']['blogs'] = True
        self._notice['target']['support']['pages'] = True
        self._notice['target']['support']['site_map'] = False
        self._notice['target']['support']['pre_prd'] = False
        self._notice['target']['support']['pre_cus'] = False
        self._notice['target']['support']['img_des'] = True
        self._notice['target']['support']['customer_group_map'] = False
        self._notice['target']['support']['languages_select'] = True
        self._notice['target']['support']['update_latest_data'] = True
        self._notice['target']['config']['entity_update']['products'] = True
        self._notice['target']['support']['polylang_product'] = polylang_products
        self._notice['target']['support']['polylang_category'] = polylang_categories
        return response_success()
    def get_query_display_import_source(self, update = False):
        """Build the COUNT(1) queries used to size each source entity.

        :param update: False counts NEW rows (id > last migrated id);
                       True counts already-migrated rows (id <= last migrated id).
        :return: dict of entity name -> connector query spec.
        """
        compare_condition = ' > '
        if update:
            compare_condition = ' <= '
        prefix = self._notice['src']['config']['table_prefix']
        # Multisite: users/usermeta live under the network (un-suffixed) prefix.
        if self._notice['src']['config'].get('site_id'):
            prefix = to_str(prefix).replace(to_str(self._notice['src']['config'].get('site_id')) + '_', '')
        queries = {
            # 'taxes': {
            #     'type': 'select',
            #     'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'",
            # },
            'manufacturers': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy WHERE (taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand') AND term_id " + compare_condition + to_str(self._notice['process']['manufacturers']['id_src']),
            },
            'categories': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy WHERE taxonomy = 'product_cat' AND term_id " + compare_condition + to_str(self._notice['process']['categories']['id_src']),
            },
            'products': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'product' AND post_status NOT IN ('inherit','auto-draft') AND ID " + compare_condition + to_str(
                    self._notice['process']['products']['id_src']),
            },
            'customers': {
                'type': 'select',
                # Customers are WP users whose capabilities meta marks them customer/subscriber.
                'query': "SELECT COUNT(1) AS count FROM " + prefix + "users u LEFT JOIN " + prefix + "usermeta um ON u.ID = um.user_id WHERE (um.meta_key = '_DBPRF_capabilities' AND um.meta_value LIKE '%customer%' OR um.meta_value LIKE '%subscriber%') AND u.ID " + compare_condition + to_str(
                    self._notice['process']['customers']['id_src']),
            },
            'orders': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'shop_order' AND post_status NOT IN ('inherit','auto-draft') AND ID " + compare_condition + to_str(
                    self._notice['process']['orders']['id_src']),
            },
            'reviews': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_comments AS cm,_DBPRF_posts AS p WHERE cm.comment_post_ID = p.ID AND p.post_type = 'product' AND cm.comment_ID " + compare_condition + to_str(
                    self._notice['process']['reviews']['id_src']),
            },
            'pages': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'page' AND ID " + compare_condition + to_str(self._notice['process']['pages']['id_src']),
            },
            'coupons': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'shop_coupon' AND ID " + compare_condition + to_str(self._notice['process']['coupons']['id_src']),
            },
            'blogs': {
                'type': 'select',
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'post' AND ID " + compare_condition + to_str(self._notice['process']['blogs']['id_src']),
            },
        }
        # With WPML, count only original-language rows (source_language_code IS NULL);
        # translations are migrated alongside their originals.
        if self._notice['src']['support']['wpml']:
            queries['categories'] = {
                'type': 'select',
                # 'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_icl_translations il ON tt.term_id = il.element_id "
                #          "WHERE tt.term_id and il.`source_language_code` is NULL and il.`element_type` = 'tax_product_cat' and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
                #     self._notice['process']['categories']['id_src']),
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
                         "WHERE il.`element_type` = 'tax_product_cat' and il.`source_language_code` IS NULL and tt.taxonomy = 'product_cat' and tt.term_taxonomy_id " + compare_condition + to_str(self._notice['process']['categories']['id_src']),
            }
            queries['products'] = {
                'type': 'select',
                # 'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
                #          "WHERE p.`ID` and il.`source_language_code` is NULL and il.`element_type` = 'post_product' and p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') AND p.ID > " + to_str(
                #     self._notice['process']['products']['id_src']),
                'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
                         "WHERE il.`source_language_code` is NULL and il.`element_type` = 'post_product' and p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') AND p.ID " + compare_condition + to_str(self._notice['process']['products']['id_src']),
            }
        return queries
def display_import_source(self):
if self._notice['config']['add_new']:
self.display_recent_data()
queries = self.get_query_display_import_source()
count = self.get_connector_data(self.get_connector_url('query'), {
'serialize': True,
'query': json.dumps(queries)
})
if (not count) or (count['result'] != 'success'):
return response_error()
real_totals = dict()
for key, row in count['data'].items():
total = 0
if key == 'taxes':
if row and to_len(row) > 0:
taxes = row[0]['option_value'].splitlines()
total = (to_len(taxes) + 1) if taxes else 1
else:
total = self.list_to_count_import(row, 'count')
real_totals[key] = total
for key, total in real_totals.items():
self._notice['process'][key]['total'] = total
return response_success()
def display_update_source(self):
queries = self.get_query_display_import_source(True)
count = self.select_multiple_data_connector(queries, 'count')
if (not count) or (count['result'] != 'success'):
return response_error()
real_totals = dict()
for key, row in count['data'].items():
total = 0
if key == 'taxes':
if row and to_len(row) > 0:
taxes = row[0]['option_value'].splitlines()
total = (to_len(taxes) + 1) if taxes else 1
else:
total = self.list_to_count_import(row, 'count')
real_totals[key] = total
for key, total in real_totals.items():
self._notice['process'][key]['total_update'] = total
return response_success()
    def display_import_target(self):
        """Target-side counterpart of display_import_source; no counting is
        required on the target store, so simply report success."""
        return response_success()
def prepare_import_target(self):
parent = super().prepare_import_target()
if parent['result'] != 'success':
return parent
query_active_plugins = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
}
active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
if active_plugins and active_plugins['data']:
active_plugin = active_plugins['data'][0]
active_plugin_v = active_plugin['option_value']
if active_plugin_v:
active_plugin_v_data = php_unserialize(active_plugin_v)
if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
active_plugin_v_data = list(active_plugin_v_data.values())
if active_plugin_v_data and "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
self._notice['target']['support']['wpml'] = True
query_active_languages = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
}
options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if option_value and 'default_language' in option_value:
self._notice['target']['language_default'] = option_value['default_language']
woo_brands = [
{'name': 'woocommerce-brand/main.php'},
{'name': 'wc-brand/woocommerce-brand.php'},
{'name': 'woocommerce-brands/woocommerce-brands.php'},
{'name': 'brands-for-woocommerce/woocommerce-brand.php', 'taxonomy': 'berocket_brand'},
{'name': 'perfect-woocommerce-brands/main.php', 'taxonomy': 'pwb-brand'},
]
for brand in woo_brands:
if brand['name'] in active_plugin_v_data:
self._notice['target']['support']['plugin_manufacturers'] = False
if brand.get('taxonomy'):
self._notice['target']['config']['brand_taxonomy'] = brand['taxonomy']
break
if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data)):
self._notice['target']['support']['plugin_pre_ord'] = True
if active_plugin_v_data and "wordpress-seo/wp-seo.php" in active_plugin_v_data:
self._notice['target']['support']['yoast_seo'] = True
if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
self._notice['target']['support']['addons'] = True
if "leurlrewrite/leurlrewrite.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_seo'] = True
self._notice['target']['support']['plugin_seo_301'] = True
if "leprespass/leprespass.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_cus_pass'] = True
if "woo-product-bundle-premium/index.php" in active_plugin_v_data:
self._notice['target']['support']['product_bundle'] = True
if "woocommerce-admin/woocommerce-admin.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_woo_admin'] = True
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
self.query_data_connector({'type': 'query', 'query': query['query']})
if self._notice['target']['support']['wpml'] or self._notice['target']['support'].get('polylang'):
add_column = "ALTER TABLE " + self.get_table_name(TABLE_MAP) + " ADD `lang` VARCHAR(255)"
self.query_raw(add_column)
add_column = "ALTER TABLE _DBPRF_lecm_rewrite ADD `lang` VARCHAR(255)"
self.query_data_connector({'type': 'query', 'query': add_column})
return response_success()
def display_confirm_target(self):
self._notice['target']['clear']['function'] = 'clear_target_taxes'
self._notice['target']['clear_demo']['function'] = 'clear_target_products_demo'
return response_success()
# TODO clear demo
def clear_target_manufacturers_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_categories_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['manufacturers']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_MANUFACTURER
}
manufacturers = self.select_obj(TABLE_MAP, where)
manufacturer_ids = list()
if manufacturers['result'] == 'success':
manufacturer_ids = duplicate_field_value_from_list(manufacturers['data'], 'id_desc')
if not manufacturer_ids:
return next_clear
manufacturer_id_con = self.list_to_in_condition(manufacturer_ids)
taxonomy_meta_table = 'termmeta'
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' AND term_id IN " + manufacturer_id_con
}
manufacturers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(collections_query)})
if manufacturers['data']:
all_queries = list()
taxonomy_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_taxonomy_id')
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + manufacturer_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + manufacturer_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
if all_queries:
self.import_multiple_data_connector(all_queries, 'cleardemo')
return self._notice['target']['clear_demo']
def clear_target_categories_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_products_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['categories']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_CATEGORY
}
categories = self.select_obj(TABLE_MAP, where)
category_ids = list()
if categories['result'] == 'success':
category_ids = duplicate_field_value_from_list(categories['data'], 'id_desc')
if not category_ids:
return next_clear
category_id_con = self.list_to_in_condition(category_ids)
taxonomy_meta_table = 'termmeta'
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' AND term_id IN " + category_id_con
}
categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(collections_query)})
if categories['data']:
all_queries = list()
taxonomy_ids = duplicate_field_value_from_list(categories['data'], 'term_taxonomy_id')
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + category_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + category_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` "
"WHERE element_type = 'tax_product_cat' AND element_id IN " + category_id_con
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'category' and type_id IN " + category_id_con
})
})
if all_queries:
self.import_multiple_data_connector(all_queries, 'cleardemo')
return next_clear
    def clear_target_products_demo(self):
        """Remove products created by a previous demo migration from the target.

        Works one page of LIMIT_CLEAR_DEMO mapped product ids at a time:
        deletes the product posts (and their children/variations), their post
        meta, the mapped attribute taxonomies and terms, plus WPML translation
        rows and SEO rewrite rows when those features are enabled. Returns the
        next clear-demo step once the last (short) page has been processed,
        otherwise the current step so the caller loops back in.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_orders_demo',
        }
        if not self._notice['config']['products']:
            self._notice['target']['clear_demo'] = next_clear
            return next_clear
        where = {
            'migration_id': self._migration_id,
            'type': self.TYPE_PRODUCT
        }
        products = self.select_page(TABLE_MAP, where, self.LIMIT_CLEAR_DEMO)
        product_ids = list()
        if products['result'] == 'success':
            product_ids = duplicate_field_value_from_list(products['data'], 'id_desc')
        if not product_ids:
            self._notice['target']['clear_demo'] = next_clear
            return next_clear
        product_id_con = self.list_to_in_condition(product_ids)
        # Include children via post_parent so product variations are removed too.
        collections_query = {
            'type': 'select',
            'query': "SELECT * FROM `_DBPRF_posts` "
                     "WHERE ID IN " + product_id_con + " OR post_parent IN " + product_id_con
        }
        products = self.get_connector_data(self.get_connector_url('query'),
                                           {'query': json.dumps(collections_query)})
        all_post_id = list()
        if products['data']:
            all_post_id = duplicate_field_value_from_list(products['data'], 'ID')
        all_collections_query = {
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_posts` "
                     "WHERE ID IN " + self.list_to_in_condition(all_post_id)
        }
        clear_table = self.get_connector_data(self.get_connector_url('query'),
                                              {'query': json.dumps(all_collections_query)})
        # NOTE(review): table name `post_meta` differs from `postmeta` used in
        # clear_target_products (and WordPress' standard wp_postmeta) — confirm
        # which name the connector actually maps to.
        all_meta_query = {
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_post_meta`"
                     " WHERE post_id IN " + self.list_to_in_condition(all_post_id)
        }
        clear_table = self.get_connector_data(self.get_connector_url('query'),
                                              {'query': json.dumps(all_meta_query)})
        # Remove the attribute taxonomies (TYPE_OPTION map entries) created by the demo run.
        where = {
            'migration_id': self._migration_id,
            'type': self.TYPE_OPTION
        }
        attibutes = self.select_obj(TABLE_MAP, where)
        attibutes_ids = list()
        attibutes_codes = list()
        if attibutes['result'] == 'success':
            attibutes_ids = duplicate_field_value_from_list(attibutes['data'], 'id_desc')
            attibutes_codes = duplicate_field_value_from_list(attibutes['data'], 'value')
        if attibutes_ids:
            del_transient_attr_query = {
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_woocommerce_attribute_taxonomies` WHERE attribute_id IN " + self.list_to_in_condition(
                    attibutes_ids)
            }
            self.get_connector_data(self.get_connector_url('query'),
                                    {'query': json.dumps(del_transient_attr_query)})
            # Attribute values are stored as terms under the attribute's taxonomy code.
            term_query = {
                "type": "select",
                "query": "SELECT * FROM `_DBPRF_term_taxonomy` tt LEFT JOIN `_DBPRF_terms` t ON tt.term_id = t.term_id "
                         "WHERE tt.taxonomy IN " + self.list_to_in_condition(attibutes_codes)
            }
            terms = self.get_connector_data(self.get_connector_url('query'),
                                            {'query': json.dumps(term_query)})
            if (terms['data']):
                term_ids = duplicate_field_value_from_list(terms['data'], 'term_id')
                taxonomy_ids = duplicate_field_value_from_list(terms['data'], 'term_taxonomy_id')
                del_transient_attr_query = {
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
                        taxonomy_ids)
                }
                self.get_connector_data(self.get_connector_url('query'),
                                        {'query': json.dumps(del_transient_attr_query)})
                del_transient_attr_query = {
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
                        term_ids)
                }
                self.get_connector_data(self.get_connector_url('query'),
                                        {'query': json.dumps(del_transient_attr_query)})
        if self._notice['target']['support']['wpml']:
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_icl_translations` "
                             "WHERE element_type = 'post_product' AND element_id IN " + product_id_con
                })
            })
        if self._notice['config']['seo'] or self._notice['config']['seo_301']:
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'product' and type_id IN " + product_id_con
                })
            })
        self.delete_map_demo(self.TYPE_PRODUCT, product_ids)
        # A short page means this was the last batch; advance to the next step.
        if product_ids and to_len(product_ids) < self.LIMIT_CLEAR_DEMO:
            self._notice['target']['clear_demo'] = next_clear
            return next_clear
        return self._notice['target']['clear_demo']
def clear_target_customers_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_orders_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['customers']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_CUSTOMER
}
customers = self.select_obj(TABLE_MAP, where)
customer_ids = list()
if customers['result'] == 'success':
customer_ids = duplicate_field_value_from_list(customers['data'], 'id_desc')
if not customer_ids:
return next_clear
customer_id_con = self.list_to_in_condition(customer_ids)
del_user_query = "DELETE FROM _DBPRF_users WHERE ID IN " + customer_id_con
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_user_query
})
})
if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
self.log("Clear data failed. Error: Could not empty customers ", 'clear')
del_user_meta_query = "DELETE FROM _DBPRF_usermeta WHERE user_id IN " + customer_id_con
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_user_meta_query
})
})
if self._notice['target']['support'].get('plugin_woo_admin') or self.convert_version(self._notice['target']['config']['version'], 2) > 399:
del_customer_lookup_query = "DELETE FROM _DBPRF_wc_customer_lookup WHERE user_id IN " + customer_id_con
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_customer_lookup_query
})
})
return next_clear
def clear_target_orders_demo(self):
next_clear = {
'result': 'success',
'function': 'clear_target_reviews_demo',
}
if not self._notice['config']['orders']:
self._notice['target']['clear_demo'] = next_clear
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_ORDER
}
orders = self.select_page(TABLE_MAP, where, self.LIMIT_CLEAR_DEMO)
order_ids = list()
if orders['result'] == 'success':
order_ids = duplicate_field_value_from_list(orders['data'], 'id_desc')
if not order_ids:
self._notice['target']['clear_demo'] = next_clear
return next_clear
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund') AND ID IN " + self.list_to_in_condition(
order_ids)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
# clear meta post(orders)
all_meta_query = {
'type': 'select',
'query': "DELETE FROM `_DBPRF_post_meta` WHERE post_id IN " + self.list_to_in_condition(order_ids)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
self.delete_map_demo(self.TYPE_ORDER, order_ids)
if order_ids and to_len(order_ids) < self.LIMIT_CLEAR_DEMO:
self._notice['target']['clear_demo'] = next_clear
return next_clear
return self._notice['target']['clear_demo']
def clear_target_reviews_demo(self):
next_clear = {
'result': 'success',
'function': 'clear_target_pages_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['reviews']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_REVIEW
}
reviews = self.select_obj(TABLE_MAP, where)
review_ids = list()
if reviews['result'] == 'success':
review_ids = duplicate_field_value_from_list(reviews['data'], 'id_desc')
if not review_ids:
return next_clear
tables = [
'commentmeta',
'comments'
]
for table in tables:
where = ''
if table == 'comments':
where = " WHERE comment_ID IN " + self.list_to_in_condition(review_ids)
if table == 'commentmeta':
where = " WHERE comment_id IN " + self.list_to_in_condition(review_ids)
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "`" + where
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
# TODO: clear
def clear_target_taxes(self):
next_clear = {
'result': 'process',
'function': 'clear_target_manufacturers',
'msg': ''
}
if not self._notice['config']['taxes']:
self._notice['target']['clear'] = next_clear
return next_clear
tables = [
'options',
'woocommerce_tax_rates',
'woocommerce_tax_rate_locations',
'wc_tax_rate_classes'
]
for table in tables:
if table == 'options':
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "UPDATE `_DBPRF_" + table + "` SET `option_value` = '' WHERE `option_name` = 'woocommerce_tax_classes'"
})
})
continue
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + table + "` WHERE 1"
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
self._notice['target']['clear'] = next_clear
return next_clear
    def clear_target_manufacturers(self):
        """Empty all brand/manufacturer terms on the target (full clear).

        Deletes term, term meta, term relationship and term-taxonomy rows for
        the detected brand taxonomy, and blanks the Yoast SEO metadata stored
        for that taxonomy when Yoast is active.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_categories',
            'msg': ''
        }
        if not self._notice['config']['manufacturers']:
            self._notice['target']['clear'] = next_clear
            return next_clear
        taxonomy_meta_table = 'termmeta'
        # Default brand taxonomy; overridden when prepare_import_target detected
        # a specific brand plugin (stored in target config as 'brand_taxonomy').
        taxonomy = 'berocket_brand'
        if self._notice['target']['config'].get('brand_taxonomy'):
            taxonomy = self._notice['target']['config']['brand_taxonomy']
        # all_collections_query = {
        #     'type': 'select',
        #     'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' LIMIT 200"
        # }
        # manufacturers = self.get_connector_data(self.get_connector_url('query'),
        #                                         {'query': json.dumps(all_collections_query)})
        tables = ['termmeta', 'terms', 'term_relationships', 'term_taxonomy']
        for table in tables:
            where = ''
            # Each table is keyed differently, so build the WHERE clause per table;
            # term_taxonomy must be deleted last because the others subquery into it.
            if table in ['termmeta', 'terms']:
                where = " term_id IN (SELECT term_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = " + self.escape(taxonomy) + " )"
            if table in ['term_relationships']:
                where = " term_taxonomy_id IN (SELECT term_taxonomy_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = " + self.escape(taxonomy) + " )"
            if table == 'term_taxonomy':
                where = " taxonomy = " + self.escape(taxonomy)
            query = "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
            clear_table = self.query_data_connector({'type': 'delete', 'query': query})
            if (not clear_table) or (clear_table['result'] != 'success'):
                self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
                continue
        # if manufacturers:
        #     while manufacturers['data']:
        #         if not manufacturers:
        #             return next_clear
        #         term_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_id')
        #         all_queries = list()
        #         taxonomy_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_taxonomy_id')
        #         all_queries.append({
        #             'type': 'query',
        #             'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + self.list_to_in_condition(
        #                 term_ids)
        #         })
        #         all_queries.append({
        #             'type': 'query',
        #             'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
        #                 term_ids)
        #         })
        #         all_queries.append({
        #             'type': 'query',
        #             'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
        #                 taxonomy_ids)
        #         })
        #         if all_queries:
        #             self.import_multiple_data_connector(all_queries, 'cleardemo')
        #         all_collections_query = {
        #             'type': 'select',
        #             'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' LIMIT 200"
        #         }
        #         manufacturers = self.get_connector_data(self.get_connector_url('query'),
        #                                                 {'query': json.dumps(all_collections_query)})
        if self._notice['target']['support']['yoast_seo']:
            # Yoast stores per-term SEO data serialized inside one option row;
            # reset just this taxonomy's section and write the option back.
            query_wpseo = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
            }
            options_data = self.get_connector_data(self.get_connector_url('query'),
                                                   {'query': json.dumps(query_wpseo)})
            if options_data and options_data['data']:
                option_value = php_unserialize(options_data['data'][0]['option_value'])
                if taxonomy in option_value:
                    option_value[taxonomy] = dict()
                data_set = {
                    'option_value': php_serialize(option_value)
                }
                where = {
                    'option_id': options_data['data'][0]['option_id'],
                    'option_name': 'wpseo_taxonomy_meta'
                }
                update_query = self.create_update_query_connector('options', data_set, where)
                wpseo_taxonomy_clear = self.import_data_connector(update_query, 'manufacturer')
        self._notice['target']['clear'] = next_clear
        return next_clear
    def clear_target_categories(self):
        """Empty all product/post category terms on the target (full clear).

        Deletes term meta, terms and term-taxonomy rows in batches of 200 until
        none remain, then removes related WPML translation rows, SEO rewrite
        rows and the Yoast per-category SEO metadata where those features apply.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_products',
            'msg': ''
        }
        if not self._notice['config']['categories']:
            self._notice['target']['clear'] = next_clear
            return next_clear
        taxonomy_meta_table = 'termmeta'
        # Batch loop: keep deleting 200 category taxonomy rows at a time until
        # _check_categories_exists reports the tables are empty.
        while self._check_categories_exists():
            all_collections_query = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' LIMIT 200"
            }
            categories = self.get_connector_data(self.get_connector_url('query'),
                                                 {'query': json.dumps(all_collections_query)})
            if not categories:
                # Connector failure: bail out of the batch loop early.
                return next_clear
            term_ids = duplicate_field_value_from_list(categories['data'], 'term_id')
            taxonomy_ids = duplicate_field_value_from_list(categories['data'], 'term_taxonomy_id')
            taxnomy_query = {
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + self.list_to_in_condition(
                    term_ids)
            }
            self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(taxnomy_query)})
            self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps({
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
                    term_ids)
            })})
            self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps({
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
                    taxonomy_ids)
            })
            })
        # end for
        if self._notice['target']['support']['wpml']:
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_icl_translations` where element_type = 'tax_product_cat'"
                })
            })
        if self._notice['config']['seo'] or self._notice['config']['seo_301']:
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'category'"
                })
            })
        if self._notice['target']['support']['yoast_seo']:
            # Yoast stores per-term SEO data serialized inside one option row;
            # reset the 'product_cat' section and write the option back.
            query_wpseo = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
            }
            options_data = self.get_connector_data(self.get_connector_url('query'),
                                                   {'query': json.dumps(query_wpseo)})
            if options_data and options_data['data']:
                option_value = php_unserialize(options_data['data'][0]['option_value'])
                if 'product_cat' in option_value:
                    option_value['product_cat'] = dict()
                data_set = {
                    'option_value': php_serialize(option_value)
                }
                where = {
                    'option_id': options_data['data'][0]['option_id'],
                    'option_name': 'wpseo_taxonomy_meta'
                }
                update_query = self.create_update_query_connector('options', data_set, where)
                wpseo_taxonomy_clear = self.import_data_connector(update_query, 'category')
        self._notice['target']['clear'] = next_clear
        return self._notice['target']['clear']
def _check_categories_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT term_taxonomy_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' LIMIT 1"
}
categories = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if categories['data'] else False
def _check_product_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT ID FROM `_DBPRF_posts` WHERE post_type IN ('product', 'product_variation') LIMIT 1"
}
# products = self.get_connector_data(self.get_connector_url('query'),
# {'query': json.dumps(all_collections_query)})
products = self.select_data_connector(all_collections_query, 'products')
return True if products['data'] else False
def _check_attributes_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_woocommerce_attribute_taxonomies` ORDER BY attribute_id LIMIT 200"
}
products = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if products['data'] else False
def clear_target_products(self):
next_clear = {
'result': 'process',
'function': 'clear_target_customers',
'msg': ''
}
if not self._notice['config']['products']:
self._notice['target']['clear'] = next_clear
return next_clear
while self._check_product_exists():
# clear posts(product)
# clear meta post(product)
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN('product', 'product_variation')"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
all_meta_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_postmeta` WHERE post_id NOT IN (SELECT ID FROM _DBPRF_posts)"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
# clear attributes
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = '_transient_wc_attribute_taxonomies'"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
while self._check_attributes_exists():
product_attribute_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_woocommerce_attribute_taxonomies` ORDER BY attribute_id LIMIT 200"
}
attributes = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(product_attribute_query)})
if (attributes['data']):
attribute_ids = duplicate_field_value_from_list(attributes['data'], 'attribute_id')
attribute_names = duplicate_field_value_from_list(attributes['data'], 'attribute_name')
attribute_names_condition = "('pa_" + "','pa_".join(attribute_names) + "')"
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_woocommerce_attribute_taxonomies` WHERE attribute_id IN " + self.list_to_in_condition(
attribute_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
term_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_term_taxonomy` tt LEFT JOIN `_DBPRF_terms` t ON tt.term_id = t.term_id "
"WHERE tt.taxonomy IN " + attribute_names_condition
}
terms = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(term_query)})
if (terms['data']):
term_ids = duplicate_field_value_from_list(terms['data'], 'term_id')
taxonomy_ids = duplicate_field_value_from_list(terms['data'], 'term_taxonomy_id')
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
term_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` where element_type IN ('post_product','post_product_variation'"
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'product'"
})
})
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
    def clear_target_customers(self):
        """Empty all customer accounts on the target (full clear).

        Deletes users whose capability meta marks them as WooCommerce
        customers, then removes the now-orphaned user meta and (on
        WooCommerce 4.0+) the customer lookup rows.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_orders',
            'msg': ''
        }
        if not self._notice['config']['customers']:
            self._notice['target']['clear'] = next_clear
            return next_clear
        # "DELETE FROM `wp_usermeta`
        # WHERE meta_key IN ('wp_capabilities', 'wp_capabilities') AND meta_value = 'a:1:{s:8:"customer";b:1;}'"
        # NOTE(review): the IN list repeats '_DBPRF_capabilities' twice — looks
        # like one entry was meant to be a different key; confirm the intent.
        # meta_value matches the PHP-serialized role array for a pure customer.
        del_user_query = "DELETE _DBPRF_users FROM _DBPRF_users " \
                         "LEFT JOIN _DBPRF_usermeta ON _DBPRF_users.ID = _DBPRF_usermeta.user_id " \
                         "WHERE _DBPRF_usermeta.meta_key IN ('_DBPRF_capabilities', '_DBPRF_capabilities') " \
                         "AND _DBPRF_usermeta.meta_value = 'a:1:{s:8:\"customer\";b:1;}'"
        clear_table = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': del_user_query
            })
        })
        if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
            self.log("Clear data failed. Error: Could not empty customers ", 'clear')
        # Remove user meta rows left orphaned by the user delete above.
        del_user_meta_query = "DELETE _DBPRF_usermeta FROM _DBPRF_usermeta " \
                              "LEFT JOIN _DBPRF_users ON _DBPRF_usermeta.user_id = _DBPRF_users.ID WHERE _DBPRF_users.ID IS NULL"
        clear_table = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': del_user_meta_query
            })
        })
        # WooCommerce 4.0+ (or the woocommerce-admin plugin) keeps a denormalized
        # customer lookup table; purge its orphaned rows too.
        if self._notice['target']['support'].get('plugin_woo_admin') or self.convert_version(self._notice['target']['config']['version'], 2) > 399:
            del_customer_lookup_query = "DELETE _DBPRF_wc_customer_lookup FROM _DBPRF_wc_customer_lookup LEFT JOIN _DBPRF_users ON _DBPRF_wc_customer_lookup.user_id = _DBPRF_users.ID WHERE _DBPRF_users.ID IS NULL"
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query', 'query': del_customer_lookup_query
                })
            })
        self._notice['target']['clear'] = next_clear
        return self._notice['target']['clear']
def _check_order_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT ID FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund') LIMIT 1"
}
products = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if products['data'] else False
def clear_target_orders(self):
next_clear = {
'result': 'process',
'function': 'clear_target_reviews',
'msg': ''
}
if not self._notice['config']['orders']:
self._notice['target']['clear'] = next_clear
return next_clear
while self._check_order_exists():
# clear posts(orders)
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund')"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
# clear meta post(orders)
all_meta_query = {
'type': 'select',
'query': "DELETE `_DBPRF_postmeta` FROM `_DBPRF_post_meta` pm LEFT JOIN `_DBPRF_posts` p ON p.ID = pm.meta_id"
" WHERE p.ID IS NULL"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
def clear_target_reviews(self):
next_clear = {
'result': 'process',
'function': 'clear_target_blogs',
'msg': ''
}
if not self._notice['config']['reviews']:
self._notice['target']['clear'] = next_clear
return next_clear
tables = [
'commentmeta',
'comments'
]
for table in tables:
self._notice['target']['clear']['result'] = 'process'
self._notice['target']['clear']['function'] = 'clear_target_reviews'
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "`"
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
# def clear_target_blogs(self):
# next_clear = {
# 'result': 'process',
# 'function': 'clear_target_coupons',
# 'msg': ''
# }
# self._notice['target']['clear'] = next_clear
# if not self._notice['config'].get('blogs'):
# return next_clear
# all_queries = {
# 'term': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_terms WHERE term_id IN (SELECT term_id FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['category', 'post_tag']) + ')'
# },
# 'term_taxonomy': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['category', 'post_tag'])
# },
# 'term_relationship': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_term_relationships WHERE object_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "post")'
# },
# 'postmeta': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_postmeta WHERE post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "post")'
# },
# 'posts': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_posts WHERE post_type = "post"'
# },
# }
# delete = self.query_multiple_data_connector(all_queries, 'clear_blog')
# return next_clear
def clear_target_coupons(self):
next_clear = {
'result': 'process',
'function': 'clear_target_pages',
'msg': ''
}
self._notice['target']['clear'] = next_clear
if not self._notice['config']['coupons']:
return next_clear
tables = [
'postmeta',
'posts'
]
for table in tables:
where = ' post_type = "shop_coupon"'
if table == 'postmeta':
where = ' post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "shop_coupon")'
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
return next_clear
# def clear_target_pages(self):
# next_clear = {
# 'result': 'process',
# 'function': '',
# 'msg': ''
# }
# self._notice['target']['clear'] = next_clear
# if not self._notice['config']['pages']:
# return next_clear
# tables = [
# 'postmeta',
# 'posts'
# ]
# for table in tables:
# where = ' post_type = "page"'
# if table == 'postmeta':
# where = ' post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "page")'
# clear_table = self.get_connector_data(self.get_connector_url('query'), {
# 'query': json.dumps({
# 'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
# })
# })
# if (not clear_table) or (clear_table['result'] != 'success'):
# self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
# continue
# return next_clear
# TODO: TAX
def prepare_taxes_import(self):
return self
def prepare_taxes_export(self):
return self
def get_taxes_main_export(self):
id_src = self._notice['process']['taxes']['id_src']
limit = self._notice['setting']['taxes']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'"
}
# taxes = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
taxes = self.select_data_connector(query, 'taxes')
if not taxes or taxes['result'] != 'success':
return response_error('could not get taxes main to export')
list_taxes = response_success()
if taxes['data'] and to_len(taxes['data']) > 0:
list_taxes['data'] = list()
for tax in taxes['data']:
_taxes = tax['option_value'].splitlines()
if _taxes:
tmp_taxes = [
{
'id': 1,
'name': 'Standard'
}
]
i = 2
for tax_name in _taxes:
tax_data = dict()
tax_data['id'] = i
tax_data['name'] = tax_name
tmp_taxes.append(tax_data)
i += 1
list_taxes['data'].extend(tmp_taxes)
return list_taxes
    def get_taxes_ext_export(self, taxes):
        """Fetch tax rate rows and their location rows for the exported tax classes.

        :param taxes: response from get_taxes_main_export (rows carry 'name')
        :return: connector response whose data holds 'tax_rates' and
                 'tax_rates_location' lists, or an error response.
        """
        url_query = self.get_connector_url('query')
        tax_product_class_names = duplicate_field_value_from_list(taxes['data'], 'name')
        tax_names = list()
        # slugify class names ("Reduced Rate" -> "reduced-rate"); only used by
        # the commented-out tax_rate_class filter below
        for class_name in tax_product_class_names:
            _class_name = to_str(class_name).lower()
            _class_name = _class_name.replace(' ', '-')
            tax_names.append(_class_name)
        # NOTE: WHERE 1 fetches every rate; the per-class filter is disabled
        taxes_ext_queries = {
            'tax_rates': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_woocommerce_tax_rates WHERE 1"
                # tax_rate_class IN " + self.list_to_in_condition(tax_names),
            }
        }
        # taxes_ext = self.get_connector_data(url_query, {'serialize': True, 'query': json.dumps(taxes_ext_queries)})
        taxes_ext = self.select_multiple_data_connector(taxes_ext_queries, 'taxes')
        if not taxes_ext or taxes_ext['result'] != 'success':
            return response_error()
        # second round trip: location rows (postcode/city) for the fetched rates
        tax_zone_ids = duplicate_field_value_from_list(taxes_ext['data']['tax_rates'], 'tax_rate_id')
        taxes_ext_rel_queries = {
            'tax_rates_location': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_woocommerce_tax_rate_locations WHERE tax_rate_id IN " + self.list_to_in_condition(
                    tax_zone_ids),
            }
        }
        # taxes_ext_rel = self.get_connector_data(url_query,
        #                                         {'serialize': True, 'query': json.dumps(taxes_ext_rel_queries)})
        taxes_ext_rel = self.select_multiple_data_connector(taxes_ext_rel_queries, 'taxes')
        if not taxes_ext_rel or taxes_ext_rel['result'] != 'success':
            return response_error()
        # merge both result sets into a single response object
        taxes_ext = self.sync_connector_object(taxes_ext, taxes_ext_rel)
        return taxes_ext
    def convert_tax_export(self, tax, taxes_ext):
        """Convert one source tax class plus its rate rows into the generic tax format.

        :param tax: row from get_taxes_main_export ({'id', 'name'})
        :param taxes_ext: response from get_taxes_ext_export
        :return: response_success with the constructed tax dict
        """
        tax_zones = list()
        # rates are keyed by slugified class name; the Standard class is stored
        # as an empty tax_rate_class in WooCommerce
        tax_rate_class_1 = to_str(tax['name']).lower()
        tax_rate_class_1 = tax_rate_class_1.replace(' ', '-')
        if tax['name'] == 'Standard':
            tax_rate_class_1 = ''
        src_tax_rate = get_list_from_list_by_field(taxes_ext['data']['tax_rates'], 'tax_rate_class', tax_rate_class_1)
        if src_tax_rate and to_len(src_tax_rate) > 0:
            for tax_rate in src_tax_rate:
                # one tax zone per source rate row
                tax_zone = self.construct_tax_zone()
                # tax_zone = self.addConstructDefault(tax_zone)
                tax_zone['id'] = tax_rate['tax_rate_id']
                tax_zone['name'] = tax_rate['tax_rate_name']
                tax_zone_country = self.construct_tax_zone_country()
                tax_zone_country['name'] = self.get_country_name_by_code(tax_rate['tax_rate_country']) if tax_rate['tax_rate_country'] else ''
                tax_zone_country['code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_country', '')
                tax_zone_country['country_code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_country', '')
                tax_zone['country'] = tax_zone_country
                tax_zone_state = self.construct_tax_zone_state()
                tax_zone_state['name'] = ''
                tax_zone_state['code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_state', '')
                tax_zone_state['state_code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_state', '')
                tax_zone['state'] = tax_zone_state
                tax_zone['rate'] = self.construct_tax_zone_rate()
                tax_zone['rate']['id'] = tax_rate['tax_rate_id']
                tax_zone['rate']['name'] = tax_rate['tax_rate_name']
                tax_zone['rate']['code'] = tax_rate['tax_rate_class']
                tax_zone['rate']['rate'] = tax_rate['tax_rate']
                # attach postcode and city locations belonging to this rate
                tax_rates_locations = get_list_from_list_by_field(taxes_ext['data']['tax_rates_location'], 'tax_rate_id', tax_rate['tax_rate_id'])
                tax_zone_city = get_list_from_list_by_field(tax_rates_locations, 'location_type', 'city')
                tax_zone['postcode'] = get_row_value_from_list_by_field(tax_rates_locations, 'location_type', 'postcode', 'location_code')
                if tax_zone_city:
                    # multiple cities are flattened into a ';'-separated string
                    for _tax_zone_city in tax_zone_city:
                        tax_zone['city'] += _tax_zone_city['location_code'] + ';'
                tax_zone['priority'] = tax_rate['tax_rate_priority']
                tax_zone['compound'] = True if tax_rate['tax_rate_compound'] and to_int(tax_rate['tax_rate_compound']) == 1 else False
                tax_zone['is_shipping'] = True if tax_rate['tax_rate_shipping'] and to_int(tax_rate['tax_rate_shipping']) == 1 else False
                tax_zones.append(tax_zone)
        # a single tax-product entry mirroring the class itself
        tax_product = self.construct_tax_product()
        tax_product = self.add_construct_default(tax_product)
        tax_code = to_str(tax['name']).lower()
        tax_code = tax_code.replace(' ', '-')
        tax_product['name'] = tax['name']
        tax_product['code'] = tax_code
        tax_product['created_at'] = get_current_time()
        tax_product['updated_at'] = get_current_time()
        tax_products = [tax_product]
        tax_data = self.construct_tax()
        tax_data = self.add_construct_default(tax_data)
        # id_src = self._notice['process']['taxes']['id_src']
        tax_data['id'] = tax['id']
        tax_data['code'] = tax_code  # slug, not tax['name']
        tax_data['name'] = tax['name']
        tax_data['created_at'] = get_current_time()
        tax_data['updated_at'] = get_current_time()
        tax_data['tax_zones'] = tax_zones
        tax_data['tax_products'] = tax_products
        return response_success(tax_data)
def get_tax_id_import(self, convert, tax, taxes_ext):
# id_src = self._notice['process']['taxes']['id_src']
return tax['id']
def check_tax_import(self, convert, tax, taxes_ext):
return True if self.get_map_field_by_src(self.TYPE_TAX, convert['id'], convert['code']) else False
def router_tax_import(self, convert, tax, taxes_ext):
return response_success('tax_import')
def before_tax_import(self, convert, tax, taxes_ext):
return response_success()
    def tax_import(self, convert, tax, taxes_ext):
        """Register a tax class on the target store and map it to the source id.

        Non-Standard classes are appended to the `woocommerce_tax_classes`
        option (creating it if absent) and, on WooCommerce >= 3.7, also
        inserted into the dedicated wc_tax_rate_classes table.

        :return: response_success with the source tax id
        """
        slug = self.sanitize_title(convert['name'])
        if convert['name'] != 'Standard':
            query = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'"
            }
            taxes = self.select_data_connector(query, 'taxes')
            if taxes and taxes['data']:
                # option exists: append the new class on its own line
                old_tax_data = taxes['data'][0]
                new_option_value = old_tax_data['option_value'] + '\n' + convert['name'] if old_tax_data['option_value'] else convert['name']
                # NOTE(review): convert['name'] is interpolated directly into SQL;
                # a name containing a quote would break/inject this statement
                query_update = {
                    'type': 'query',
                    'query': "UPDATE `_DBPRF_options` SET `option_value` = '" + new_option_value + "' WHERE `option_name` = 'woocommerce_tax_classes'"
                }
                taxes = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_update)})
            else:
                # option missing: create it holding just this class
                tax_data = {
                    'option_name': 'woocommerce_tax_classes',
                    'option_value': convert['name'],
                    'autoload': 'yes'
                }
                tax_query = self.create_insert_query_connector('options', tax_data)
                tax_import = self.import_tax_data_connector(tax_query, True, convert['id'])
            # WooCommerce 3.7+ also keeps tax classes in a dedicated table
            if self.convert_version(self._notice['target']['config']['version'], 2) >= 370:
                tax_rate_classes = {
                    'name': convert['name'],
                    'slug': slug
                }
                tax_rate_classes_query = self.create_insert_query_connector('wc_tax_rate_classes', tax_rate_classes)
                tax_rate_classes_import = self.import_data_connector(tax_rate_classes_query, 'wc_tax_rate_classes')
        tax_code = to_str(convert['name']).lower()
        tax_code = self.sanitize_title(tax_code.replace(' ', '-'))
        # map source id -> slug; target id is 0 because classes have no own id here
        self.insert_map(self.TYPE_TAX, convert['id'], 0, convert['code'], tax_code)
        return response_success(convert['id'])
def after_tax_import(self, tax_id, convert, tax, taxes_ext):
if convert['tax_zones']:
tax_code = to_str(convert['name']).lower()
tax_code = tax_code.replace(' ', '-')
for tax_zone in convert['tax_zones']:
tax_rate = {
'tax_rate_country': tax_zone['country']['country_code'],
'tax_rate_state': tax_zone['state']['state_code'] if tax_zone['state']['state_code'] else '*',
'tax_rate': tax_zone['rate']['rate'] if tax_zone['rate']['rate'] else '*',
'tax_rate_name': tax_zone['rate']['name'] if tax_zone['rate']['name'] else 'Tax',
'tax_rate_priority': tax_zone.get('priority', 1),
'tax_rate_compound': 1 if tax_zone.get('compound') else 0,
'tax_rate_shipping': 1 if tax_zone.get('is_shipping') else 0,
'tax_rate_order': 0,
'tax_rate_class': '' if convert['name'] == 'Standard' else self.convert_attribute_code(tax_code)
}
tax_rate_query = self.create_insert_query_connector('woocommerce_tax_rates', tax_rate)
tax_rate_import = self.import_data_connector(tax_rate_query, 'tax')
if get_value_by_key_in_dict(tax_zone, 'postcode', False):
location_postcode = {
'location_code': get_value_by_key_in_dict(tax_zone, 'postcode', ''),
'tax_rate_id': tax_rate_import,
'location_type': 'postcode'
}
self.import_data_connector(
self.create_insert_query_connector('woocommerce_tax_rate_locations', location_postcode), 'tax')
if get_value_by_key_in_dict(tax_zone, 'city', False):
tax_zone_city = tax_zone['city'].split(';')
if tax_zone_city:
for _tax_zone_city in tax_zone_city:
if _tax_zone_city != '' and _tax_zone_city != ' ':
location_city = {
'location_code': get_value_by_key_in_dict(tax_zone, 'city', ''),
'tax_rate_id': tax_rate_import,
'location_type': 'city'
}
self.import_data_connector(self.create_insert_query_connector('woocommerce_tax_rate_locations', location_city), 'tax')
return response_success()
def addition_tax_import(self, convert, tax, taxes_ext):
return response_success()
# TODO: MANUFACTURER
def prepare_manufacturers_import(self):
return self
def prepare_manufacturers_export(self):
return self
def get_manufacturers_main_export(self):
id_src = self._notice['process']['manufacturers']['id_src']
limit = self._notice['setting']['manufacturers']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id"
" WHERE (tx.taxonomy = 'product_brand' OR tx.taxonomy = 'brand' OR tx.taxonomy = 'pwb-brand') AND tx.term_id > " + to_str(
id_src) + " ORDER BY tx.term_id ASC LIMIT " + to_str(limit)
}
# manufacturers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
manufacturers = self.select_data_connector(query, 'manufacturers')
if not manufacturers or manufacturers['result'] != 'success':
return response_error('could not get manufacturers main to export')
return manufacturers
    def get_manufacturers_ext_export(self, manufacturers):
        """Fetch auxiliary data (term meta, thumbnails, brand images) for brand terms.

        :param manufacturers: response from get_manufacturers_main_export
        :return: merged connector response, or an error/warning response.
        """
        url_query = self.get_connector_url('query')
        manufacturer_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_id')
        category_ids = manufacturer_ids
        cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
        manufacturers_ext_queries = {
            'all_category': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tx.taxonomy = 'product_cat' AND tx.term_id > 0 "
            }
        }
        # term meta moved from woocommerce_termmeta into core termmeta in WP 4.4 / WC 2.6
        if cart_version > 223:
            manufacturers_ext_queries['woocommerce_termmeta'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_termmeta WHERE term_id IN " + self.list_to_in_condition(
                    category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
            }
        else:
            manufacturers_ext_queries['woocommerce_termmeta'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_woocommerce_termmeta WHERE woocommerce_term_id IN " + self.list_to_in_condition(
                    category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
            }
        # some brand plugins store images as options named brand_taxonomy_image<id>
        manufacturers_ext_queries['brand_taxonomy_images'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name IN " + self.brand_image_in_condition(category_ids)
        }
        # manufacturers_ext = self.get_connector_data(url_query, {
        #     'serialize': True,
        #     'query': json.dumps(manufacturers_ext_queries)
        # })
        manufacturers_ext = self.select_multiple_data_connector(manufacturers_ext_queries, 'manufacturers')
        if not manufacturers_ext or manufacturers_ext['result'] != 'success':
            return response_warning()
        # resolve thumbnail attachment posts referenced by the term meta
        thumb_id_list = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'meta_key',
                                                    'thumbnail_id')
        thumbnail_ids = duplicate_field_value_from_list(thumb_id_list, 'meta_value')
        thumb_ids_query = self.list_to_in_condition(thumbnail_ids)
        manufacturers_ext_rel_queries = {
            'post_meta': {
                'type': 'select',
                'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
                         "LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' WHERE p.ID IN " + thumb_ids_query
            }
        }
        # add custom
        if manufacturers_ext_rel_queries:
            # manufacturers_ext_rel = self.get_connector_data(url_query, {
            #     'serialize': True,
            #     'query': json.dumps(manufacturers_ext_rel_queries)
            # })
            manufacturers_ext_rel = self.select_multiple_data_connector(manufacturers_ext_rel_queries, 'manufacturers')
            if not manufacturers_ext_rel or manufacturers_ext_rel['result'] != 'success':
                return response_error()
            manufacturers_ext = self.sync_connector_object(manufacturers_ext, manufacturers_ext_rel)
        return manufacturers_ext
    def convert_manufacturer_export(self, manufacturer, manufacturers_ext):
        """Convert one brand term plus auxiliary data into the generic manufacturer format.

        :param manufacturer: brand term row (term + term_taxonomy fields)
        :param manufacturers_ext: response from get_manufacturers_ext_export
        :return: response_success with the constructed manufacturer dict
        """
        manufacturer_data = self.construct_manufacturer()
        manufacturer_data = self.add_construct_default(manufacturer_data)
        manufacturer_path = manufacturer_url = img_label = ''
        cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
        manufacturer_src = False
        # term meta key column differs across WooCommerce versions
        if cart_version > 223:
            manufacturer_src = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'term_id', manufacturer['term_id'])
        else:
            manufacturer_src = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'woocommerce_term_id', manufacturer['term_id'])
        if manufacturer_src:
            # resolve the thumbnail attachment into label / path / base url
            manufacturer_img_id = self.get_value_metadata(manufacturer_src, 'thumbnail_id', 0)
            img_meta = get_list_from_list_by_field(manufacturers_ext['data']['post_meta'], 'ID', manufacturer_img_id)
            if img_meta:
                img_label = img_meta[0]['post_title']
                manufacturer_path = img_meta[0]['meta_value']
                manufacturer_url = to_str(img_meta[0]['guid']).replace(img_meta[0]['meta_value'], '')
        # a brand_taxonomy_image<id> option takes precedence over the attachment url
        brand_image = get_row_value_from_list_by_field(manufacturers_ext['data']['brand_taxonomy_images'], 'option_name', "brand_taxonomy_image" + to_str(manufacturer['term_id']), 'option_value')
        if brand_image:
            manufacturer_url = brand_image
        manufacturer_data['id'] = manufacturer['term_id']
        manufacturer_data['code'] = manufacturer['slug']
        manufacturer_data['name'] = manufacturer['name']
        manufacturer_data['description'] = manufacturer['description']
        manufacturer_data['thumb_image']['label'] = img_label
        manufacturer_data['thumb_image']['url'] = manufacturer_url
        manufacturer_data['thumb_image']['path'] = manufacturer_path
        manufacturer_data['created_at'] = get_current_time()
        manufacturer_data['updated_at'] = get_current_time()
        language_id = self._notice['src']['language_default']
        manufacturer_language_data = dict()
        manufacturer_language_data['name'] = manufacturer['name']
        manufacturer_language_data['description'] = manufacturer['description']
        manufacturer_data['languages'][language_id] = manufacturer_language_data
        # keep the raw rows for downstream hooks
        manufacturer_data['manufacturer'] = manufacturer
        manufacturer_data['manufacturers_ext'] = manufacturers_ext
        return response_success(manufacturer_data)
def get_manufacturer_id_import(self, convert, manufacturer, manufacturers_ext):
return manufacturer['term_id']
def check_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return True if self.get_map_field_by_src(self.TYPE_MANUFACTURER, convert['id']) else False
def router_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return response_success('manufacturer_import')
def before_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return response_success()
    def manufacturer_import(self, convert, manufacturer, manufacturers_ext):
        """Create a brand term (+ taxonomy row, images, SEO meta) on the target store.

        Inserts a term and its term_taxonomy row under the configured brand
        taxonomy, uploads the thumbnail image if any, and writes Yoast SEO
        taxonomy meta when WPML/Yoast support is detected.

        :return: response_success with the new term_taxonomy id, or a warning.
        """
        slug = self.sanitize_title(convert['name'])
        manufacturer_term = {
            'name': convert['name'],
            'slug': convert['code'] if convert['code'] else slug,
            'term_group': 0,
        }
        manufacturer_term_query = self.create_insert_query_connector('terms', manufacturer_term)
        term_id = self.import_data_connector(manufacturer_term_query, 'category')
        if not term_id:
            return response_warning('Manufacturer ' + to_str(convert['id']) + ' import false.')
        # default to the BeRocket brand taxonomy unless the target config names one
        taxonomy = 'berocket_brand'
        if self._notice['target']['config'].get('brand_taxonomy'):
            taxonomy = self._notice['target']['config']['brand_taxonomy']
        manufacturer_taxonomy = {
            'term_id': term_id,
            'taxonomy': taxonomy,
            'description': get_value_by_key_in_dict(convert, 'description', ''),
            'parent': 0,
            'count': 0
        }
        manufacturer_taxonomy_query = self.create_insert_query_connector('term_taxonomy', manufacturer_taxonomy)
        manufacturer_taxonomy_import = self.import_manufacturer_data_connector(manufacturer_taxonomy_query, True, convert['id'])
        if not manufacturer_taxonomy_import:
            return response_warning('manufacturer ' + to_str(convert['id']) + ' import false.')
        self.insert_map(self.TYPE_MANUFACTURER, convert['id'], manufacturer_taxonomy_import, convert['code'])
        thumbnail_id = False
        cate_image = ''
        # upload the brand image (if any) and register it as a WP attachment
        if convert['thumb_image']['url'] or convert['thumb_image']['path']:
            image_process = self.process_image_before_import(convert['thumb_image']['url'], convert['thumb_image']['path'])
            image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(self.make_woocommerce_image_path(image_process['path'], self.TYPE_MANUFACTURER), self._notice['target']['config']['image_manufacturer'].rstrip('/')))
            if image_import_path:
                cate_image = self.remove_prefix_path(image_import_path, self._notice['target']['config']['image_category'])
                image_details = self.get_sizes(image_process['url'])
                thumbnail_id = self.wp_image(cate_image, image_details)
        if thumbnail_id:
            # write the image id under both the pwb (Perfect WooCommerce Brands)
            # key and the generic thumbnail key, plus a direct URL for plugins
            # that read brand_image_url
            meta_insert = {
                'term_id': term_id,
                # 'meta_key': 'thumbnail_id',
                'meta_key': 'pwb_brand_image',
                'meta_value': thumbnail_id
            }
            meta_query = self.create_insert_query_connector('termmeta', meta_insert)
            self.import_data_connector(meta_query, 'manufacturer')
            meta_insert = {
                'term_id': term_id,
                # 'meta_key': 'thumbnail_id',
                'meta_key': 'thumbnail_id',
                'meta_value': thumbnail_id
            }
            meta_query = self.create_insert_query_connector('termmeta', meta_insert)
            self.import_data_connector(meta_query, 'manufacturer')
            meta_insert = {
                'term_id': term_id,
                'meta_key': 'brand_image_url',
                'meta_value': self._notice['target']['cart_url'].rstrip('/') + '/wp-content/uploads/' + cate_image.lstrip('/')
            }
            meta_query = self.create_insert_query_connector('termmeta', meta_insert)
            self.import_data_connector(meta_query, 'manufacturer')
        if self.is_wpml() or self._notice['target']['support']['yoast_seo']:
            # Yoast stores all taxonomy SEO meta serialized in one option row
            query_wpseo = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
            }
            options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_wpseo)})
            if options_data and options_data['data']:
                # merge this term's SEO fields into the existing serialized option
                option_value = php_unserialize(options_data['data'][0]['option_value'])
                if not option_value:
                    option_value = dict()
                if taxonomy not in option_value.keys():
                    option_value[taxonomy] = dict()
                option_value[taxonomy][to_int(term_id)] = {
                    'wpseo_title': get_value_by_key_in_dict(convert, 'meta_title', ''),
                    'wpseo_desc': get_value_by_key_in_dict(convert, 'meta_description', ''),
                    'wpseo_linkdex': 0,
                    'wpseo_content_score': 0
                }
                data_set = {
                    'option_value': php_serialize(option_value)
                }
                where = {
                    'option_id': options_data['data'][0]['option_id'],
                    'option_name': 'wpseo_taxonomy_meta'
                }
                self.import_data_connector(self.create_update_query_connector('options', data_set, where), 'manufacturer')
            else:
                # option row absent: create it with just this term's SEO meta
                new_option_data = {
                    'option_name': 'wpseo_taxonomy_meta',
                    'option_value': php_serialize({
                        taxonomy: {
                            to_int(term_id): {
                                'wpseo_title': get_value_by_key_in_dict(convert, 'meta_title', ''),
                                'wpseo_desc': get_value_by_key_in_dict(convert, 'meta_description', ''),
                                'wpseo_linkdex': 0,
                                'wpseo_content_score': 0
                            }
                        }
                    }),
                    'autoload': 'yes'
                }
                self.import_data_connector(self.create_insert_query_connector('options', new_option_data), 'manufacturer')
        return response_success(manufacturer_taxonomy_import)
def after_manufacturer_import(self, manufacturer_id, convert, manufacturer, manufacturers_ext):
return response_success()
def addition_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return response_success()
# TODO: CATEGORY
def prepare_categories_import(self):
parent = super().prepare_categories_import()
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
self.query_data_connector({'type': 'query', 'query': query['query']})
return self
def prepare_categories_export(self):
return self
    def get_categories_main_export(self):
        """Fetch the next batch of product_cat terms, resuming after the last exported id.

        When WPML is active, only translation-source terms (source_language_code
        IS NULL) are selected; translations are joined in later.
        """
        id_src = self._notice['process']['categories']['id_src']
        limit = self._notice['setting']['categories']
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id"
                     " WHERE tx.taxonomy = 'product_cat' AND tx.term_id > " + to_str(
                id_src) + " AND t.term_id IS NOT NULL ORDER BY tx.term_id ASC LIMIT " + to_str(limit)
        }
        if self._notice['src']['support']['wpml']:
            query = {
                'type': 'select',
                # 'query': "SELECT * FROM _DBPRF_term_taxonomy tt "
                #          "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                #          "LEFT JOIN _DBPRF_icl_translations il ON tt.term_id = il.element_id "
                #          "WHERE tt.term_id and il.`source_language_code` is NULL and il.`element_type` = 'tax_product_cat' and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
                # 	id_src) + " ORDER BY tt.term_id ASC LIMIT " + to_str(limit),
                'query': "SELECT * FROM _DBPRF_term_taxonomy tt "
                         "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                         "LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
                         "WHERE il.`element_type` = 'tax_product_cat' and il.`source_language_code` IS NULL and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
                    id_src) + " ORDER BY tt.term_id ASC LIMIT " + to_str(limit),
            }
        # categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
        categories = self.select_data_connector(query, 'categories')
        if not categories or categories['result'] != 'success':
            return response_error('could not get manufacturers main to export')
        return categories
    def get_categories_ext_export(self, categories):
        """Fetch auxiliary data (term meta, parents, thumbnails, WPML translations) for categories.

        Also used for blog categories: `categories['is_blog']` switches the
        taxonomy from 'product_cat' to 'category'.

        :param categories: response from get_categories_main_export
        :return: merged connector response, or an error/warning response.
        """
        url_query = self.get_connector_url('query')
        category_ids = duplicate_field_value_from_list(categories['data'], 'term_id')
        parent_ids = duplicate_field_value_from_list(categories['data'], 'parent')
        cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
        taxonomy_type = 'product_cat' if not categories.get('is_blog') else 'category'
        categories_ext_queries = {
            'all_category': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id > 0 "
            },
            'seo_categories': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                         "WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id IN " + self.list_to_in_condition(parent_ids)
            }
        }
        # term meta moved from woocommerce_termmeta into core termmeta in newer versions
        if cart_version > 255:
            categories_ext_queries['woocommerce_termmeta'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_termmeta WHERE term_id IN " + self.list_to_in_condition(
                    category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
            }
        else:
            categories_ext_queries['woocommerce_termmeta'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_woocommerce_termmeta WHERE woocommerce_term_id IN " + self.list_to_in_condition(
                    category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
            }
        # add wpml
        if self._notice['src']['support']['wpml']:
            categories_ext_queries['icl_translations'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_icl_translations WHERE element_type = 'tax_product_cat' and element_id IN " + self.list_to_in_condition(
                    category_ids)
            }
        # categories_ext = self.get_connector_data(url_query, {
        #     'serialize': True,
        #     'query': json.dumps(categories_ext_queries)
        # })
        categories_ext = self.get_connector_data(url_query, {
            'serialize': True,
            'query': json.dumps(categories_ext_queries)
        })
        if not categories_ext or categories_ext['result'] != 'success':
            return response_warning()
        # second round trip: resolve thumbnail attachments (and WPML translations)
        thumb_id_list = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'meta_key', 'thumbnail_id')
        thumbnail_ids = duplicate_field_value_from_list(thumb_id_list, 'meta_value')
        thumb_ids_query = self.list_to_in_condition(thumbnail_ids)
        categories_ext_rel_queries = {
            'post_meta': {
                'type': 'select',
                'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
                         "LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' WHERE p.ID IN " + thumb_ids_query
            }
            # 'seo_category': array(
            #     'type': 'select',
            #     'query': "SELECT * FROM _DBPRF_options WHERE option_id = 235866",
            # ),
        }
        if self._notice['src']['support']['wpml']:
            # all terms sharing a trid are translations of one another
            trids = duplicate_field_value_from_list(categories_ext['data']['icl_translations'], 'trid')
            categories_ext_rel_queries['wpml_category_lang'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_icl_translations il "
                         "LEFT JOIN _DBPRF_term_taxonomy as tx ON il.element_id = tx.term_id "
                         "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                         "WHERE il.element_type = 'tax_product_cat' and il.trid IN " + self.list_to_in_condition(trids)
            }
        # add seo
        # if (self._notice['config']['seo']){
        #     ext_rel_seo_queries = model_seo->getCategoriesSeoExtRelQuery(this, categories, categories_ext)
        #     categories_ext_rel_queries = array_merge(categories_ext_rel_queries, ext_rel_seo_queries)
        # }
        # add custom
        if categories_ext_rel_queries:
            # categories_ext_rel = self.get_connector_data(url_query, {
            #     'serialize': True,
            #     'query': json.dumps(categories_ext_rel_queries)
            # })
            categories_ext_rel = self.select_multiple_data_connector(categories_ext_rel_queries, 'categories')
            if not categories_ext_rel or categories_ext_rel['result'] != 'success':
                return response_error()
            categories_ext = self.sync_connector_object(categories_ext, categories_ext_rel)
        return categories_ext
    def convert_category_export(self, category, categories_ext):
        """Convert one category term (product or blog) into the generic category format.

        Resolves the parent chain, thumbnail image, WPML translations and Yoast
        SEO meta for the term.

        :param category: term row (term + term_taxonomy fields)
        :param categories_ext: response from get_categories_ext_export
        :return: response_success with the constructed category dict
        """
        category_data = self.construct_category() if not self.blog_running else self.construct_blog_category()
        # category_data = self.add_construct_default(category_data)
        parent = self.construct_category_parent() if not self.blog_running else self.construct_blog_category()
        parent['id'] = 0
        if category['parent'] and to_int(category['parent']) > 0:
            # recursive lookup: get_category_parent converts the parent term too
            parent_data = self.get_category_parent(category['parent'])
            if parent_data['result'] == 'success' and parent_data['data']:
                parent = parent_data['data']
        category_path = img_meta = category_url = img_label = ''
        cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
        # term meta key column differs across WooCommerce versions
        if cart_version > 255:
            category_src = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'term_id', category['term_id'])
        else:
            category_src = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'woocommerce_term_id', category['term_id'])
        if category_src:
            # resolve the thumbnail attachment into label / path / base url
            category_img_id = self.get_value_metadata(category_src, 'thumbnail_id', 0)
            img_meta = get_list_from_list_by_field(categories_ext['data']['post_meta'], 'ID', category_img_id)
            if img_meta:
                img_label = img_meta[0]['post_title']
                category_path = to_str(img_meta[0]['meta_value'])
                category_url = to_str(img_meta[0]['guid']).replace(category_path, '')
        category_data['id'] = category['term_id']
        category_data['code'] = category['slug']
        category_data['name'] = category['name']
        category_data['description'] = category['description']
        category_data['parent'] = parent
        category_data['active'] = True
        category_data['thumb_image']['label'] = img_label
        category_data['thumb_image']['url'] = category_url
        category_data['thumb_image']['path'] = category_path
        category_data['sort_order'] = 1
        category_data['created_at'] = get_current_time()
        category_data['updated_at'] = get_current_time()
        # keep the raw rows for downstream hooks
        category_data['category'] = category
        category_data['categories_ext'] = categories_ext
        # todo: woo2woo
        category_data['display_type'] = self.get_value_metadata(category_src, 'display_type', '')
        if self._notice['src']['support']['wpml']:
            # collect every translation sharing this term's trid
            trid = get_row_value_from_list_by_field(categories_ext['data']['icl_translations'], 'element_id', category['term_taxonomy_id'], 'trid')
            if trid:
                languages_data = get_list_from_list_by_field(categories_ext['data']['wpml_category_lang'], 'trid', trid)
                if languages_data:
                    for language_data in languages_data:
                        category_new_data = self.construct_category_lang()
                        category_new_data['id'] = language_data['term_id']
                        category_new_data['code'] = language_data['slug']
                        category_new_data['name'] = language_data['name']
                        category_new_data['description'] = language_data['description']
                        if to_int(language_data['term_id']) == to_int(category['term_id']):
                            category_data['language_default'] = language_data['language_code']
                        elif 'language_default' not in category_data and not language_data['source_language_code']:
                            category_data['language_default'] = language_data['language_code']
                        category_data['languages'][language_data['language_code']] = category_new_data
        else:
            category_language_data = self.construct_category_lang()
            language_id = self._notice['src']['language_default']
            category_language_data['name'] = category['name']
            category_language_data['description'] = category['description']
            category_data['languages'][language_id] = category_language_data
        # Yoast SEO meta lives serialized in a single option row.
        # NOTE(review): this fires one extra connector query per category —
        # a candidate for hoisting into get_categories_ext_export.
        query_wpseo = {
            'type': 'select',
            'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
        }
        options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_wpseo)})
        if options_data and options_data['data']:
            option_value = php_unserialize(options_data['data'][0]['option_value'])
            if option_value and 'product_cat' in option_value:
                if to_int(category['term_id']) in option_value['product_cat']:
                    category_data['meta_title'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_title', '')
                    category_data['meta_description'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_desc', '')
                    category_data['meta_keyword'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_focuskw', '')
        # if self._notice['config']['seo']:
        # dispatch to the SEO-plugin-specific URL extractor (categories_<plugin>)
        detect_seo = self.detect_seo()
        category_data['seo'] = getattr(self, 'categories_' + detect_seo)(category, categories_ext)
        return response_success(category_data)
    def get_category_parent(self, parent_id):
        """Export a single parent category on demand.

        Called while importing a category whose parent has not been migrated
        yet.  If the parent is already present in the migration map only its
        id is returned; otherwise the source row is fetched from the source
        store and converted for import.

        :param parent_id: source term_id of the parent category
        :return: response dict (success with the converted category, or error)
        """
        # Blog migrations map WP `category` terms; shop migrations map `product_cat`.
        type_map = self.TYPE_CATEGORY if not self.blog_running else self.TYPE_CATEGORY_BLOG
        category_exist = self.select_map(self._migration_id, type_map, parent_id)
        if category_exist:
            # Already migrated: the caller only needs the id to resolve the map.
            return response_success({
                'id': parent_id,
                'code': ''
            })
        taxonomy_type = 'product_cat' if not self.blog_running else 'category'
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                     "WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id = " + to_str(parent_id)
        }
        if self._notice['src']['support']['wpml']:
            # WPML installs: select only the original-language row
            # (source_language_code IS NULL) for this term.
            query = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                         "LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
                         "WHERE il.`element_type` = 'tax_product_cat' AND il.`source_language_code` IS NULL AND tt.taxonomy = '" + taxonomy_type + "' and tt.term_id = " + to_str(parent_id),
            }
        categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
        if not categories or categories['result'] != 'success':
            return response_error('could not get category parent to export')
        if categories and categories['data']:
            category = categories['data'][0]
            categories_ext = self.get_categories_ext_export(categories)
            category_convert = self.convert_category_export(category, categories_ext)
            return category_convert
        # Query succeeded but returned no row for this term id.
        return response_error('could not get category parent to export')
def get_category_id_import(self, convert, category, categories_ext):
return category['term_id']
def check_category_import(self, convert, category, categories_ext):
id_imported = self.get_map_field_by_src(self.TYPE_CATEGORY, convert['id'], convert['code'], lang = self._notice['target']['language_default'])
return True if id_imported else False
def router_category_import(self, convert, category, categories_ext):
return response_success('category_import')
def before_category_import(self, convert, category, categories_ext):
return response_success()
    def category_import(self, convert, category, categories_ext):
        """Create one category on the target WooCommerce store.

        Insert order matters: the term row is created first, the
        source->target id mapping is recorded, then the default term meta and
        finally the ``product_cat`` taxonomy row are written.

        :param convert: dict, converted category data (id, name, url_key, description, ...)
        :param category: dict, raw source category row (unused here)
        :param categories_ext: dict, extra source data (unused here)
        :return: response dict carrying the new term id
        """
        category_data = {
            'name': convert['name'],
            'slug': convert['url_key']
        }
        id_category = self.import_category_data_connector(self.create_insert_query_connector('terms', category_data),
                                                          True, convert['id'])
        # Record the mapping so child categories and products can resolve it later.
        self.insert_map(self.TYPE_CATEGORY, convert['id'], id_category, convert['code'])
        # Default WooCommerce meta rows for a freshly created product category.
        meta_cate_data = {
            'order': 0,
            'display_type': '',
            'thumbnail_id': '',
            'product_count_product_cat': '0'
        }
        for meta_key, meta_value in meta_cate_data.items():
            meta_data = {
                'term_id': id_category,
                'meta_key': meta_key,
                'meta_value': meta_value
            }
            self.import_category_data_connector(self.create_insert_query_connector('termmeta', data=meta_data), 'categories')
        # Taxonomy row marks the term as a product_cat.
        # NOTE(review): `parent` is left empty and convert['parent'] is never
        # applied here — parent linking presumably happens elsewhere; confirm
        # before relying on category hierarchy at this step.
        taxonomy_cate_data = {
            'term_id': id_category,
            'taxonomy': 'product_cat',
            'description': convert['description'],
            'parent': '',
            'count': ''
        }
        self.import_category_data_connector(self.create_insert_query_connector('term_taxonomy', taxonomy_cate_data), True, convert['id'])
        return response_success(id_category)
def get_new_trid(self):
query = {
'type': 'select',
'query': "SELECT max(trid) as trid FROM _DBPRF_icl_translations"
}
trid = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
new_trid = 1
if trid['data']:
new_trid = to_int(trid['data'][0]['trid']) + 1
return new_trid
def after_category_import(self, category_id, convert, category, categories_ext):
return response_success()
def addition_category_import(self, convert, category, categories_ext):
return response_success()
# TODO: PRODUCT
def prepare_products_import(self):
parent = super().prepare_products_import()
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
self.query_data_connector({'type': 'query', 'query': query['query']})
if not self._notice['config']['add_new']:
file_name = get_pub_path() + '/media/' + to_str(self._migration_id) + '/variants.csv'
if os.path.isfile(file_name):
os.remove(file_name)
return self
def prepare_products_export(self):
return self
    def get_products_main_export(self):
        """Fetch the next batch of source products to export.

        Reads the resume point (last exported post ID) and the batch size
        from the migration notice and selects the following ``product`` posts,
        skipping revisions and auto-drafts.  On WPML installs only
        original-language rows (source_language_code IS NULL) are selected.

        :return: connector response dict with the product rows, or error
        """
        # Resume point and page size for this batch.
        id_src = self._notice['process']['products']['id_src']
        limit = self._notice['setting']['products']
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_posts WHERE ID > " + to_str(id_src) + " AND post_type = 'product' AND post_status NOT IN ('inherit','auto-draft') ORDER BY ID ASC LIMIT " + to_str(limit),
        }
        if self._notice['src']['support']['wpml']:
            query = {
                'type': 'select',
                # 'query': "SELECT * FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
                #          "WHERE il.`element_type` = 'post_product' and il.`source_language_code` is NULL and p.ID and p.ID > " + to_str(
                # 	id_src) + " AND p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') ORDER BY p.ID ASC LIMIT " + to_str(
                # 	limit),
                'query': "SELECT * FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
                         "WHERE il.`source_language_code` is NULL and il.`element_type` = 'post_product' AND p.ID > " + to_str(
                    id_src) + " AND p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') ORDER BY p.ID ASC LIMIT " + to_str(
                    limit),
            }
        # products = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
        products = self.select_data_connector(query, 'products')
        if not products or products['result'] != 'success':
            return response_error()
        return products
    def get_products_ext_export(self, products):
        """Collect all auxiliary source data needed to convert a product batch.

        Runs three successive rounds of queries against the source store:
        1) variants, term relationships, grouped children and linked products
           for the batch;
        2) post meta, attribute taxonomies, variant term relationships and
           attribute term values derived from round 1 (plus WPML translation
           data when enabled);
        3) image rows for every thumbnail/gallery id found in the meta.
        The three result sets are merged into a single connector object.

        :param products: connector response from get_products_main_export()
        :return: merged connector response dict, or error
        """
        url_query = self.get_connector_url('query')
        product_ids = duplicate_field_value_from_list(products['data'], 'ID')
        product_id_con = self.list_to_in_condition(product_ids)
        # product_id_query = self.product_to_in_condition_seourl(product_ids)
        # SQL condition matching serialized _upsell_ids/_crosssell_ids values.
        linked = self.product_to_in_condition_linked(product_ids)
        # --- Round 1: data keyed directly off the batch's product ids. ---
        product_ext_queries = {
            'post_variant': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'product_variation' AND post_parent IN " + product_id_con,
            },
            'term_relationship': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
                         "LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
                         "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                         "WHERE tr.object_id IN " + product_id_con,
            },
            'post_grouped': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_posts WHERE post_parent IN " + product_id_con + " AND post_type = 'product'",
            },
            'parent_link': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_postmeta WHERE meta_key IN ('_upsell_ids','_crosssell_ids') AND meta_value " + linked
            },
        }
        if self._notice['src']['support']['wpml']:
            product_ext_queries['icl_translations'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and element_id IN " + product_id_con
            }
        # products_ext = self.get_connector_data(url_query, {
        #     'serialize': True, 'query': json.dumps(product_ext_queries)
        # })
        products_ext = self.select_multiple_data_connector(product_ext_queries, 'products')
        if (not products_ext) or products_ext['result'] != 'success':
            return response_error()
        # --- Round 2: data derived from the variants and term relationships. ---
        pro_child_ids = duplicate_field_value_from_list(products_ext['data']['post_variant'], 'ID')
        all_product_ids = self.list_to_in_condition(list(set(pro_child_ids + product_ids)))
        variant_id_query = self.list_to_in_condition(pro_child_ids)
        taxonomy_duplicate = duplicate_field_value_from_list(products_ext['data']['term_relationship'], 'taxonomy')
        # WooCommerce attribute taxonomies are prefixed with 'pa_'; strip the
        # prefix to match woocommerce_attribute_taxonomies.attribute_name.
        attrs_taxonomy = self.get_list_from_list_by_field_as_first_key(taxonomy_duplicate, '', 'pa_')
        attrs_name = list()
        for attr_taxonomy in attrs_taxonomy:
            attrs_name.append(self.substr_replace(attr_taxonomy, '', 0, 3))
        attr_name_query = self.list_to_in_condition(attrs_name)
        attr_values = duplicate_field_value_from_list(products_ext['data']['term_relationship'], 'term_id')
        attr_values_query = self.list_to_in_condition(attr_values)
        product_ext_rel_queries = {
            'post_meta': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + all_product_ids,
            },
            'woocommerce_attribute_taxonomies': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_woocommerce_attribute_taxonomies WHERE attribute_name IN " + attr_name_query,
            },
            'variation_term_relationship': {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
                         "LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
                         "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                         "WHERE tr.object_id IN " + variant_id_query,
            },
            'term_attribute': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_terms WHERE term_id IN " + attr_values_query,
            }
        }
        if self._notice['src']['support']['wpml']:
            # Pull every translation row sharing a trid with the batch, plus
            # the translated products' meta, term links and attribute terms.
            trids = duplicate_field_value_from_list(products_ext['data']['icl_translations'], 'trid')
            product_ext_rel_queries['wpml_product_lang'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_icl_translations il "
                         "LEFT JOIN _DBPRF_posts as p ON il.element_id = p.ID "
                         "WHERE il.element_type = 'post_product' and il.trid IN " + self.list_to_in_condition(trids)
            }
            product_ext_rel_queries['wpml_product_meta'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN (SELECT element_id FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and trid IN " + self.list_to_in_condition(trids) + ")"
            }
            product_ext_rel_queries['wpml_term_relationship'] = {
                'type': "select",
                'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
                         "LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
                         "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tr.object_id IN (SELECT element_id FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and trid IN " + self.list_to_in_condition(trids) + ")",
            }
            product_ext_rel_queries['attributes_icl_translations'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_icl_translations il "
                         "LEFT JOIN _DBPRF_term_taxonomy as tx ON il.element_id = tx.term_id "
                         "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                         "WHERE il.element_type IN " + self.wpml_attributes_to_in_condition(
                    attrs_taxonomy)
            }
        # products_ext_rel = self.get_connector_data(url_query, {
        #     'serialize': True, 'query': json.dumps(product_ext_rel_queries)
        products_ext_rel = self.select_multiple_data_connector(product_ext_rel_queries, 'products')
        if (not products_ext_rel) or products_ext_rel['result'] != 'success':
            return response_error()
        # --- Round 3: image rows for all thumbnail and gallery attachment ids. ---
        thumbnail_id_list = get_list_from_list_by_field(products_ext_rel['data']['post_meta'], 'meta_key', '_thumbnail_id')
        thumbnail_ids = duplicate_field_value_from_list(thumbnail_id_list, 'meta_value')
        # gallery_ids is rebound below; gallery_ids_src keeps only non-empty ids.
        gallery_ids = gallery_ids_src = list()
        gallery_list = get_list_from_list_by_field(products_ext_rel['data']['post_meta'], 'meta_key', '_product_image_gallery')
        if gallery_list:
            for gallery in gallery_list:
                if 'meta_value' in gallery and gallery['meta_value']:
                    # _product_image_gallery is a comma-separated attachment id list.
                    images_ids = gallery['meta_value'].split(',')
                    if images_ids:
                        gallery_ids = list(set(gallery_ids + images_ids))
            for id in gallery_ids:
                if id != '':
                    gallery_ids_src.append(id)
        all_images_ids = list(set(thumbnail_ids + gallery_ids_src))
        all_images_ids_query = self.list_to_in_condition(all_images_ids)
        product_ext_rel_third_queries = {
            'image': {
                'type': 'select',
                'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
                         "LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' "
                         "WHERE p.ID IN " + all_images_ids_query,
            }
        }
        products_ext_third = self.get_connector_data(url_query, {
            'serialize': True, 'query': json.dumps(product_ext_rel_third_queries)
        })
        if (not products_ext_third) or products_ext_third['result'] != 'success':
            return response_error()
        # Merge the three result sets into one response object.
        products_ext1 = self.sync_connector_object(products_ext_rel, products_ext_third)
        products_ext = self.sync_connector_object(products_ext, products_ext1)
        return products_ext
def convert_product_export(self, product, products_ext):
product_meta = get_list_from_list_by_field(products_ext['data']['post_meta'], 'post_id', product['ID'])
product_data = self.construct_product()
product_data = self.add_construct_default(product_data)
product_data['id'] = product['ID']
product_data['code'] = product['post_name']
product_data['sku'] = self.get_value_metadata(product_meta, '_sku')
# todo: get type prd virtual
product_type = get_row_value_from_list_by_field(product_meta, 'meta_key', '_virtual', 'meta_value')
if product_type == 'yes':
product_data['type'] = 'virtual'
product_price = ''
if to_decimal(self.get_value_metadata(product_meta, '_regular_price', 0.0000)) > 0:
product_price = self.get_value_metadata(product_meta, '_regular_price', 0.0000)
else:
product_price = self.get_value_metadata(product_meta, '_price', 0.0000)
if product_price == '' or product_price == self.get_value_metadata(product_meta, '_min_variation_sale_price', 0.0000):
product_price = self.get_value_metadata(product_meta, '_min_variation_regular_price', 0.0000)
if product_price == '' or not product_price:
product_price = 0
product_data['price'] = product_price
product_data['weight'] = self.get_value_metadata(product_meta, '_weight', 0.0000)
product_data['length'] = self.get_value_metadata(product_meta, '_length', 0.0000)
product_data['width'] = self.get_value_metadata(product_meta, '_width', 0.0000)
product_data['height'] = self.get_value_metadata(product_meta, '_height', 0.0000)
product_data['status'] = True if product['post_status'] == "publish" else False
product_data['manage_stock'] = True if self.get_value_metadata(product_meta, '_manage_stock', 'no') == "yes" else False
if self.is_woo2woo():
product_data['is_in_stock'] = self.get_value_metadata(product_meta, '_stock_status', 'instock')
product_data['sold_individually'] = self.get_value_metadata(product_meta, '_sold_individually', '')
product_data['purchase_note'] = self.get_value_metadata(product_meta, '_purchase_note', '')
else:
product_data['is_in_stock'] = True if self.get_value_metadata(product_meta, '_stock_status', 'instock') == "instock" else False
product_data['qty'] = to_int(to_decimal(self.get_value_metadata(product_meta, '_stock', 0))) if to_decimal(self.get_value_metadata(product_meta, '_stock', 0)) > 0 else 0
product_data['created_at'] = convert_format_time(product['post_date'])
product_data['updated_at'] = convert_format_time(product['post_modified'])
product_data['name'] = product['post_title']
product_data['description'] = product['post_content']
product_data['short_description'] = product['post_excerpt']
product_data['menu_order'] = product['menu_order']
product_data['sort_order'] = product['menu_order']
product_data['backorders'] = self.get_value_metadata(product_meta, '_backorders', 'no')
product_data['meta_description'] = self.get_value_metadata(product_meta, '_yoast_wpseo_metadesc', '')
product_data['meta_title'] = self.get_value_metadata(product_meta, '_yoast_wpseo_title', '')
if product_data['meta_title']:
product_data['meta_title'] = product_data['meta_title'].replace('%%title%%', product_data['name']).replace('%%page%%', '').replace('%%sep%%', '-').replace('%%sitename%%', '')
# image_
thumbnail_id = self.get_value_metadata(product_meta, '_thumbnail_id', 0)
if thumbnail_id:
thumbnail_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', thumbnail_id)
if thumbnail_src:
product_data['thumb_image']['label'] = thumbnail_src[0]['post_title']
product_data['thumb_image']['url'] = self._notice['src']['cart_url'].rstrip('/') + '/wp-content/uploads/' + to_str(thumbnail_src[0]['meta_value']).lstrip('/')
product_data['thumb_image']['url'] = to_str(product_data['thumb_image']['url']).replace('uploads/uploads', 'uploads')
gallery_ids = self.get_value_metadata(product_meta, '_product_image_gallery', '')
if gallery_ids:
gallery_ids = gallery_ids.split(',')
for gallery_id in gallery_ids:
image_gallery_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', gallery_id)
product_image_data = self.construct_product_image()
if image_gallery_src:
product_image_data['label'] = image_gallery_src[0]['post_title']
product_image_data['url'] = self._notice['src']['cart_url'].rstrip('/') + '/wp-content/uploads/' + image_gallery_src[0]['meta_value'].lstrip('/')
product_image_data['url'] = to_str(product_image_data['url']).replace('uploads/uploads', 'uploads')
product_data['images'].append(product_image_data)
sale_price = self.get_value_metadata(product_meta, '_sale_price', '')
if sale_price != '':
product_data['special_price']['price'] = to_decimal(sale_price)
start_date = self.get_value_metadata(product_meta, '_sale_price_dates_from', '')
if start_date:
product_data['special_price']['start_date'] = convert_format_time(start_date)
end_date = self.get_value_metadata(product_meta, '_sale_price_dates_to', '')
if end_date:
product_data['special_price']['end_date'] = convert_format_time(self.get_value_metadata(product_meta, '_sale_price_dates_to', ''))
else:
product_data['special_price']['price'] = self.get_value_metadata(product_meta, '_min_variation_sale_price', 0.0000)
if not product_data['special_price']['price']:
product_data['special_price']['price'] = 0
crosssell_ids = self.get_value_metadata(product_meta, '_crosssell_ids', '')
if crosssell_ids:
crosssell_ids_data = php_unserialize(crosssell_ids)
if crosssell_ids_data:
for crosssell_id in crosssell_ids_data:
relation = self.construct_product_relation()
relation['id'] = crosssell_id
relation['type'] = self.PRODUCT_CROSS
product_data['relate']['children'].append(relation)
parent_crosssell_list = get_list_from_list_by_field(products_ext['data']['parent_link'], 'meta_key', '_crosssell_ids')
if parent_crosssell_list:
for parent_crosssell in parent_crosssell_list:
if parent_crosssell['meta_value'].find(':' + to_str(product['ID']) + ';') != -1:
relation = self.construct_product_relation()
relation['id'] = parent_crosssell['post_id']
relation['type'] = self.PRODUCT_CROSS
product_data['relate']['parent'].append(relation)
upsell_ids = self.get_value_metadata(product_meta, '_upsell_ids', '')
if upsell_ids:
upsell_ids_data = php_unserialize(upsell_ids)
if upsell_ids_data:
for upsell_id in upsell_ids_data:
relation = self.construct_product_relation()
relation['id'] = upsell_id
relation['type'] = self.PRODUCT_UPSELL
product_data['relate']['children'].append(relation)
parent_upsell_list = get_list_from_list_by_field(products_ext['data']['parent_link'], 'meta_key', '_upsell_ids')
if parent_upsell_list:
for parent_upsell in parent_upsell_list:
if parent_upsell['meta_value'].find(':' + to_str(product['ID']) + ';') != -1:
relation = self.construct_product_relation()
relation['id'] = parent_upsell['post_id']
relation['type'] = self.PRODUCT_UPSELL
product_data['relate']['parent'].append(relation)
product_data['tax']['code'] = self.get_value_metadata(product_meta, '_tax_class', 'standard') if self.get_value_metadata(product_meta, '_tax_status', 'taxable') != 'none' else None
product_data['tax']['status'] = self.get_value_metadata(product_meta, '_tax_status', 'taxable')
# category product
term_relationship = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
category_src = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_cat')
if category_src:
for product_category in category_src:
product_category_data = self.construct_product_category()
product_category_data['id'] = product_category['term_id']
product_category_data['code'] = product_category['slug']
product_data['categories'].append(product_category_data)
if self._notice['src']['support']['manufacturers']:
manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'product_brand')
if not manu_src:
manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'pwb-brand')
if manu_src:
product_manufacturer_data = dict()
product_manufacturer_data['id'] = manu_src['term_id']
product_manufacturer_data['name'] = manu_src['name']
product_manufacturer_data['code'] = manu_src['slug']
product_data['manufacturer'] = product_manufacturer_data
# tags
product_tags = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_tag')
if product_tags:
tags = list()
for product_tag in product_tags:
tags.append(product_tag['name'])
if tags:
product_data['tags'] = ','.join(tags)
# if self._notice['config']['seo']:
detect_seo = self.detect_seo()
product_data['seo'] = getattr(self, 'products_' + detect_seo)(product, products_ext)
# TODO: convert product languages
if self._notice['src']['support']['wpml']:
trid = get_row_value_from_list_by_field(products_ext['data']['icl_translations'], 'element_id', product['ID'], 'trid')
if trid:
language_datas = get_list_from_list_by_field(products_ext['data']['wpml_product_lang'], 'trid', trid)
if language_datas:
for language_data in language_datas:
if not language_data['post_title']:
continue
meta_language_datas = get_list_from_list_by_field(products_ext['data']['wpml_product_meta'], 'post_id', language_data['ID'])
term_relationship_language = get_list_from_list_by_field(products_ext['data']['wpml_term_relationship'], 'object_id', language_data['ID'])
product_new_data = self.construct_product_lang()
product_new_data['name'] = language_data['post_title']
product_new_data['code'] = language_data['post_name']
product_new_data['description'] = language_data['post_content']
product_new_data['short_description'] = language_data['post_excerpt']
product_new_data['meta_description'] = self.get_value_metadata(meta_language_datas, '_yoast_wpseo_metadesc', '')
product_new_data['meta_title'] = self.get_value_metadata(meta_language_datas, '_yoast_wpseo_title', '')
if product_new_data['meta_title']:
product_new_data['meta_title'] = product_new_data['meta_title'].replace('%%title%%', product_new_data['name']).replace('%%page%%', '').replace('%%sep%%', '-').replace('%%sitename%%', '')
wpml_product_tags = get_list_from_list_by_field(term_relationship_language, 'taxonomy', 'product_tag')
if wpml_product_tags:
wpml_tags = list()
for wpml_product_tag in wpml_product_tags:
wpml_tags.append(wpml_product_tag['name'])
if wpml_tags:
product_new_data['tags'] = ','.join(wpml_tags)
if not language_data['source_language_code']:
product_data['language_default'] = language_data['language_code']
product_data['languages'][language_data['language_code']] = product_new_data
else:
product_language_data = self.construct_product_lang()
product_language_data['name'] = product['post_title']
product_language_data['description'] = product['post_content']
product_language_data['short_description'] = product['post_excerpt']
language_id = self._notice['src']['language_default']
product_data['languages'][language_id] = product_language_data
# attribute product
product_child_src = get_list_from_list_by_field(products_ext['data']['post_variant'], 'post_parent', product['ID'])
# todo: attribute
product_attribute = get_row_value_from_list_by_field(product_meta, 'meta_key', '_product_attributes', 'meta_value')
product_attribute = php_unserialize(product_attribute)
if isinstance(product_attribute, str):
product_attribute = php_unserialize(product_attribute)
src_option_values = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
attribute_variants = list()
if product_attribute:
for attribute_key, attribute in product_attribute.items():
if to_int(attribute.get('is_taxonomy')) > 0:
woo_attribute = get_row_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', to_str(attribute_key).replace('pa_', ''))
if not woo_attribute:
woo_attribute = get_row_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', to_str(attribute['name']).replace('pa_', ''))
else:
woo_attribute = None
if woo_attribute:
# attributes
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = woo_attribute['attribute_id']
option_code = to_str(woo_attribute['attribute_name']).lower()
attribute_data['option_code'] = option_code.strip()
attribute_data['option_type'] = woo_attribute['attribute_type']
attribute_data['option_name'] = woo_attribute['attribute_label']
attribute_data['option_group'] = woo_attribute['attribute_orderby']
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
attribute_data['is_taxonomy'] = True if to_int(attribute.get('is_taxonomy')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = woo_attribute['attribute_label']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
tmp_values = list()
desc = list()
for option_value in src_option_values:
attribute_name = 'pa_' + to_str(woo_attribute['attribute_name']).lower()
if 'taxonomy' in option_value:
if option_value['taxonomy'] == attribute_name:
woo_term_values = get_list_from_list_by_field(
products_ext['data']['term_attribute'], 'term_id', option_value['term_id'])
if woo_term_values:
for woo_term in woo_term_values:
attribute_value = woo_term['name']
if woo_attribute['attribute_type'] in ['select', 'alg_wc_civs_image']:
option_values = to_str(woo_term['name']).split('|')
if option_values and to_len(option_values) > 1:
attribute_value = ';'.join(option_values)
tmp_values.append(attribute_value)
desc.append(option_value['description'])
values = list(map(lambda x: x.strip(), tmp_values))
if values and to_len(values) > 1:
attribute_data['option_type'] = self.OPTION_MULTISELECT
attribute_data['option_value_name'] = ';'.join(values)
attribute_data['option_value_description'] = ';'.join(desc)
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': ';'.join(values)
}
if (to_int(attribute.get('is_variation')) == 1 or to_str(attribute.get('variation')) == 'yes') and not self.is_woo2woo():
attribute_variants.append(attribute_data)
else:
product_data['attributes'].append(attribute_data)
else:
if ('is_visible' in attribute and to_int(attribute['is_visible']) == 1) or ('visible' in attribute and attribute['visible'] == 'yes'):
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = None
option_code = to_str(attribute['name']).lower()
attribute_data['option_code'] = option_code.lower().strip()
attribute_data['option_type'] = 'text'
attribute_data['option_name'] = attribute['name']
attribute_data['option_group'] = 'menu_order'
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = attribute['name']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
attribute_value = attribute['value']
if attribute_value and attribute_value != '':
option_values = list()
if isinstance(attribute_value, dict):
for key, attr_value in attribute_value.items():
option_values.append(attr_value)
else:
option_values = attribute_value.split('|')
if option_values and to_len(option_values) > 1:
attribute_data['option_type'] = 'multiselect'
option_values = list(map(lambda x: x.strip(), option_values))
attribute_value = ';'.join(option_values)
attribute_data['option_value_name'] = attribute_value
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': attribute_value
}
# product_data['attributes'].append(attribute_data)
else:
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = None
option_code = to_str(attribute['name']).lower()
attribute_data['option_code'] = option_code.lower().strip()
attribute_data['option_type'] = 'text'
attribute_data['option_name'] = attribute['name']
attribute_data['option_group'] = 'menu_order'
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = attribute['name']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
option_values = attribute['value']
if option_values != '':
option_values = option_values.split('|')
if option_values and to_len(option_values) > 1:
attribute_data['option_type'] = self.OPTION_MULTISELECT
option_values = list(map(lambda x: x.strip(), option_values))
option_values = ';'.join(option_values)
attribute_data['option_value_name'] = option_values
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': option_values
}
if (to_int(attribute.get('is_variation')) == 1 or to_str(attribute.get('variation')) == 'yes') and not self.is_woo2woo():
attribute_variants.append(attribute_data)
else:
product_data['attributes'].append(attribute_data)
# end
# todo: plugin add-ons
if self._notice['src']['support'].get('addons') and not self.is_woo2woo():
product_addons = get_row_value_from_list_by_field(product_meta, 'meta_key', '_product_addons', 'meta_value')
product_addons = php_unserialize(product_addons)
if product_addons and to_len(product_addons) > 0:
for product_addon in product_addons:
if not product_addon.get('options') or to_len(product_addon['options']) == 0:
continue
if product_addon.get('type') == 'radiobutton':
option_type = self.OPTION_RADIO
else:
option_type = self.OPTION_SELECT
product_option = self.construct_product_option()
product_option['code'] = self.convert_attribute_code(product_addon.get('name'))
product_option['option_code'] = self.convert_attribute_code(product_addon.get('name'))
product_option['option_name'] = product_addon.get('name')
product_option['type'] = option_type
product_option['position'] = product_addon.get('position')
product_option['required'] = True if product_addon.get('required') and to_int(product_addon.get('required')) == 1 else False
product_addon_options = list()
if isinstance(product_addon.get('options'), dict):
for key, product_addon_value in product_addon['options'].items():
product_addon_options.append(product_addon_value)
else:
product_addon_options = product_addon.get('options')
for product_addon_value in product_addon_options:
product_option_value = self.construct_product_option_value()
product_option_value['code'] = self.convert_attribute_code(product_addon_value.get('label'))
product_option_value['option_value_code'] = self.convert_attribute_code(product_addon_value.get('label'))
product_option_value['option_value_name'] = product_addon_value.get('label')
product_option_value['option_value_price'] = product_addon_value.get('price')
if 'Color' in product_addon.get('name', '') or 'Colour' in product_addon.get('name', ''):
if 'RNBP' in product_addon_value.get('label', ''):
product_option_value['thumb_image']['path'] = self.convert_attribute_code(to_str(product_addon_value.get('label')).replace(' (RNBP)', '')) + '.jpg'
product_option_value['thumb_image']['url'] = self._notice['src']['cart_url'].rstrip('/') + '/assets/blind-images/rnbp/'
product_option['values'].append(product_option_value)
product_data['options'].append(product_option)
# todo: downloadable
product_downloadables = get_row_value_from_list_by_field(product_meta, 'meta_key', '_downloadable_files', 'meta_value')
product_downloadables = php_unserialize(product_downloadables)
if product_downloadables:
product_data['type'] = self.PRODUCT_DOWNLOAD
for key, product_downloadable in product_downloadables.items():
download_data = self.construct_product_downloadable()
download_data['limit'] = get_row_value_from_list_by_field(product_meta, 'meta_key', '_download_limit', 'meta_value')
download_data['max_day'] = get_row_value_from_list_by_field(product_meta, 'meta_key', '_download_expiry', 'meta_value')
name_file = to_str(product_downloadable['file']).split('/') if product_downloadable.get('file') else None
if self._notice['src']['cart_url'] in product_downloadable['file'] and name_file:
download_data['name'] = to_str(product_downloadable['file']).split('/')
download_data['path'] = 'woocommerce/' + to_str(name_file[to_len(name_file) - 1]).lower()
else:
download_data['name'] = product_downloadable['name']
download_data['path'] = product_downloadable['file']
# Thieu max_day,limit
product_data['downloadable'].append(download_data)
# todo: group product
child_group_product = self.get_value_metadata(product_meta, '_children', '')
if child_group_product:
child_group_product = php_unserialize(child_group_product)
if child_group_product and to_len(child_group_product) > 0:
for child_group_product_id in child_group_product:
product_data['group_child_ids'].append({
'id': child_group_product_id
})
product_data['type'] = self.PRODUCT_GROUP
# todo: child product
product_child_src = get_list_from_list_by_field(products_ext['data']['post_variant'], 'post_parent', product['ID'])
all_child = dict()
child_attributes = dict()
if product_child_src:
product_data['type'] = self.PRODUCT_CONFIG
for product_child in product_child_src:
child_attributes[product_child['ID']] = dict()
child_data = self.construct_product_child()
child_data = self.add_construct_default(child_data)
child_meta = get_list_from_list_by_field(products_ext['data']['post_meta'], 'post_id', product_child['ID'])
child_data['id'] = product_child['ID']
child_data['sku'] = self.get_value_metadata(child_meta, '_sku', '') if self.get_value_metadata(child_meta, '_sku', '') else self.get_value_metadata(product_meta, '_sku', '')
child_data['code'] = product_child['post_name']
child_product_price = ''
if self.get_value_metadata(child_meta, '_regular_price', ''):
child_product_price = self.get_value_metadata(child_meta, '_regular_price')
else:
if self.get_value_metadata(child_meta, '_price', ''):
child_product_price = self.get_value_metadata(child_meta, '_price', 0.0000)
else:
child_product_price = 0
if child_product_price == '' or not child_product_price:
child_product_price = 0
child_data['price'] = child_product_price
child_data['weight'] = self.get_value_metadata(child_meta, '_weight') if self.get_value_metadata(child_meta, '_weight') else product_data['weight']
child_data['length'] = self.get_value_metadata(child_meta, '_length') if self.get_value_metadata(child_meta, '_length') else product_data['length']
child_data['width'] = self.get_value_metadata(child_meta, '_width') if self.get_value_metadata(child_meta, '_width') else product_data['width']
child_data['height'] = self.get_value_metadata(child_meta, '_height') if self.get_value_metadata(child_meta, '_height') else product_data['height']
child_data['status'] = True if product_child['post_status'] == "publish" else False
child_data['manage_stock'] = True if self.get_value_metadata(child_meta, '_manage_stock') == 'yes' else False
if self.is_woo2woo():
child_data['is_in_stock'] = self.get_value_metadata(child_meta, '_stock_status', 'instock')
child_data['sold_individually'] = self.get_value_metadata(child_meta, '_sold_individually', '')
child_data['purchase_note'] = self.get_value_metadata(child_meta, '_purchase_note', '')
else:
child_data['is_in_stock'] = True if self.get_value_metadata(child_meta, '_stock_status', 'instock') == "instock" else False
child_data['qty'] = to_int(to_decimal(self.get_value_metadata(child_meta, '_stock'))) if self.get_value_metadata(child_meta, '_stock') else 0
child_data['created_at'] = convert_format_time(product_child['post_date'])
child_data['updated_at'] = convert_format_time(product_child['post_modified'])
child_data['name'] = product_child['post_title']
child_data['description'] = self.get_value_metadata(child_meta, '_variation_description')
child_data['tax']['code'] = self.get_value_metadata(child_meta, '_tax_class', 'standard')
child_data['short_description'] = ''
# image_
thumbnail_id = self.get_value_metadata(child_meta, '_thumbnail_id')
if thumbnail_id:
thumbnail_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', thumbnail_id)
if thumbnail_src:
child_data['thumb_image']['label'] = thumbnail_src[0]['post_title']
child_data['thumb_image']['path'] = thumbnail_src[0]['meta_value']
child_data['thumb_image']['url'] = to_str(thumbnail_src[0]['guid']).replace(thumbnail_src[0]['meta_value'], '')
sale_price = self.get_value_metadata(child_meta, '_sale_price')
if sale_price != '':
child_data['special_price']['price'] = sale_price
child_data['special_price']['start_date'] = convert_format_time(self.get_value_metadata(child_meta, '_sale_price_dates_from'))
child_data['special_price']['end_date'] = convert_format_time(self.get_value_metadata(child_meta, '_sale_price_dates_to'))
child_product_language_data = self.construct_product_lang()
child_product_language_data['name'] = product_child['post_title']
child_product_language_data['description'] = self.get_value_metadata(child_meta, '_variation_description')
child_product_language_data['short_description'] = product_child['post_excerpt']
language_id = self._notice['src']['language_default']
child_data['languages'][language_id] = child_product_language_data
attr_child = self.get_list_from_list_by_field_as_first_key(child_meta, 'meta_key', 'attribute_')
child_data['options'] = list()
child_data['attributes'] = list()
for attribute in attr_child:
# attribute
attribute_child_data = self.construct_product_attribute()
attr_name = to_str(attribute['meta_key']).replace('attribute_', '')
element_type = 'tax_' + attr_name
attr_name = attr_name.replace('pa_', '')
attr_name = attr_name.strip()
option_id = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_id')
attribute_child_data['option_id'] = option_id if option_id else ''
option_name = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label')
attribute_child_data['option_name'] = option_name if option_name else attr_name
option_code = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name')
attribute_child_data['option_code'] = option_code if option_code else attr_name.lower()
option_type = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type')
# attribute_child_data['option_type'] = option_type if option_type else 'select'
attribute_child_data['option_type'] = self.OPTION_SELECT
option_group = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby')
attribute_child_data['option_group'] = option_group if option_group else 'menu_order'
# attribute language
child_attribute_language_data = self.construct_product_option_lang()
child_attribute_language_data['option_name'] = attribute_child_data['option_name']
language_id = self._notice['src']['language_default']
attribute_child_data['option_languages'][language_id] = child_attribute_language_data
# values
attribute_child_data['option_value_id'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'term_id')
option_value_name = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name')
attribute_child_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else attribute['meta_value']
attribute_child_data['option_value_code'] = to_str(attribute['meta_value']).lower()
attribute_child_data['option_value_description'] = get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') if get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') else ''
language_id = self._notice['src']['language_default']
child_attribute_value_language_data = self.construct_product_option_value_lang()
child_attribute_value_language_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else attribute['meta_value']
attribute_child_data['option_value_languages'][language_id] = child_attribute_value_language_data
child_data['attributes'].append(attribute_child_data)
# options
child_option_data = self.construct_product_option()
child_option_data['id'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_id')
child_option_data['code'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name') else attr_name.lower()
child_option_data['option_name'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label') else attr_name
child_option_data['option_code'] = child_option_data['code']
child_option_data['option_group'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby') else 'menu_order'
# child_option_data['option_type'] = self.OPTION_SELECT
child_option_data['option_type'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type') else 'select'
child_option_data['required'] = 1
# option language
child_option_language_data = self.construct_product_option_lang()
child_option_language_data['option_name'] = attr_name
language_id = self._notice['src']['language_default']
child_option_data['option_languages'][language_id] = child_option_language_data
# value option
child_option_value_data = self.construct_product_option_value()
child_option_value_data['id'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'term_id')
child_option_value_data['code'] = attribute['meta_value']
child_option_value_data['option_value_code'] = attribute['meta_value']
child_option_value_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else child_option_value_data['code']
child_option_value_data['option_value_description'] = get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') if get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'name') else ''
# value language
child_option_value_language_data = self.construct_product_option_value_lang()
child_option_value_language_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name')
language_id = self._notice['src']['language_default']
child_option_value_data['option_value_languages'][language_id] = child_option_value_language_data
child_option_data['values'].append(child_option_value_data)
child_attributes[product_child['ID']][child_option_data['option_name']] = child_option_value_data['option_value_name']
all_child[to_str(product_child['ID'])] = child_data
# todo: bundle product - product bundle plugin: WPC Product Bundles for WooCommerce (Premium)
if self._notice['src']['support']['product_bundle']:
product_data['bundle_selection'] = list()
product_bundles = get_row_value_from_list_by_field(product_meta, 'meta_key', 'woosb_ids', 'meta_value')
if product_bundles:
product_data['type'] = self.PRODUCT_BUNDLE
product_bundle_list = to_str(product_bundles).split(',')
if product_bundle_list and to_len(product_bundle_list) > 0:
for product_bundle_child in product_bundle_list:
product_bundle_ids = to_str(product_bundle_child).split('/')
if product_bundle_ids and to_len(product_bundle_ids) > 0:
product_bundle_data = {
'product_id': product_bundle_ids[0],
'selection_qty': product_bundle_ids[1] if to_len(product_bundle_ids) > 1 else 1
}
product_data['bundle_selection'].append(product_bundle_data)
if self.is_woo2woo():
product_data['children'] = list(all_child.values())
else:
len_child = 1
for attribute_variant in attribute_variants:
len_child *= to_len(attribute_variant['option_value_name'].split(';'))
options_src = dict()
for attribute_variant in attribute_variants:
values = to_str(attribute_variant['option_value_name']).split(';')
option_data = self.construct_product_option()
option_data['id'] = attribute_variant['option_id']
option_data['option_name'] = attribute_variant['option_name']
option_data['option_code'] = attribute_variant['option_code']
option_data['option_type'] = 'select'
for value in values:
if len_child > self.VARIANT_LIMIT:
option_data_value = self.construct_product_option_value()
option_data_value['option_value_name'] = value
option_data['values'].append(option_data_value)
opt_val = {
'option_name': attribute_variant['option_name'],
'option_code': attribute_variant['option_code'],
'option_languages': attribute_variant['option_languages'],
'option_id': attribute_variant['option_id'],
'option_value_name': value,
}
if attribute_variant['option_name'] not in options_src:
options_src[attribute_variant['option_name']] = list()
options_src[attribute_variant['option_name']].append(opt_val)
if len_child > self.VARIANT_LIMIT:
product_data['options'].append(option_data)
if len_child <= self.VARIANT_LIMIT and child_attributes:
combinations = self.combination_from_multi_dict(options_src)
list_child = list()
if combinations:
for combination in combinations:
if not combination:
continue
children_id = None
check_any = False
for child_id, child in child_attributes.items():
if self.check_sync_child(child, combination) and child_id not in list_child:
children_id = child_id
list_child.append(child_id)
break
if not children_id:
for child_id, child in child_attributes.items():
if self.check_sync_child(child, combination, True) and child_id not in list_child:
children_id = child_id
check_any = True
break
if not children_id:
continue
child = copy.deepcopy(all_child[children_id])
child['attributes'] = list()
for attribute in combination:
attribute_data = self.construct_product_attribute()
attribute_data['option_name'] = attribute['option_name']
attribute_data['option_code'] = attribute['option_code']
attribute_data['option_languages'] = attribute['option_languages']
attribute_data['option_id'] = attribute['option_id']
attribute_data['option_value_name'] = attribute['option_value_name']
child['attributes'].append(attribute_data)
product_data['children'].append(child)
else:
if attribute_variants:
product_data['attributes'] = attribute_variants
return response_success(product_data)
def get_product_id_import(self, convert, product, products_ext):
    """Return the source-store identifier of the product row being imported."""
    source_product_id = product['ID']
    return source_product_id
def check_product_import(self, convert, product, products_ext):
    """Check whether this product was already migrated.

    Returns the mapped target-store product id (truthy) when a mapping row
    exists for the source id/code in the target default language, else a
    falsy value.
    """
    default_language = self._notice['target']['language_default']
    return self.get_map_field_by_src(
        self.TYPE_PRODUCT, convert['id'], convert['code'], lang=default_language
    )
def update_latest_data_product(self, product_id, convert, product, products_ext):
    """Refresh an already-migrated product on the target (WooCommerce) store.

    Rewrites, for target post ``product_id``: the post title, the
    category links (wp_term_relationships), the core product meta
    (price/sale price/stock/dimensions/tax class), the same meta for every
    mapped child variant, and the custom ``lecm_rewrite`` SEO redirects.
    For WPML installs the method recurses once per translated duplicate of
    the product found in the migration map table.

    :param product_id: target-store post ID of the product to refresh
    :param convert: normalized source product data (the "convert" dict)
    :param product: raw source product row (unused directly here, passed
                    through to the WPML recursion)
    :param products_ext: extra source data (passed through to recursion)
    :return: response_success()
    """
    # Accumulates insert/update queries; executed in one batch at the end.
    all_query = list()
    language_code = convert.get('language_code')
    if self.is_wpml() and not language_code:
        language_code = self._notice['target']['language_default']
    # todo: update product name
    # begin
    product_query = self.create_update_query_connector("posts", {'ID': product_id, 'post_title': convert['name']}, {'ID': product_id})
    all_query.append(product_query)
    # end
    # Previously imported URL key; used below to skip re-inserting an SEO
    # rewrite whose path has not changed.
    old_url_key = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['id'], convert['code'], 'code_desc')
    # todo: update product category
    # begin
    category_desc = self.select_all_category_map()
    all_categories = list()
    # Resolve each source category to its mapped target id, trying
    # (id, code), then (code only), then (id only).
    for category in convert['categories']:
        category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], lang = language_code)
        if not category_id:
            category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], lang = language_code)
        if not category_id:
            category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, lang = language_code)
        if category_id:
            all_categories.append(category_id)
    # De-duplicate mapped category ids.
    all_categories = list(set(all_categories))
    # todo: delete old category product
    # Remove every existing category link of this product (executed
    # immediately, before the batched re-inserts below).
    query_cate = {
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_term_relationships` WHERE `object_id` = " + to_str(product_id) + " AND `term_taxonomy_id` IN " + self.list_to_in_condition(category_desc) + ""
    }
    self.query_data_connector(query_cate, 'update_product')
    for cate_id in all_categories:
        query_cate_prod = {
            'type': 'select',
            'query': "SELECT * FROM `_DBPRF_term_relationships` WHERE `object_id` = " + to_str(product_id) + " AND `term_taxonomy_id` = " + to_str(cate_id) + ""
        }
        check_product_category = self.select_data_connector(query_cate_prod, 'category_product')
        # Only insert the link when it does not already exist.
        if (not check_product_category) or check_product_category['result'] != 'success' or (to_len(check_product_category['data']) == 0):
            category_data = {
                'object_id': product_id,
                'term_taxonomy_id': cate_id,
                'term_order': 0
            }
            category_query = self.create_insert_query_connector("term_relationships", category_data)
            all_query.append(category_query)
    # End
    # WooCommerce stock status string: explicit flag wins; otherwise a
    # managed-stock product with no flag is treated as out of stock.
    stock_status = 'instock'
    if 'is_in_stock' in convert:
        stock_status = 'instock' if convert['is_in_stock'] else 'outofstock'
    else:
        stock_status = 'outofstock' if convert['manage_stock'] else 'instock'
    tax_class = ''
    if convert['tax']['id'] or convert['tax']['code']:
        tax_class = self.get_map_field_by_src(self.TYPE_TAX, convert['tax']['id'], convert['tax']['code'], field = 'code_desc')
    # Core WooCommerce postmeta. A sale price is kept only when it has not
    # expired (end date in the future, or a zero/empty/None end date).
    product_meta = {
        '_stock_status': stock_status,
        '_downloadable': "yes" if convert['type'] == self.PRODUCT_DOWNLOAD else "no",
        '_virtual': "yes" if convert['type'] == self.PRODUCT_VIRTUAL else "no",
        '_regular_price': convert['price'],
        '_sale_price': convert['special_price']['price'] if convert['special_price']['price'] and (self.to_timestamp(convert['special_price']['end_date']) > time.time() or (convert['special_price']['end_date'] == '0000-00-00' or convert['special_price']['end_date'] == '0000-00-00 00:00:00') or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None) else "",
        '_tax_status': convert['tax'].get('status', ("taxable" if to_int(convert['tax']['id']) or convert['tax']['code'] else "none")),
        '_tax_class': tax_class if tax_class else '',
        '_weight': convert['weight'] if convert['weight'] else '',
        '_length': convert['length'] if convert['length'] else '',
        '_width': convert['width'] if convert['width'] else '',
        '_height': convert['height'] if convert['height'] else '',
        '_sku': convert['sku'],
        '_price': convert['special_price']['price'] if convert['special_price']['price'] and (self.to_timestamp(convert['special_price']['end_date']) > time.time() or (convert['special_price']['end_date'] == '0000-00-00' or convert['special_price']['end_date'] == '0000-00-00 00:00:00' or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None)) else convert['price'],
        '_manage_stock': "yes" if convert['manage_stock'] or convert['manage_stock'] == True else "no",
        '_stock': convert['qty'] if convert['qty'] else 0,
        # 'show_on_pos': '1' if convert['pos'] else 0,
    }
    # Sale-date window meta is only written while the sale is still valid.
    if convert['special_price']['start_date'] and (self.to_timestamp(convert['special_price']['end_date']) > time.time() or (convert['special_price']['end_date'] == '0000-00-00' or convert['special_price']['end_date'] == '0000-00-00 00:00:00' or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None)):
        product_meta['_sale_price_dates_from'] = self.to_timestamp(convert['special_price']['start_date'])
    if convert['special_price']['end_date'] and (self.to_timestamp(convert['special_price']['end_date']) > time.time() or (convert['special_price']['end_date'] == '0000-00-00' or convert['special_price']['end_date'] == '0000-00-00 00:00:00' or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None)):
        product_meta['_sale_price_dates_to'] = self.to_timestamp(convert['special_price']['end_date'])
    # First group price is mapped to the wholesale-plugin meta key.
    if 'group_prices' in convert and to_len(convert['group_prices']) > 0:
        product_meta['wholesale_customer_wholesale_price'] = convert['group_prices'][0]['price']
    all_meta_queries = list()
    for meta_key, meta_value in product_meta.items():
        meta_insert = {
            'post_id': product_id,
            'meta_key': meta_key,
            'meta_value': meta_value
        }
        # Sale-date rows may not exist yet, so insert them first when
        # missing; every key then gets an update query.
        if meta_key == '_sale_price_dates_from' or meta_key == '_sale_price_dates_to':
            # NOTE(review): meta_key is concatenated into the SQL without
            # quotes — confirm the connector tolerates this; it looks like
            # the check can never match and the insert always runs.
            query_meta_key = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_postmeta` WHERE `post_id` = " + to_str(product_id) + " AND `meta_key` = " + to_str(meta_key) + ""
            }
            check_meta_key = self.select_data_connector(query_meta_key, 'postmeta')
            if (not check_meta_key) or check_meta_key['result'] != 'success' or (not check_meta_key['data']) or (to_len(check_meta_key['data']) == 0):
                sale_price_data = {
                    'post_id': product_id,
                    'meta_key': meta_key,
                    'meta_value': meta_value
                }
                meta_price_query = self.create_insert_query_connector("postmeta", sale_price_data)
                all_query.append(meta_price_query)
        meta_query = self.create_update_query_connector("postmeta", meta_insert, {'post_id': product_id, 'meta_key': meta_key})
        all_query.append(meta_query)
    # todo: update children
    # Use the explicit children list when present; otherwise expand the
    # option matrix into synthetic children (bounded by VARIANT_LIMIT).
    children_list = list()
    option_list = list()
    if convert['children']:
        children_list = convert['children']
    else:
        if convert['options']:
            option_list = convert['options']
            if self.count_child_from_option(convert['options']) <= self.VARIANT_LIMIT:
                children_list = self.convert_option_to_child(option_list, convert)
    if children_list and to_len(children_list) <= self.VARIANT_LIMIT:
        for key_child, product_child in enumerate(children_list):
            # Skip children that were never mapped to a target variation.
            children_id = self.get_map_field_by_src(self.TYPE_CHILD, product_child['id'], product_child['code'], lang = language_code)
            if not children_id:
                continue
            if product_child.get('is_in_stock'):
                child_stock_status = 'instock' if product_child['is_in_stock'] else 'outofstock'
            else:
                child_stock_status = 'outofstock' if product_child['manage_stock'] else 'instock'
            # NOTE(review): the sale-validity conditions below mix the
            # child's end date with the parent's (convert['special_price'])
            # — presumably a copy/paste slip; confirm intended behavior.
            children_meta = {
                '_stock_status': child_stock_status,
                '_sku': product_child['sku'] if product_child['sku'] else '',
                '_weight': product_child['weight'] if product_child['weight'] else '',
                '_length': product_child['length'] if product_child['length'] else '',
                '_width': product_child['width'] if product_child['width'] else '',
                '_height': product_child['height'] if product_child['height'] else '',
                '_manage_stock': "yes" if product_child['manage_stock'] else "no",
                '_stock': product_child['qty'] if product_child['qty'] else 0,
                '_regular_price': product_child['price'],
                '_sale_price': product_child['special_price']['price'] if product_child['special_price']['price'] and (self.to_timestamp(product_child['special_price']['end_date']) > time.time() or (product_child['special_price']['end_date'] == '0000-00-00' or product_child['special_price']['end_date'] == '0000-00-00 00:00:00' or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None)) else product_child['price'],
                '_price': product_child['special_price']['price'] if product_child['special_price']['price'] and (self.to_timestamp(product_child['special_price']['end_date']) > time.time() or (product_child['special_price']['end_date'] == '0000-00-00' or product_child['special_price']['end_date'] == '0000-00-00 00:00:00' or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None)) else product_child['price'],
            }
            if product_child['special_price']['price'] and (self.to_timestamp(product_child['special_price']['end_date']) > time.time() or (product_child['special_price']['end_date'] == '0000-00-00' or product_child['special_price']['end_date'] == '0000-00-00 00:00:00' or convert['special_price']['end_date'] == '' or convert['special_price']['end_date'] == None)):
                if product_child['special_price']['start_date']:
                    children_meta['_sale_price_dates_from'] = self.to_timestamp(product_child['special_price']['start_date'])
                if product_child['special_price']['end_date']:
                    children_meta['_sale_price_dates_to'] = self.to_timestamp(product_child['special_price']['end_date'])
            # Same insert-if-missing / always-update pattern as the parent.
            for meta_key, meta_value in children_meta.items():
                meta_insert_child = {
                    'post_id': children_id,
                    'meta_key': meta_key,
                    'meta_value': meta_value
                }
                if meta_key == '_sale_price_dates_from' or meta_key == '_sale_price_dates_to':
                    query_meta_key = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_postmeta` WHERE `post_id` = " + to_str(children_id) + " AND `meta_key` = " + to_str(meta_key) + ""
                    }
                    check_meta_key = self.select_data_connector(query_meta_key, 'postmeta')
                    if (not check_meta_key) or check_meta_key['result'] != 'success' or (not check_meta_key['data']) or (to_len(check_meta_key['data']) == 0):
                        sale_price_data = {
                            'post_id': children_id,
                            'meta_key': meta_key,
                            'meta_value': meta_value
                        }
                        meta_price_query = self.create_insert_query_connector("postmeta", sale_price_data)
                        all_query.append(meta_price_query)
                meta_query_child = self.create_update_query_connector('postmeta', meta_insert_child, {'post_id': children_id, 'meta_key': meta_key})
                all_query.append(meta_query_child)
    # todo: seo
    # begin
    # Rebuild the lecm_rewrite SEO redirects: wipe this product's rows,
    # then re-insert any changed, not-yet-present request paths.
    if self.is_exist_lecm_rewrite():
        if (self._notice['config']['seo'] or self._notice['config']['seo_301']) and convert['seo']:
            delete_query = list()
            delete_query.append(self.create_delete_query_connector('lecm_rewrite', {'type': 'product', 'type_id': product_id}))
            self.query_multiple_data_connector(delete_query)
            for seo_url in convert['seo']:
                if not seo_url['request_path']:
                    continue
                # Skip paths identical (ignoring spaces) to the stored key.
                if old_url_key != seo_url['request_path'].replace(' ', ''):
                    query_check = {
                        'link': seo_url['request_path']
                    }
                    if self.is_wpml() and convert.get('language_code'):
                        query_check['lang'] = convert['language_code']
                    seo_query = {
                        'type': 'select',
                        'query': "SELECT * FROM _DBPRF_lecm_rewrite WHERE " + self.dict_to_where_condition(query_check)
                    }
                    check_seo_exit = self.select_data_connector(seo_query, 'lecm_rewrite')
                    if check_seo_exit and check_seo_exit['result'] == 'success' and to_len(check_seo_exit['data']) > 0:
                        continue
                    else:
                        le_url_rewrite = {
                            'link': to_str(seo_url['request_path']).rstrip('/'),
                            'type': 'product',
                            'type_id': product_id
                        }
                        if self.is_wpml():
                            le_url_rewrite['lang'] = convert.get('language_code')
                        if self._notice['config']['seo_301']:
                            le_url_rewrite['redirect_type'] = 301
                        self.import_data_connector(self.create_insert_query_connector("lecm_rewrite", le_url_rewrite), 'seo_product')
    # Flush the accumulated insert/update queries in one batch.
    self.import_multiple_data_connector(all_query, 'update_product')
    # WPML: recurse into every translated duplicate of this product
    # (guarded by language_code so the recursion is one level deep).
    if self.is_wpml() and not convert.get('language_code'):
        where_product_wpml = {
            'migration_id': self._migration_id,
            'type': 'product',
        }
        if convert['id']:
            where_product_wpml['id_src'] = convert['id']
        else:
            where_product_wpml['code'] = convert['code']
        product_wpml = self.select_obj(TABLE_MAP, where_product_wpml)
        if product_wpml['result'] == 'success' and product_wpml['data']:
            for product_wpml_row in product_wpml['data']:
                if product_wpml_row['id_desc'] == product_id or not product_wpml_row.get('lang'):
                    continue
                convert_wpml = self.get_convert_data_language(convert, target_language_id = language_code)
                convert_wpml['language_code'] = product_wpml_row['lang']
                self.update_latest_data_product(product_wpml_row['id_desc'], convert_wpml, product, products_ext)
    return response_success()
def update_product_after_demo(self, product_id, convert, product, products_ext):
    """Re-link a demo-imported product to its real categories, brand and tax class.

    Deletes the product's existing ``product_cat``/``product_brand`` term
    relationships on the target store, then re-inserts links for every
    mapped category, links (creating if necessary) the manufacturer brand
    term, and updates the ``_tax_class`` postmeta. All queries are flushed
    in one batch at the end.

    :param product_id: target-store post ID of the product
    :param convert: normalized source product data
    :param product: raw source product row (unused)
    :param products_ext: extra source data (unused)
    :return: response_success(), or response_warning() when creating a new
             manufacturer term fails
    """
    language_code = convert.get('language_code')
    if self.is_wpml() and not language_code:
        language_code = self._notice['target']['language_default']
    all_queries = list()
    # Drop every existing category/brand link before re-inserting.
    query_delete = {
        'type': 'delete',
        'query': 'DELETE FROM _DBPRF_term_relationships WHERE object_id = ' + to_str(product_id) + ' AND term_taxonomy_id IN (SELECT term_taxonomy_id FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['product_brand', 'product_cat']) + ')'
    }
    all_queries.append(query_delete)
    # category: resolve each source category via (id, code), then code-only,
    # then id-only lookups, and link the de-duplicated result set.
    all_categories = list()
    if convert['categories']:
        for category in convert['categories']:
            category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], language_code)
            if not category_id:
                category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], language_code)
            if not category_id:
                category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, language_code)
            if category_id:
                all_categories.append(category_id)
        all_categories = list(set(all_categories))
        for cate_id in all_categories:
            category_data = {
                'object_id': product_id,
                'term_taxonomy_id': cate_id,
                'term_order': 0
            }
            category_query = self.create_insert_query_connector("term_relationships", category_data)
            all_queries.append(category_query)
    if self._notice['target']['support']['manufacturers']:
        if convert['manufacturer']['id'] or convert['manufacturer']['name']:
            # NOTE(review): the fallback lookup passes the manufacturer *id*
            # in the code slot — confirm whether the name was intended.
            manufacturer_id = self.get_map_field_by_src(self.TYPE_MANUFACTURER, convert['manufacturer']['id'])
            if not manufacturer_id:
                manufacturer_id = self.get_map_field_by_src(self.TYPE_MANUFACTURER, None, convert['manufacturer']['id'])
            if manufacturer_id:
                # Brand already mapped: just link it.
                relationship_data = {
                    'object_id': product_id,
                    'term_taxonomy_id': manufacturer_id,
                    'term_order': 0
                }
                relationship_query = self.create_insert_query_connector("term_relationships", relationship_data)
                all_queries.append(relationship_query)
            elif convert['manufacturer']['name']:
                # No mapping yet: create the brand term + taxonomy rows.
                slug = self.sanitize_title(convert['manufacturer']['name'])
                manufacturer_term = {
                    'name': convert['manufacturer']['name'],
                    'slug': slug,
                    'term_group': 0,
                }
                manufacturer_term_query = self.create_insert_query_connector('terms', manufacturer_term)
                term_id = self.import_data_connector(manufacturer_term_query, 'manufacturer')
                if not term_id:
                    return response_warning('Manufacturer ' + to_str(convert['id']) + ' import false.')
                manufacturer_taxonomy = {
                    'term_id': term_id,
                    'taxonomy': 'product_brand',
                    'description': '',
                    'parent': 0,
                    'count': 0
                }
                manufacturer_taxonomy_query = self.create_insert_query_connector('term_taxonomy', manufacturer_taxonomy)
                manufacturer_taxonomy_import = self.import_manufacturer_data_connector(manufacturer_taxonomy_query, True, convert['id'])
                if manufacturer_taxonomy_import:
                    # Bug fix: link the newly created taxonomy id. The old
                    # code used `manufacturer_id`, which is always falsy in
                    # this branch (it is only reached when the lookup above
                    # failed), so the relationship row was inserted with an
                    # invalid term_taxonomy_id.
                    relationship_data = {
                        'object_id': product_id,
                        'term_taxonomy_id': manufacturer_taxonomy_import,
                        'term_order': 0
                    }
                    relationship_query = self.create_insert_query_connector("term_relationships", relationship_data)
                    all_queries.append(relationship_query)
                    self.insert_map(self.TYPE_MANUFACTURER, convert['manufacturer']['id'], manufacturer_taxonomy_import, convert['manufacturer']['name'])
    # Update the WooCommerce tax class meta when the tax is mapped.
    if convert['tax']['id'] or convert['tax']['code']:
        tax_class = self.get_map_field_by_src(self.TYPE_TAX, convert['tax']['id'], convert['tax']['code'], 'code_desc')
        if tax_class:
            meta_insert = {
                'post_id': product_id,
                'meta_key': '_tax_class',
                'meta_value': tax_class
            }
            where_meta = {
                'post_id': product_id,
                'meta_key': '_tax_class',
            }
            all_queries.append(self.create_update_query_connector('postmeta', meta_insert, where_meta))
    self.import_multiple_data_connector(all_queries, 'update_product')
    return response_success()
def router_product_import(self, convert, product, products_ext):
    """Choose which routine imports this product; always routes to the
    default 'product_import' handler."""
    return response_success('product_import')
def before_product_import(self, convert, product, products_ext):
    """Pre-import hook for a product; no-op in this cart (always succeeds)."""
    return response_success()
def product_import(self, convert, product, products_ext):
    """Create a WooCommerce product from the cart-neutral `convert` dict.

    Inserts the `posts` row, records the id mapping, uploads/attaches the
    thumbnail, writes the standard WooCommerce `postmeta` keys, then links
    the post to the 'product_type' taxonomy and to each migrated category.

    :return: response_success(new_product_post_id)
    """
    product_data = {
        'post_author': 1,
        'post_date': convert['created_at'] if convert['created_at'] else get_current_time(),
        'post_date_gmt': convert['created_at'] if convert['created_at'] else get_current_time(),
        'post_content': convert['description'],
        'post_title': convert['name'],
        'post_excerpt': '',
        'post_status': 'publish' if convert['status'] else 'closed',
        'comment_status': 'open',
        'ping_status': 'closed',
        'post_password': '',
        'post_name': convert['name'],
        'to_ping': '',
        'pinged': '',
        'post_modified': convert['updated_at'] if convert['updated_at'] else get_current_time(),
        'post_modified_gmt': convert['updated_at'] if convert['updated_at'] else get_current_time(),
        'post_content_filtered': '',
        'post_parent': 0,
        'guid': '',
        'menu_order': 0,
        'post_type': 'product',
        'post_mime_type': '',
        'comment_count': 0,
    }
    id_product = self.import_product_data_connector(self.create_insert_query_connector('posts', product_data))
    self.insert_map(self.TYPE_PRODUCT, convert['id'], id_product, convert['code'])
    thumbnail_id = False
    product_image = ''
    if convert['thumb_image']['url'] or convert['thumb_image']['path']:
        image_process = self.process_image_before_import(convert['thumb_image']['url'],
                                                         convert['thumb_image']['path'])
        image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(
            self.make_woocommerce_image_path(image_process['path'], self.TYPE_PRODUCT),
            self._notice['target']['config']['image_product'].rstrip('/')))
        if image_import_path:
            product_image = self.remove_prefix_path(image_import_path,
                                                    self._notice['target']['config']['image_product'])
            image_details = self.get_sizes(image_process['url'])
            thumbnail_id = self.wp_image(product_image, image_details, convert=convert)
    product_meta = {
        '_edit_lock': '',
        '_edit_last': 1,
        '_thumbnail_id': thumbnail_id,
        '_regular_price': convert['price'],
        '_sale_price': convert['price'],
        'total_sales': 0,
        '_tax_status': 'taxable',
        '_tax_class': '',
        '_manage_stock': 'yes' if convert['manage_stock'] else 'no',
        '_backorders': 'no',
        '_sold_individually': 'yes',
        '_virtual': 'no',
        '_downloadable': 'no',
        '_download_limit': -1,
        '_download_expiry': -1,
        '_stock': convert['qty'],
        '_stock_status': 'instock' if convert['qty'] else 'outofstock',
        '_wc_average_rating': '',
        '_wc_review_count': 0,
        '_product_version': '4.5.1',
        '_price': convert['price'],
        '_weight': convert['weight'],
        '_length': convert['length'],
        '_width': convert['width'],
        '_height': convert['height'],
        '_sku': convert['sku'],
    }
    for meta_key, meta_value in product_meta.items():
        meta_data = {
            'post_id': id_product,
            'meta_key': meta_key,
            'meta_value': meta_value
        }
        self.import_data_connector(self.create_insert_query_connector('postmeta', data=meta_data), 'products')
    # Link the post to the 'product_type' taxonomy exactly once. The previous
    # version inserted this relation inside the category loop, which produced
    # one duplicate term_relationships row per category and skipped the
    # relation entirely for products without categories.
    query = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_term_taxonomy WHERE taxonomy = 'product_type'"
    }
    product_type_rows = self.select_data_connector(query)['data']
    if product_type_rows:
        term_relate = {
            'object_id': id_product,
            'term_taxonomy_id': product_type_rows[0]['term_taxonomy_id']
        }
        self.import_data_connector(self.create_insert_query_connector('term_relationships', data=term_relate), 'products')
    for category in convert['categories']:
        id_category = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'])
        if not id_category:
            # Category was not migrated; skip instead of issuing a broken
            # "term_id = False" query.
            continue
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy WHERE term_id = " + to_str(id_category)
        }
        taxonomy_rows = self.select_data_connector(query)['data']
        if not taxonomy_rows:
            continue
        term_relate = {
            'object_id': id_product,
            'term_taxonomy_id': taxonomy_rows[0]['term_taxonomy_id']
        }
        self.import_data_connector(self.create_insert_query_connector('term_relationships', data=term_relate), 'products')
    return response_success(id_product)
def after_product_import(self, product_id, convert, product, products_ext):
    """Post-import hook for a product; no-op in this cart (always succeeds)."""
    return response_success()
def addition_product_import(self, convert, product, products_ext):
    """Hook for extra product-related imports; no-op in this cart."""
    return response_success()
def finish_product_import(self):
    """Finalize the product pass; persist a flag when the variant limit
    was reached during import."""
    notice_config = self._notice['config']
    if self.is_variant_limit:
        notice_config['variant_limit'] = True
    return response_success()
# TODO: CUSTOMER
# def prepare_customers_import(self):
# return self
#
# def prepare_customers_export(self):
# return self
def prepare_customers_import(self):
    """Prepare the target store before importing customers.

    When password migration is enabled ('cus_pass'), refresh the LEPP_*
    options in the target `options` table (source cart type and URL) so the
    password plugin can validate legacy credentials. Returns self for
    chaining.
    """
    if self._notice['config'].get('cus_pass'):
        # Remove any stale plugin options before re-inserting current values.
        delete_query = {
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'LEPP_TYPE' OR option_name = 'LEPP_URL'"
        }
        # Result intentionally ignored: best-effort cleanup (the original
        # bound it to an unused local).
        self.import_data_connector(delete_query)
        all_queries = list()
        type_data = {
            'option_name': 'LEPP_TYPE',
            'option_value': self._notice['src']['cart_type'],
            'autoload': 'yes'
        }
        type_query = self.create_insert_query_connector('options', type_data)
        all_queries.append(type_query)
        url_data = {
            'option_name': 'LEPP_URL',
            'option_value': self._notice['src']['cart_url'],
            'autoload': 'yes'
        }
        url_query = self.create_insert_query_connector('options', url_data)
        all_queries.append(url_query)
        if all_queries:
            self.import_multiple_data_connector(all_queries, 'customer')
    return self
def get_customers_main_export(self):
    """Fetch the next batch of source users holding the 'customer' or
    'subscriber' capability, ordered by ID after the last exported one."""
    id_src = self._notice['process']['customers']['id_src']
    limit = self._notice['setting']['customers']
    prefix = self._notice['src']['config']['table_prefix']
    # Multisite source: strip the blog-id segment so the query targets the
    # shared base users/usermeta tables.
    if self._notice['src']['config'].get('site_id'):
        prefix = to_str(prefix).replace(to_str(self._notice['src']['config'].get('site_id')) + '_', '')
    # NOTE(review): SQL AND binds tighter than OR, so the parenthesised filter
    # evaluates as (meta_key = capabilities AND value LIKE '%customer%') OR
    # (value LIKE '%subscriber%') — the subscriber test is NOT restricted to
    # the capabilities meta_key. Confirm this grouping is intended.
    query = {
        'type': 'select',
        'query': "SELECT * FROM " + prefix + "users u LEFT JOIN " + prefix + "usermeta um ON u.ID = um.user_id WHERE (um.meta_key = '_DBPRF_capabilities' AND um.meta_value LIKE '%customer%' OR um.meta_value LIKE '%subscriber%') AND ID > " + to_str(id_src) + " ORDER BY ID ASC LIMIT " + to_str(limit)
    }
    customers = self.select_data_connector(query, 'customers')
    if not customers or customers['result'] != 'success':
        return response_error()
    return customers
def get_customers_ext_export(self, customers):
    """Fetch per-customer satellite data for a batch of exported users.

    Always pulls the users' `usermeta` rows; when the source supports the
    WooCommerce Points and Rewards plugin, also pulls the non-order-bound
    point balances and their log entries.

    :param customers: result of get_customers_main_export() (rows in 'data').
    :return: connector result dict, or response_error() on failure.
    """
    customers_ids = duplicate_field_value_from_list(customers['data'], 'ID')
    customer_ext_queries = {
        'user_meta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_usermeta WHERE user_id IN " + self.list_to_in_condition(
                customers_ids),
        }
    }
    if self._notice['src']['support'].get('customer_point_rewards'):
        # Only points not attached to an order; order-bound points travel
        # with the orders export instead.
        customer_ext_queries['wc_points_rewards_user_points'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points WHERE (order_id IS NULL OR order_id = '') AND user_id IN " + self.list_to_in_condition(customers_ids),
        }
        customer_ext_queries['wc_points_rewards_user_points_log'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points_log WHERE (order_id IS NULL OR order_id = '') AND user_id IN " + self.list_to_in_condition(customers_ids),
        }
    customers_ext = self.select_multiple_data_connector(customer_ext_queries, 'customers')
    if not customers_ext or customers_ext['result'] != 'success':
        return response_error()
    return customers_ext
def convert_customer_export(self, customer, customers_ext):
    """Map a WordPress user row (plus its usermeta) onto the cart-neutral
    customer structure used by the migration pipeline.

    Builds the base profile, an optional billing address ('billing_*' meta
    keys), an optional shipping address ('shipping_*' meta keys), and — when
    the source supports WooCommerce Points and Rewards — the customer's
    point balances and logs.

    :param customer: row from the source `users` table.
    :param customers_ext: satellite data from get_customers_ext_export().
    :return: response_success(customer_data)
    """
    customer_data = self.construct_customer()
    customer_data = self.add_construct_default(customer_data)
    customer_data['id'] = customer['ID']
    customer_data['code'] = customer['user_login']
    customer_data['username'] = customer['user_nicename']
    customer_data['email'] = customer['user_email']
    customer_data['password'] = customer['user_pass']
    customer_data['website'] = customer['user_url']
    customer_data['user_url'] = customer['user_url']
    customer_data['active'] = True
    customer_data['created_at'] = convert_format_time(customer['user_registered'])
    # All usermeta rows belonging to this user.
    customer_meta = get_list_from_list_by_field(customers_ext['data']['user_meta'], 'user_id', customer['ID'])
    customer_data['first_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'first_name', 'meta_value')
    customer_data['last_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'last_name', 'meta_value')
    prefix = self._notice['src']['config']['table_prefix']
    # NOTE(review): if table_prefix already ends with '_' (e.g. 'wp_') this
    # produces 'wp__capabilities' instead of 'wp_capabilities' — confirm the
    # stored prefix format before relying on this lookup.
    capabilities = to_str(prefix) + '_capabilities'
    customer_data['capabilities'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', capabilities, 'meta_value')
    # billing
    address_data = self.construct_customer_address()
    address_data['code'] = to_str(customer['ID']) + "_1"
    address_data['first_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_first_name', 'meta_value')
    address_data['last_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_last_name', 'meta_value')
    address_data['address_1'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_address_1', 'meta_value')
    address_data['address_2'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_address_2', 'meta_value')
    address_data['city'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_city', 'meta_value')
    address_data['postcode'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_postcode', 'meta_value')
    address_data['telephone'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_phone', 'meta_value')
    address_data['company'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_company', 'meta_value')
    address_data['fax'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_fax', 'meta_value')
    address_data['country']['country_code'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_country', 'meta_value')
    address_data['country']['code'] = address_data['country']['country_code']
    address_data['country']['name'] = self.get_country_name_by_code(address_data['country']['country_code'])
    address_data['state']['state_code'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_state', 'meta_value')
    address_data['state']['code'] = address_data['state']['state_code']
    address_data['default']['billing'] = True
    # Only keep the billing address when it actually carries a street line.
    if address_data['address_1'] or address_data['address_2']:
        customer_data['address'].append(address_data)
    # shipping
    shipping_address = self.get_list_from_list_by_field_as_first_key(customer_meta, 'meta_key', 'shipping_')
    if shipping_address:
        shipping_data = self.construct_customer_address()
        shipping_data['code'] = to_str(customer['ID']) + "_2"
        shipping_data['first_name'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_first_name', 'meta_value')
        shipping_data['last_name'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_last_name', 'meta_value')
        shipping_data['address_1'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_address_1', 'meta_value')
        shipping_data['address_2'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_address_2', 'meta_value')
        shipping_data['city'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_city', 'meta_value')
        shipping_data['postcode'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_postcode', 'meta_value')
        shipping_data['telephone'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_phone', 'meta_value')
        shipping_data['company'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_company', 'meta_value')
        shipping_data['fax'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_fax', 'meta_value')
        shipping_data['country']['country_code'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_country', 'meta_value')
        shipping_data['country']['code'] = shipping_data['country']['country_code']
        shipping_data['country']['name'] = self.get_country_name_by_code(shipping_data['country']['code'])
        shipping_data['state']['state_code'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_state', 'meta_value')
        shipping_data['state']['code'] = shipping_data['state']['state_code']
        shipping_data['default']['shipping'] = True
        if shipping_data['address_1'] or shipping_data['address_2']:
            customer_data['address'].append(shipping_data)
    # TODO: Plugin WooCommerce Points and Rewards
    if self._notice['src']['support'].get('customer_point_rewards'):
        customer_point_rewards = dict()
        customer_point_rewards['reward_point'] = list()
        customer_point_rewards['reward_point_log'] = list()
        customer_point_rewards['points_balance'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'wc_points_balance', 'meta_value')
        wc_points_rewards_user_points = get_list_from_list_by_field(customers_ext['data']['wc_points_rewards_user_points'], 'user_id', customer['ID'])
        if wc_points_rewards_user_points:
            for points_rewards_user_points in wc_points_rewards_user_points:
                reward_point = dict()
                reward_point['points'] = points_rewards_user_points['points']
                reward_point['points_balance'] = points_rewards_user_points['points_balance']
                reward_point['order_id'] = points_rewards_user_points['order_id']
                reward_point['created_at'] = points_rewards_user_points['date']
                customer_point_rewards['reward_point'].append(reward_point)
        wc_points_rewards_user_points_log = get_list_from_list_by_field(customers_ext['data']['wc_points_rewards_user_points_log'], 'user_id', customer['ID'])
        if wc_points_rewards_user_points_log:
            for points_rewards_user_points_log in wc_points_rewards_user_points_log:
                reward_point_log = dict()
                reward_point_log['points'] = points_rewards_user_points_log['points']
                reward_point_log['type'] = points_rewards_user_points_log['type']
                reward_point_log['user_points_id'] = points_rewards_user_points_log['user_points_id']
                reward_point_log['order_id'] = points_rewards_user_points_log['order_id']
                reward_point_log['admin_user_id'] = points_rewards_user_points_log['admin_user_id']
                reward_point_log['data'] = points_rewards_user_points_log['data']
                reward_point_log['created_at'] = points_rewards_user_points_log['date']
                customer_point_rewards['reward_point_log'].append(reward_point_log)
        customer_data['point_rewards'] = customer_point_rewards
    return response_success(customer_data)
def get_customer_id_import(self, convert, customer, customers_ext):
    """Return the source-side identifier of the customer being imported."""
    source_id = customer['ID']
    return source_id
def check_customer_import(self, convert, customer, customers_ext):
    """Return True when this customer already has a migration map entry."""
    mapped = self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['id'], convert['code'])
    return bool(mapped)
def router_customer_import(self, convert, customer, customers_ext):
    """Choose which routine imports this customer; always routes to the
    default 'customer_import' handler."""
    return response_success('customer_import')
def before_customer_import(self, convert, customer, customers_ext):
    """Pre-import hook for a customer; no-op in this cart (always succeeds)."""
    return response_success()
def customer_import(self, convert, customer, customers_ext):
    """Insert the migrated customer as a WordPress user plus its usermeta.

    Creates the `users` row, records the id mapping, then writes one
    `usermeta` row per standard WP profile key (role fixed to 'customer').

    :return: response_success(new_user_id)
    """
    user_row = {
        'user_login': convert['username'],
        'user_pass': convert['password'],
        'user_nicename': convert['username'],
        'user_email': convert['email'],
        'user_url': '',
        'user_registered': convert['created_at'],
        'user_activation_key': '',
        'user_status': True,
        'display_name': convert['first_name'],
    }
    insert_user = self.create_insert_query_connector('users', user_row)
    id_customer = self.import_customer_data_connector(insert_user)
    self.insert_map(self.TYPE_CUSTOMER, convert['id'], id_customer, convert['code'])
    # NOTE(review): the capabilities meta key is hard-coded with the 'wp_'
    # prefix; confirm the target always uses that table prefix.
    meta_fields = {
        'nickname': convert['username'],
        'first_name': convert['first_name'],
        'last_name': convert['last_name'],
        'description': '',
        'rich_editing': True,
        'syntax_highlighting': True,
        'comment_shortcuts': False,
        'admin_color': 'fresh',
        'use_ssl': 0,
        'show_admin_bar_front': True,
        'locale': '',
        'wp_capabilities': 'a:1:{s:8:"customer";b:1;}',
        'dismissed_wp_pointers': ''
    }
    for field_name, field_value in meta_fields.items():
        meta_row = {
            'user_id': id_customer,
            'meta_key': field_name,
            'meta_value': field_value
        }
        self.import_data_connector(self.create_insert_query_connector('usermeta', data=meta_row), 'customers')
    return response_success(id_customer)
def after_customer_import(self, customer_id, convert, customer, customers_ext):
    """Post-import hook for a customer; no-op in this cart (always succeeds)."""
    return response_success()
def addition_customer_import(self, convert, customer, customers_ext):
    """Hook for extra customer-related imports; no-op in this cart."""
    return response_success()
# TODO: ORDER
def prepare_orders_import(self):
    """Pre-import hook for the orders pass; no-op, returns self for chaining."""
    return self
def prepare_orders_export(self):
    """Pre-export hook for the orders pass; no-op, returns self for chaining."""
    return self
def get_orders_main_export(self):
    """Fetch the next batch of 'shop_order' posts, ordered by ID after the
    last exported one; drafts and refund shadows are excluded."""
    last_exported_id = self._notice['process']['orders']['id_src']
    batch_size = self._notice['setting']['orders']
    orders_query = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'shop_order' AND post_status NOT IN ('inherit','auto-draft') AND ID > " + to_str(
            last_exported_id) + " ORDER BY ID ASC LIMIT " + to_str(batch_size)
    }
    orders = self.select_data_connector(orders_query, 'orders')
    if not orders or orders['result'] != 'success':
        return response_error()
    return orders
def get_orders_ext_export(self, orders):
    """Fetch satellite data for a batch of exported orders in three rounds.

    Round 1: order line items, order notes (comments), refund posts, and
    order postmeta. Round 2 (keyed off round-1 ids): item meta, note meta,
    postmeta for orders + refunds, the purchasing users and their names,
    and — when supported — points-and-rewards rows tied to these orders.
    Round 3: postmeta of the purchased products. Each round's results are
    merged into one connector result via sync_connector_object.

    :param orders: result of get_orders_main_export() (rows in 'data').
    :return: merged connector result dict, or response_error() on failure.
    """
    url_query = self.get_connector_url('query')
    order_ids = duplicate_field_value_from_list(orders['data'], 'ID')
    customer_ext_queries = {
        'woocommerce_order_items': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_woocommerce_order_items WHERE order_id IN " + self.list_to_in_condition(
                order_ids),
        },
        'order_note': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_comments WHERE comment_post_ID IN " + self.list_to_in_condition(
                order_ids),
        },
        'order_refund': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'shop_order_refund' AND post_parent IN " + self.list_to_in_condition(
                order_ids),
        },
        'order_meta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(order_ids),
        },
    }
    orders_ext = self.select_multiple_data_connector(customer_ext_queries, 'orders')
    if not orders_ext or orders_ext['result'] != 'success':
        return response_error()
    # Ids feeding the second round of queries.
    order_item_ids = duplicate_field_value_from_list(orders_ext['data']['woocommerce_order_items'], 'order_item_id')
    comment_ids = duplicate_field_value_from_list(orders_ext['data']['order_note'], 'comment_ID')
    refund_ids = duplicate_field_value_from_list(orders_ext['data']['order_refund'], 'ID')
    # Postmeta is needed for both the orders and their refund posts.
    post_meta_ids = list(set(refund_ids + order_ids))
    # Customer ids are stored in the '_customer_user' order meta.
    cus_list = get_list_from_list_by_field(orders_ext['data']['order_meta'], 'meta_key', '_customer_user')
    cus_ids = list()
    if cus_list:
        cus_ids = duplicate_field_value_from_list(cus_list, 'meta_value')
    orders_ext_rel_queries = {
        'woocommerce_order_itemmeta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_woocommerce_order_itemmeta WHERE order_item_id IN " + self.list_to_in_condition(order_item_ids),
        },
        'order_note_meta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_commentmeta WHERE comment_id IN " + self.list_to_in_condition(comment_ids),
        },
        'postmeta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(post_meta_ids),
        },
        'user': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_users WHERE ID IN " + self.list_to_in_condition(cus_ids),
        },
        'user_meta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_usermeta WHERE meta_key IN ('first_name','last_name') AND user_id IN " + self.list_to_in_condition(cus_ids),
        }
    }
    if self._notice['src']['support'].get('customer_point_rewards'):
        # Order-bound points only; user-level points travel with customers.
        orders_ext_rel_queries['wc_points_rewards_user_points'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points WHERE order_id IN " + self.list_to_in_condition(order_ids),
        }
        orders_ext_rel_queries['wc_points_rewards_user_points_log'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points_log WHERE order_id IN " + self.list_to_in_condition(order_ids),
        }
    orders_ext_rel = self.select_multiple_data_connector(orders_ext_rel_queries, 'orders')
    if not orders_ext_rel or orders_ext_rel['result'] != 'success':
        return response_error()
    orders_ext = self.sync_connector_object(orders_ext, orders_ext_rel)
    # Third round: meta of the products referenced by the line items.
    pro_list = get_list_from_list_by_field(orders_ext_rel['data']['woocommerce_order_itemmeta'], 'meta_key', '_product_id')
    pro_ids = duplicate_field_value_from_list(pro_list, 'meta_value')
    orders_ext_third_rel_queries = {
        'products_meta': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(pro_ids),
        },
    }
    # NOTE(review): this round goes through the raw connector endpoint rather
    # than select_multiple_data_connector like the two above — confirm the
    # difference is intentional.
    orders_ext_third_rel = self.get_connector_data(url_query, {'serialize': True, 'query': json.dumps(orders_ext_third_rel_queries)})
    if not orders_ext_third_rel or orders_ext_third_rel['result'] != 'success':
        return response_error()
    orders_ext = self.sync_connector_object(orders_ext, orders_ext_third_rel)
    return orders_ext
def convert_order_export(self, order, orders_ext):
order_data = self.construct_order()
order_data = self.add_construct_default(order_data)
order_data['id'] = order['ID']
order_data['status'] = order['post_status']
# order data
order_items = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_items'], 'order_id', order['ID'])
shipping = get_row_from_list_by_field(order_items, 'order_item_type', 'shipping')
taxes = get_list_from_list_by_field(order_items, 'order_item_type', 'tax')
tax_names = list()
total_tax = 0.0
if taxes:
tax_names = duplicate_field_value_from_list(taxes, 'order_item_name')
for tax in taxes:
order_tax_metas = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_itemmeta'], 'order_item_id', tax['order_item_id'])
total_tax += to_decimal(self.get_value_metadata(order_tax_metas, 'tax_amount', 0.0))
total_tax += to_decimal(self.get_value_metadata(order_tax_metas, 'shipping_tax_amount', 0.0))
if 'postmeta' in orders_ext['data']:
order_meta = get_list_from_list_by_field(orders_ext['data']['postmeta'], 'post_id', order['ID'])
else:
order_meta = get_list_from_list_by_field(orders_ext['data']['order_meta'], 'post_id', order['ID'])
ord_number = get_row_value_from_list_by_field(order_meta, 'meta_key', '_order_number', 'meta_value')
if ord_number and self._notice['src']['support'].get('plugin_pre_ord'):
order_data['order_number'] = ord_number
order_data['tax']['title'] = '|'.join(tax_names) if tax_names else 'Tax'
order_data['tax']['amount'] = total_tax if total_tax else self.get_value_metadata(order_meta, '_order_tax', 0.0000)
order_data['shipping']['title'] = shipping['order_item_name'] if shipping else 'Shipping'
order_data['shipping']['amount'] = self.get_value_metadata(order_meta, '_order_shipping', 0.0000) # _order_shipping_tax
discount_title = get_row_value_from_list_by_field(order_items, 'order_item_type', 'coupon', 'order_item_name')
order_data['discount']['title'] = discount_title if discount_title else 'Discount'
order_data['discount']['amount'] = self.get_value_metadata(order_meta, '_cart_discount', 0.0000)
order_data['total']['title'] = 'Total'
order_data['total']['amount'] = self.get_value_metadata(order_meta, '_order_total', 0.0000)
order_data['subtotal']['title'] = 'Total'
order_data['subtotal']['amount'] = to_decimal(self.get_value_metadata(order_meta, '_order_total', 0.0000)) - to_decimal(self.get_value_metadata(order_meta, '_cart_discount', 0.0000)) - to_decimal(order_data['tax']['amount']) - to_decimal(order_data['shipping']['amount'])
order_data['currency'] = self.get_value_metadata(order_meta, '_order_currency', 'meta_value')
order_data['created_at'] = convert_format_time(order['post_date'])
order_data['updated_at'] = convert_format_time(order['post_modified'])
# order customer
order_customer = self.construct_order_customer()
order_customer = self.add_construct_default(order_customer)
order_customer_src = self.get_value_metadata(order_meta, '_customer_user', 'meta_value')
if order_customer_src and to_int(order_customer_src) > 0:
customer_src = get_row_from_list_by_field(orders_ext['data']['user'], 'ID', order_customer_src)
customer_meta_src = get_list_from_list_by_field(orders_ext['data']['user_meta'], 'user_id', order_customer_src)
if customer_src:
order_customer['id'] = order_customer_src
order_customer['code'] = get_value_by_key_in_dict(customer_src, 'user_login', '')
order_customer['email'] = get_value_by_key_in_dict(customer_src, 'user_email', self.get_value_metadata(order_meta, '_billing_email', 'meta_value'))
order_customer['username'] = get_value_by_key_in_dict(customer_src, 'display_name', '')
order_customer['first_name'] = self.get_value_metadata(customer_meta_src, 'first_name', self.get_value_metadata(order_meta, '_billing_first_name', ''))
order_customer['last_name'] = self.get_value_metadata(customer_meta_src, 'last_name', self.get_value_metadata(order_meta, '_billing_last_name', ''))
else:
order_customer['email'] = self.get_value_metadata(order_meta, '_billing_email', 'meta_value')
order_customer['username'] = order_customer['email']
order_customer['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
order_customer['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
order_data['customer'] = order_customer
# TODO: Plugin WooCommerce Points and Rewards
if self._notice['src']['support'].get('customer_point_rewards'):
customer_point_rewards = dict()
customer_point_rewards['reward_point'] = list()
customer_point_rewards['reward_point_log'] = list()
wc_points_rewards_user_points = get_list_from_list_by_field(orders_ext['data']['wc_points_rewards_user_points'], 'order_id', order['ID'])
if wc_points_rewards_user_points:
for points_rewards_user_points in wc_points_rewards_user_points:
reward_point = dict()
reward_point['points'] = points_rewards_user_points['points']
reward_point['points_balance'] = points_rewards_user_points['points_balance']
reward_point['user_id'] = points_rewards_user_points['user_id']
reward_point['created_at'] = points_rewards_user_points['date']
customer_point_rewards['reward_point'].append(reward_point)
wc_points_rewards_user_points_log = get_list_from_list_by_field(orders_ext['data']['wc_points_rewards_user_points_log'], 'order_id', order['ID'])
if wc_points_rewards_user_points_log:
for points_rewards_user_points_log in wc_points_rewards_user_points_log:
reward_point_log = dict()
reward_point_log['points'] = points_rewards_user_points_log['points']
reward_point_log['type'] = points_rewards_user_points_log['type']
reward_point_log['user_points_id'] = points_rewards_user_points_log['user_points_id']
reward_point_log['user_id'] = points_rewards_user_points_log['user_id']
reward_point_log['admin_user_id'] = points_rewards_user_points_log['admin_user_id']
reward_point_log['data'] = points_rewards_user_points_log['data']
reward_point_log['created_at'] = points_rewards_user_points_log['date']
customer_point_rewards['reward_point_log'].append(reward_point_log)
order_data['point_rewards'] = customer_point_rewards
# customer address
customer_address = self.construct_order_address()
customer_address = self.add_construct_default(customer_address)
customer_address['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
customer_address['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
customer_address['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
customer_address['address_1'] = self.get_value_metadata(order_meta, '_billing_address_1', '')
customer_address['address_2'] = self.get_value_metadata(order_meta, '_billing_address_2', '')
customer_address['city'] = self.get_value_metadata(order_meta, '_billing_city', '')
customer_address['postcode'] = self.get_value_metadata(order_meta, '_billing_postcode', '')
customer_address['telephone'] = self.get_value_metadata(order_meta, '_billing_phone', '')
customer_address['company'] = self.get_value_metadata(order_meta, '_billing_company', '')
customer_address['country']['code'] = self.get_value_metadata(order_meta, '_billing_country', '')
customer_address['country']['country_code'] = self.get_value_metadata(order_meta, '_billing_country', '')
customer_address['country']['name'] = self.get_country_name_by_code(customer_address['country']['country_code'])
customer_address['state']['state_code'] = self.get_value_metadata(order_meta, '_billing_state', '')
customer_address['state']['code'] = customer_address['state']['state_code']
order_data['customer_address'] = customer_address
# billing address
order_billing = self.construct_order_address()
order_billing = self.add_construct_default(order_billing)
order_billing['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
order_billing['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
order_billing['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
order_billing['address_1'] = self.get_value_metadata(order_meta, '_billing_address_1', '')
order_billing['address_2'] = self.get_value_metadata(order_meta, '_billing_address_2', '')
order_billing['city'] = self.get_value_metadata(order_meta, '_billing_city', '')
order_billing['postcode'] = self.get_value_metadata(order_meta, '_billing_postcode', '')
order_billing['telephone'] = self.get_value_metadata(order_meta, '_billing_phone', '')
order_billing['company'] = self.get_value_metadata(order_meta, '_billing_company', '')
order_billing['country']['code'] = self.get_value_metadata(order_meta, '_billing_country', '')
order_billing['country']['country_code'] = self.get_value_metadata(order_meta, '_billing_country', '')
order_billing['country']['name'] = self.get_country_name_by_code(order_billing['country']['country_code'])
order_billing['state']['state_code'] = self.get_value_metadata(order_meta, '_billing_state', '')
order_billing['state']['code'] = order_billing['state']['state_code']
order_billing['code'] = self.convert_attribute_code(to_str(order_billing['first_name']) + '-' + to_str(order_billing['last_name']) + '-' + to_str(order_billing['address_1']) + '-' + to_str(order_billing['address_2']))
order_data['billing_address'] = order_billing
# shipping address
order_delivery = self.construct_order_address()
order_delivery = self.add_construct_default(order_delivery)
order_delivery['first_name'] = self.get_value_metadata(order_meta, '_shipping_first_name', '')
order_delivery['last_name'] = self.get_value_metadata(order_meta, '_shipping_last_name', '')
order_delivery['email'] = self.get_value_metadata(order_meta, '_shipping_email', '')
order_delivery['address_1'] = self.get_value_metadata(order_meta, '_shipping_address_1', '')
order_delivery['address_2'] = self.get_value_metadata(order_meta, '_shipping_address_2', '')
order_delivery['city'] = self.get_value_metadata(order_meta, '_shipping_city', '')
order_delivery['postcode'] = self.get_value_metadata(order_meta, '_shipping_postcode', '')
order_delivery['telephone'] = self.get_value_metadata(order_meta, '_shipping_phone', '') if self.get_value_metadata(order_meta, '_shipping_phone', '') else self.get_value_metadata(order_meta, '_shipping_Phone_No', '')
order_delivery['company'] = self.get_value_metadata(order_meta, '_shipping_company', '')
order_delivery['country']['code'] = self.get_value_metadata(order_meta, '_shipping_country', '')
order_delivery['country']['country_code'] = self.get_value_metadata(order_meta, '_shipping_country', '')
order_delivery['country']['name'] = self.get_country_name_by_code(order_delivery['country']['country_code'])
order_delivery['state']['state_code'] = self.get_value_metadata(order_meta, '_shipping_state', '')
order_delivery['state']['code'] = order_delivery['state']['state_code']
order_delivery['code'] = self.convert_attribute_code(to_str(order_delivery['first_name']) + '-' + to_str(order_delivery['last_name']) + '-' + to_str(order_delivery['address_1']) + '-' + to_str(order_delivery['address_2']))
order_data['shipping_address'] = order_delivery
# order_data['user_history'] = self.get_value_metadata(order_meta, '_user_history', '')
order_products = get_list_from_list_by_field(order_items, 'order_item_type', 'line_item')
order_items = list()
for order_product in order_products:
order_product_metas = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_itemmeta'], 'order_item_id', order_product['order_item_id'])
qty = self.get_value_metadata(order_product_metas, '_qty', 1)
if to_int(qty) == 0:
qty = 1
order_item_subtotal = self.get_value_metadata(order_product_metas, '_line_subtotal', 0.0000)
order_item = self.construct_order_item()
order_item = self.add_construct_default(order_item)
order_item['id'] = order_product['order_item_id']
order_item['product']['id'] = self.get_value_metadata(order_product_metas, '_variation_id', self.get_value_metadata(order_product_metas, '_product_id', 0))
order_item['product']['code'] = self.get_value_metadata(order_product_metas, '_product_code', 0)
product_meta = get_list_from_list_by_field(orders_ext['data']['products_meta'], 'post_id', order_item['product']['id'])
order_item['product']['sku'] = self.get_value_metadata(product_meta, '_sku', '')
order_item['product']['name'] = order_product['order_item_name']
order_item['qty'] = to_decimal(qty) if qty != '' else 1
order_item['price'] = to_decimal(order_item_subtotal) / to_decimal(qty) if (qty != 0 and qty != '') else 0
order_item['original_price'] = to_decimal(order_item_subtotal) / to_decimal(qty) if (qty != 0 and qty != '') else 0
order_item['tax_amount'] = self.get_value_metadata(order_product_metas, '_line_tax', 0.0000)
order_item['subtotal'] = order_item_subtotal
order_item['total'] = self.get_value_metadata(order_product_metas, '_line_total', 0.0000)
order_item['options'] = list()
if order_product['order_item_type'] == 'line_item':
order_item_options = list()
keys = {'_qty', '_tax_class', '_product_id', '_variation_id', '_line_subtotal', '_line_subtotal_tax',
'_line_total', '_line_tax', '_line_tax_data', '_original_order_item_id'}
for order_product_meta in order_product_metas:
if order_product_meta['meta_key'] not in keys:
order_item_option = self.construct_order_item_option()
# order_item_option['option_name'] = order_product_meta['meta_key']
order_item_option['option_name'] = unquote(order_product_meta['meta_key'])
if order_item_option['option_name'] and 'pa_' in order_item_option['option_name']:
continue
order_item_option['option_value_name'] = order_product_meta['meta_value']
# unquote(order_product['order_item_name'])
order_item_options.append(order_item_option)
order_item['options'] = order_item_options
order_items.append(order_item)
order_data['items'] = order_items
order_notes = get_list_from_list_by_field(orders_ext['data']['order_note'], 'comment_post_ID', order['ID'])
order_history = list()
for order_note in order_notes:
order_note_meta = get_list_from_list_by_field(orders_ext['data']['order_note_meta'], 'comment_id', order_note['comment_ID'])
order_history = self.construct_order_history()
order_history = self.add_construct_default(order_history)
order_history['id'] = order_note['comment_ID']
order_history['status'] = order_note['comment_approved']
order_history['comment'] = order_note['comment_content']
order_history['notified'] = self.get_value_metadata(order_note_meta, 'is_customer_note', False)
order_history['created_at'] = convert_format_time(order_note['comment_date'])
order_data['history'].append(order_history)
order_payment = self.construct_order_payment()
order_payment = self.add_construct_default(order_payment)
order_payment['id'] = order['ID']
order_payment['method'] = self.get_value_metadata(order_meta, '_payment_method')
order_payment['title'] = self.get_value_metadata(order_meta, '_payment_method_title')
# custom order_number plugin WooCommerce Sequential Order Numbers
# order_data['order_number'] = self.get_value_metadata(order_meta, '_order_number', '')
# order_data['order_number_formatted'] = self.get_value_metadata(order_meta, '_order_number_formatted', '')
# order_data['order_number_meta'] = self.get_value_metadata(order_meta, '_order_number_meta', '')
order_data['payment'] = order_payment
return response_success(order_data)
def get_order_id_import(self, convert, order, orders_ext):
return order['ID']
def check_order_import(self, convert, order, orders_ext):
return self.get_map_field_by_src(self.TYPE_ORDER, convert['id'], convert['code'])
def update_order_after_demo(self, order_id, convert, order, orders_ext):
all_queries = list()
delete_query = list()
# order item
delete_query_child = {
'type': 'delete',
'query': 'DELETE FROM _DBPRF_woocommerce_order_itemmeta WHERE order_item_id IN (SELECT order_item_id FROM _DBPFF_woocommerce_order_items WHERE order_id = ' + to_str(order_id) + ')'
}
delete_query.append(delete_query_child)
delete_query.append(self.create_delete_query_connector('woocommerce_order_items', {'order_id': order_id}))
self.import_multiple_data_connector(delete_query, 'delete_ord_update')
order_items = convert['items']
for item in order_items:
order_item_data = {
'order_item_name': item['product']['name'],
'order_item_type': 'line_item',
'order_id': order_id
}
order_item_query = self.create_insert_query_connector("woocommerce_order_items", order_item_data)
order_item_id = self.import_data_connector(order_item_query, 'order')
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'])
if not product_id:
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, None, item['product']['id'])
if not product_id:
product_id = 0
order_item_meta = {
'_qty': item['qty'],
'_tax_class': '',
'_product_id': product_id,
'_variation_id': '',
'_line_subtotal': item['subtotal'],
'_line_total': item['total'],
'_line_subtotal_tax': 0,
'_line_tax': 0,
'_line_tax_data': php_serialize({
'total': 0,
'subtotal': 0
}),
}
for meta_key, meta_value in order_item_meta.items():
meta_insert = {
'order_item_id': order_item_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
for option in item['options']:
meta_insert = {
'order_item_id': order_item_id,
'meta_key': option['option_name'],
'meta_value': option['option_value_name']
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
return response_success()
def router_order_import(self, convert, order, orders_ext):
return response_success('order_import')
    def before_order_import(self, convert, order, orders_ext):
        """Pre-import hook for orders; no preparation needed for this target."""
        return response_success()
def order_import(self, convert, order, orders_ext):
order_data = {
'post_author': 1,
'post_date': convert['created_at'] if convert['created_at'] else get_current_time(),
'post_date_gmt': convert['created_at'] if convert['created_at'] else get_current_time(),
'post_content': '',
'post_title': '',
'post_excerpt': '',
'post_status': 'publish' if convert['status'] else 'closed',
'comment_status': 'open',
'ping_status': 'closed',
'post_password': '',
'post_name': '',
'to_ping': '',
'pinged': '',
'post_modified': convert['updated_at'] if convert['updated_at'] else get_current_time(),
'post_modified_gmt': convert['updated_at'] if convert['updated_at'] else get_current_time(),
'post_content_filtered': '',
'post_parent': 0,
'guid': '',
'menu_order': 0,
'post_type': 'shop_order',
'post_mime_type': '',
'comment_count': 0,
}
id_order = self.import_product_data_connector(self.create_insert_query_connector('posts', order_data))
self.insert_map(self.TYPE_ORDER, convert['id'], id_order, convert['code'])
id_customers = self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['customer']['id'])
order_meta = {
'_edit_lock': '',
'_edit_last': 1,
'customer_user': id_customers,
'_order_currency': convert['currency'],
'_order_shipping_tax': convert['shipping']['amount'],
'_order_tax': convert['tax']['amount'],
'_order_total': convert['total']['amount'],
'_order_version': '4.5.1',
'_prices_include_tax': 'no',
# '_billing_address_index': convert['billing_address']['first_name'] + ' '
# + convert['billing_address']['last_name'] + ' '
# + convert['billing_address']['company'] + ' '
# + convert['billing_address']['address_1'] + ' '
# + convert['billing_address']['address_2'] + ' '
# + convert['billing_address']['city'] + ' '
# + convert['billing_address']['state']['name'] + ' '
# + convert['billing_address']['postcode'] + ' '
# + convert['billing_address']['country']['country_code'] + ' '
# + convert['customer']['email'] + ' '
# + convert['billing_address']['telephone'],
# '_shipping_address_index': convert['shipping_address']['first_name'] + ' '
# + convert['shipping_address']['last_name'] + ' '
# + convert['shipping_address']['company'] + ' '
# + convert['shipping_address']['address_1'] + ' '
# + convert['shipping_address']['address_2'] + ' '
# + convert['shipping_address']['city'] + ' '
# + convert['shipping_address']['country']['name'] + ' '
# + convert['shipping_address']['postcode'] + ' '
# + convert['shipping_address']['country']['country_code'] + ' '
# + convert['customer']['email'] + ' '
# + convert['shipping_address']['telephone'],
'_cart_discount': 0,
'_cart_discount_tax': 0,
'_order_shipping': 0,
'_order_key': '',
'_payment_method': convert['payment']['method'],
'_payment_method_title': convert['payment']['title'],
'_created_via': 'migrate',
'_date_paid': '',
'_billing_first_name': convert['billing_address']['first_name'],
'_billing_last_name': convert['billing_address']['last_name'],
'_billing_company': convert['billing_address']['company'],
'_billing_address_1': convert['billing_address']['address_1'],
'_billing_address_2': convert['billing_address']['address_2'],
'_billing_city': convert['billing_address']['city'],
'_billing_state': convert['billing_address']['state']['name'],
'_billing_postcode': convert['billing_address']['postcode'],
'_billing_country': convert['billing_address']['country']['country_code'],
'_billing_email': convert['customer']['email'],
'_billing_phone': convert['billing_address']['telephone'],
'_shipping_first_name': convert['shipping_address']['first_name'],
'_shipping_last_name': convert['shipping_address']['last_name'],
'_shipping_company': convert['shipping_address']['company'],
'_shipping_address_1': convert['shipping_address']['address_1'],
'_shipping_address_2': convert['shipping_address']['address_2'],
'_shipping_city': convert['shipping_address']['city'],
'_shipping_state': convert['shipping_address']['state']['name'],
'_shipping_postcode': convert['shipping_address']['postcode'],
'_shipping_country': convert['billing_address']['country']['country_code'],
'_paid_date': convert['created_at'],
'_download_permissions_granted': 'yes',
'_recorded_sales': 'yes',
'_recorded_coupon_usage_counts': 'yes',
'_order_stock_reduced': 'yes',
}
for meta_key, meta_value in order_meta.items():
data = {
'post_id': id_order,
'meta_key': meta_key,
'meta_value': meta_value
}
self.import_data_connector(self.create_insert_query_connector('postmeta', data=data), 'order')
for item in convert['items']:
item_name = item['product']['name']
woo_data = {
'order_item_name': item_name,
'order_item_type': 'line_item',
'order_id': id_order
}
id_order_item = self.import_data_connector(self.create_insert_query_connector('woocommerce_order_items', data=woo_data), 'order')
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'])
woo_item_meta = {
'_product_id': product_id,
'_variation_id': 0,
'_qty': item['qty'],
'tax_class': '',
'_line_subtotal': item['subtotal'],
'_line_subtotal_tax': '',
'_line_total': item['total'],
'_line_tax': item['tax_amount'],
'_line_tax_data': 'a:2:{s:8:"subtotal";a:0:{}s:5:"total";a:0:{}}',
'_reduced_stock': item['qty']
}
for meta_key, meta_value in woo_item_meta.items():
woo_item_data = {
'order_item_id': id_order_item,
'meta_key': meta_key,
'meta_value': meta_value
}
self.import_data_connector(self.create_insert_query_connector('woocommerce_order_itemmeta', data=woo_item_data), 'order')
return response_success(id_order)
    def after_order_import(self, order_id, convert, order, orders_ext):
        """Post-import hook for orders; nothing extra to do for this target."""
        return response_success()
    def addition_order_import(self, convert, order, orders_ext):
        """Hook for extra order-related imports; unused for this target."""
        return response_success()
# TODO: REVIEW
    def prepare_reviews_import(self):
        """No-op preparation hook before importing reviews; returns self for chaining."""
        return self
    def prepare_reviews_export(self):
        """No-op preparation hook before exporting reviews; returns self for chaining."""
        return self
def get_reviews_main_export(self):
id_src = self._notice['process']['reviews']['id_src']
limit = self._notice['setting']['reviews']
query = {
'type': 'select',
'query': "SELECT cm.*, p.post_type FROM _DBPRF_comments AS cm "
"LEFT JOIN _DBPRF_posts AS p ON p.ID = cm.comment_post_ID "
"WHERE p.post_type = 'product' AND cm.comment_ID > " + to_str(
id_src) + " ORDER BY cm.comment_ID ASC LIMIT " + to_str(limit)
}
# reviews = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
reviews = self.select_data_connector(query, 'reviews')
if not reviews or reviews['result'] != 'success':
return response_error()
return reviews
def get_product_download_data(self, product_id):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE meta_key = '_downloadable_files' AND post_id = " + to_str(product_id)
}
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success' or len(products['data']) == 0:
return None
return php_unserialize(products['data'][0]['meta_value'])
def get_download_data(self, product_id):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id = " + to_str(product_id)
}
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success' or len(products['data']) == 0:
return None
download_data = dict()
for data in products['data']:
if data['meta_key'] in ['_download_expiry', '_download_limit']:
download_data[data['meta_key']] = data['meta_value'] if to_int(data['meta_value']) > 0 else None
return download_data
    def get_reviews_ext_export(self, reviews):
        """Load auxiliary data for a batch of exported reviews.

        Runs two batched SELECTs: the comment meta rows (ratings) for every
        review comment, and the parent product posts they are attached to.
        Returns the connector response, or response_error on failure.
        """
        url_query = self.get_connector_url('query')
        reviews_ids = duplicate_field_value_from_list(reviews['data'], 'comment_ID')
        product_ids = duplicate_field_value_from_list(reviews['data'], 'comment_post_ID')
        review_ext_queries = {
            'comment_meta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_commentmeta WHERE comment_id IN " + self.list_to_in_condition(
                    reviews_ids),
            },
            'product_info': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_posts WHERE ID IN " + self.list_to_in_condition(product_ids),
            }
        }
        reviews_ext = self.select_multiple_data_connector(review_ext_queries, 'reviews')
        if not reviews_ext or reviews_ext['result'] != 'success':
            return response_error()
        return reviews_ext
    def convert_review_export(self, review, reviews_ext):
        """Convert a source wp comment row into the framework review entity.

        Pulls product info and the rating meta from `reviews_ext` and maps
        WordPress approval flags onto the framework status codes.
        """
        review_data = self.construct_review()
        review_data['id'] = review['comment_ID']
        product_info = get_row_from_list_by_field(reviews_ext['data']['product_info'], 'ID', review['comment_post_ID'])
        review_data['product']['id'] = review['comment_post_ID']
        if product_info:
            review_data['product']['code'] = product_info['post_name']
            review_data['product']['name'] = product_info['post_title']
        review_data['customer']['id'] = review['user_id']
        review_data['customer']['code'] = review['comment_author_email']
        review_data['customer']['name'] = review['comment_author']
        review_data['title'] = ''
        review_data['content'] = review['comment_content']
        # WordPress comment_approved flag -> framework status code.
        # NOTE(review): the fallback default is the string 'spam' while the
        # mapped values are ints — confirm downstream handles both forms.
        rv_status = {
            '0': 2,  # pending
            '1': 1,  # approved
            'spam': 3  # not approved
        }
        review_data['status'] = rv_status.get(to_str(review['comment_approved']), 'spam')
        review_data['created_at'] = convert_format_time(review['comment_date'])
        review_data['updated_at'] = convert_format_time(review['comment_date'])
        rating = self.construct_review_rating()
        review_meta = get_list_from_list_by_field(reviews_ext['data']['comment_meta'], 'comment_id', review['comment_ID'])
        rating['id'] = get_row_value_from_list_by_field(review_meta, 'comment_id', review['comment_ID'], 'meta_id')
        rating['rate_code'] = 'default'
        # Default to a 5-star rating when no rating meta exists.
        rating['rate'] = self.get_value_metadata(review_meta, 'rating', 5)
        review_data['rating'].append(rating)
        return response_success(review_data)
def get_review_id_import(self, convert, review, reviews_ext):
return review['comment_ID']
def check_review_import(self, convert, review, reviews_ext):
return True if self.get_map_field_by_src(self.TYPE_REVIEW, convert['id'], convert['code']) else False
def router_review_import(self, convert, review, reviews_ext):
return response_success('review_import')
    def before_review_import(self, convert, review, reviews_ext):
        """Pre-import hook for reviews; no preparation needed for this target."""
        return response_success()
    def review_import(self, convert, review, reviews_ext):
        """Create a wp comment row for a converted review.

        Resolves the mapped target product (per language when WPML is
        active) and the mapped customer, inserts the comment, and records
        the id mapping. Fails with a warning when the product mapping is
        missing or the insert fails.
        """
        lang_code = self._notice['target']['language_default']
        if convert.get('store_id'):
            lang_code = self._notice['map']['languages'].get(to_str(convert['store_id']))
        product_id = False
        if convert['product']['id'] or convert['product']['code']:
            if self.is_wpml():
                product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['product']['id'], convert['product']['code'], lang = lang_code)
            else:
                product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['product']['id'], convert['product']['code'])
            if not product_id:
                # Fallback: match by product code only.
                product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, None, convert['product']['code'], lang = lang_code)
        if not product_id:
            msg = self.warning_import_entity('Review', convert['id'], convert['code'], 'product of review not exists.')
            return response_error(msg)
        # Reviews from unmapped customers are imported as guest (user_id 0).
        customer_id = 0
        if convert['customer']['id'] or convert['customer']['code']:
            customer_id = self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['customer']['id'])
            if not customer_id:
                customer_id = 0
        # Framework status code -> WordPress comment_approved flag.
        rv_status = {
            '2': 0,  # pending
            '1': 1,  # approved
            '3': 'spam',  # not approved
            '0': 0
        }
        review_data = {
            'comment_post_ID': product_id,
            'comment_author': convert['customer']['name'],
            'comment_author_email': '',
            'comment_date': convert.get('created_at') if convert.get('created_at') else get_current_time(),
            'comment_date_gmt': convert['updated_at'] if convert['updated_at'] is not None else get_current_time(),
            'comment_content': convert['content'] if convert['content'] else '',
            'comment_karma': 0,
            'comment_approved': rv_status.get(str(convert['status']), 'spam'),
            'comment_parent': 0,
            'comment_type': "review",
            'user_id': customer_id
        }
        review_query = self.create_insert_query_connector("comments", review_data)
        review_id = self.import_review_data_connector(review_query, True, convert['id'])
        if not review_id:
            msg = self.warning_import_entity('Review', convert['id'], convert['code'])
            return response_error(msg)
        self.insert_map(self.TYPE_REVIEW, convert['id'], review_id, convert['code'])
        return response_success(review_id)
def after_review_import(self, review_id, convert, review, reviews_ext):
ratings = convert['rating']
for rating in ratings:
comment_meta = {
'rating': to_int(rating['rate'])
}
for meta_key, meta_value in comment_meta.items():
meta_insert = {
'comment_id': review_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("commentmeta", meta_insert)
self.import_data_connector(meta_query, 'review')
return response_success()
    def addition_review_import(self, convert, review, reviews_ext):
        """Hook for extra review-related imports; unused for this target."""
        return response_success()
# TODO: Page
def check_page_import(self, convert, page, pages_ext):
return True if self.get_map_field_by_src(self.TYPE_PAGE, convert['id'], convert['code'], lang = self._notice['target']['language_default']) else False
    def page_import(self, convert, page, pages_ext):
        """Create a wp_posts row of type 'page' for a converted CMS page.

        Derives a unique slug from the title (suffixed with the WPML
        language code when relevant), resolves the mapped parent page,
        inserts the post, and records the id mapping including slug and
        language. Returns the new page id on success.
        """
        language_code = convert.get('language_code')
        if self.is_wpml() and not language_code:
            language_code = self._notice['target']['language_default']
        code_name = convert['title']
        code_name = self.sanitize_title(code_name).strip('-')
        if self.is_wpml() and language_code:
            code_name = code_name + '-' + language_code
        # Ensure slug uniqueness against the migration map; on collision the
        # source id is appended until the slug is free.
        check_slug_exist = True
        while check_slug_exist:
            check_slug_exist = True if self.select_map(self._migration_id, self.TYPE_PAGE, None, None, None, code_name, None, language_code) else False
            if check_slug_exist:
                code_name += to_str(get_value_by_key_in_dict(convert, 'id', ''))
        parent_id = self.get_map_field_by_src(self.TYPE_PAGE, to_int(convert['parent_id']), None, language_code)
        if not parent_id:
            parent_id = 0
        data = {
            'post_author': 1,
            'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_content': convert['content'] if convert['content'] else "",
            'post_title': convert['title'],
            'post_status': 'publish' if convert['status'] else 'trash',
            'comment_status': convert.get('comment_status', 'open'),
            'ping_status': 'open',
            'post_name': code_name[:200],  # wp_posts.post_name column limit
            'post_modified': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_modified_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_parent': parent_id,
            'post_type': 'page',
            'comment_count': 0,
            'guid': '',
            'post_excerpt': '',
            'to_ping': '',
            'pinged': '',
            'post_content_filtered': '',
            'menu_order': get_value_by_key_in_dict(convert, 'sort_order', 0)
        }
        page_query = self.create_insert_query_connector('posts', data)
        page_id = self.import_page_data_connector(page_query, True, convert['id'])
        if not page_id:
            return response_error('Page ' + to_str(convert['id']) + ' import false.')
        self.insert_map(self.TYPE_PAGE, convert['id'], page_id, convert['title'], code_name, None, language_code)
        return response_success(page_id)
def after_page_import(self, page_id, convert, page, pages_ext):
# data = {
# 'guid': self._notice['target']['cart_url'] + '?p=' + str(page_id)
# }
# where_id = {
# 'id': page_id
# }
# update_query = self.create_update_query_connector('posts', data, where_id)
# self.import_data_connector(update_query, 'page')
# data_meta = {
# 'post_id': page_id,
# 'meta_key': '_edit_lock',
# 'meta_value': int(time.time()),
# }
# self.import_page_data_connector(self.create_insert_query_connector('postmeta', data_meta), True, convert['id'])
# thumbnail_id = False
# if convert['images']:
# for image in convert['images']:
# image_process = self.process_image_before_import(image['url'], image.get('path', ''))
# image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(self.make_woocommerce_image_path(image_process['path']), self._notice['target']['config']['image_product'].rstrip('/')))
# if image_import_path:
# product_image = self.remove_prefix_path(image_import_path, self._notice['target']['config']['image_product'])
# image_details = self.get_sizes(image_process['url'])
# thumbnail_id = self.wp_image(product_image, image_details)
# postmeta = dict()
# if thumbnail_id:
# postmeta['_thumbnail_id'] = thumbnail_id
# for meta_key, value in postmeta.items():
# postmeta_data = {
# 'post_id': page_id,
# 'meta_key': meta_key,
# 'meta_value': value
# }
# self.import_page_data_connector(self.create_insert_query_connector('postmeta', postmeta_data), True, convert['id'])
# data_revision = {
# 'post_author': 1,
# 'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
# 'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
# 'post_content': convert['content'],
# 'post_title': convert['title'],
# 'post_status': 'inherit',
# 'comment_status': 'closed',
# 'ping_status': 'closed',
# 'post_name': str(page_id) + '-revision-v1',
# 'post_modified': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
# 'post_modified_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
# 'post_parent': page_id,
# 'menu_order': get_value_by_key_in_dict(convert, 'sort_order', 0),
# 'post_type': 'revision',
# 'comment_count': 0,
# 'guid': self._notice['target']['cart_url'] + '/2019/08/27/' + str(page_id) + '-revision-v1',
# 'post_excerpt': '',
# 'to_ping': '',
# 'pinged': '',
# 'post_content_filtered': ''
# }
# self.import_page_data_connector(self.create_insert_query_connector('posts', data_revision), True, convert['id'])
super().after_page_import(page_id, convert, page, pages_ext)
if self.is_wpml():
source_language_code = self._notice['target']['language_default']
language_code = convert.get('language_code')
if not language_code:
language_code = source_language_code
source_language_code = None
trid = convert.get('trid')
if not trid:
trid = self.get_new_trid()
wpml_default = {
'element_type': 'post_page',
'element_id': page_id,
'trid': trid,
'language_code': language_code,
'source_language_code': source_language_code
}
self.import_data_connector(self.create_insert_query_connector("icl_translations", wpml_default), 'page')
if not convert.get('language_code'):
list_target_id = list()
for src_language_id, target_language_id in self._notice['map']['languages'].items():
if target_language_id in list_target_id or to_str(target_language_id) == to_str(self._notice['target']['language_default']):
continue
list_target_id.append(target_language_id)
page_lang = self.get_convert_data_language(convert, src_language_id)
page_lang['trid'] = trid
page_lang['language_code'] = target_language_id
page_import = self.page_import(page_lang, page, pages_ext)
if page_import['result'] == 'success':
self.after_page_import(page_import['data'], page_lang, page, pages_ext)
return response_success()
# TODO: Coupon
    def prepare_coupons_import(self):
        """No-op preparation hook before importing coupons."""
        return response_success()
    def prepare_coupons_export(self):
        """No-op preparation hook before exporting coupons; returns self for chaining."""
        return self
def get_coupons_main_export(self):
id_src = self._notice['process']['coupons']['id_src']
limit = self._notice['setting']['coupons']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE ID > " + to_str(id_src) + " AND post_type = 'shop_coupon' ORDER BY ID ASC LIMIT " + to_str(limit),
}
coupons = self.select_data_connector(query, 'coupons')
if not coupons or coupons['result'] != 'success':
return response_error()
return coupons
def get_coupons_ext_export(self, coupons):
coupon_ids = duplicate_field_value_from_list(coupons['data'], 'ID')
coupon_id_con = self.list_to_in_condition(coupon_ids)
coupon_ext_queries = {
'postmeta': {
'type': "select",
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + coupon_id_con
},
}
coupons_ext = self.select_multiple_data_connector(coupon_ext_queries, 'products')
if (not coupons_ext) or coupons_ext['result'] != 'success':
return response_error()
return coupons_ext
    def convert_coupon_export(self, coupon, coupons_ext):
        """Convert a source `shop_coupon` post plus its postmeta into the framework coupon entity."""
        coupon_data = self.construct_coupon()
        coupon_data['id'] = coupon['ID']
        postmeta = get_list_from_list_by_field(coupons_ext['data']['postmeta'], 'post_id', coupon['ID'])
        coupon_data['code'] = coupon['post_title']
        coupon_data['title'] = coupon['post_name']
        coupon_data['description'] = coupon['post_excerpt']
        coupon_data['status'] = True if coupon['post_status'] == 'publish' else False
        coupon_data['created_at'] = convert_format_time(coupon['post_date'])
        coupon_data['updated_at'] = convert_format_time(coupon['post_modified'])
        # Newer WooCommerce stores the expiry as 'date_expires'; fall back to
        # the legacy 'expiry_date' meta key.
        coupon_data['to_date'] = convert_format_time(self.get_value_metadata(postmeta, 'date_expires'))
        if not coupon_data['to_date']:
            coupon_data['to_date'] = convert_format_time(self.get_value_metadata(postmeta, 'expiry_date'))
        # Guard against the literal string 'None' stored in meta.
        coupon_data['min_spend'] = self.get_value_metadata(postmeta, 'minimum_amount') if to_str(self.get_value_metadata(postmeta, 'minimum_amount')) != 'None' else None
        coupon_data['max_spend'] = self.get_value_metadata(postmeta, 'maximum_amount') if to_str(self.get_value_metadata(postmeta, 'maximum_amount')) != 'None' else None
        coupon_data['times_used'] = self.get_value_metadata(postmeta, 'usage_count')
        coupon_data['usage_limit'] = self.get_value_metadata(postmeta, 'usage_limit', 0)
        coupon_data['discount_amount'] = self.get_value_metadata(postmeta, 'coupon_amount')
        coupon_data['usage_per_customer'] = self.get_value_metadata(postmeta, 'usage_limit_per_user')
        coupon_data['type'] = self.PERCENT if self.get_value_metadata(postmeta, 'discount_type') == 'percent' else self.FIXED
        coupon_data['simple_free_shipping'] = 1 if self.get_value_metadata(postmeta, 'free_shipping') == 'yes' else 0
        coupon_data['limit_usage_to_x_items'] = self.get_value_metadata(postmeta, 'limit_usage_to_x_items')
        # Product restrictions are stored comma-separated; categories are php-serialized.
        product_ids = self.get_value_metadata(postmeta, 'product_ids')
        if product_ids:
            coupon_data['products'] = to_str(product_ids).split(',')
        category_ids = self.get_value_metadata(postmeta, 'product_categories')
        if category_ids:
            category_ids = php_unserialize(category_ids)
            if category_ids:
                coupon_data['categories'] = category_ids
        return response_success(coupon_data)
def get_coupon_id_import(self, convert, coupon, coupons_ext):
return coupon['ID']
def check_coupon_import(self, convert, coupon, coupons_ext):
return True if self.get_map_field_by_src(self.TYPE_COUPON, convert['id'], convert['code']) else False
def router_coupon_import(self, convert, coupon, coupons_ext):
return response_success('coupon_import')
    def before_coupon_import(self, convert, coupon, coupons_ext):
        """Pre-import hook for coupons; no preparation needed for this target."""
        return response_success()
    def coupon_import(self, convert, coupon, coupons_ext):
        """Create a wp_posts row of type `shop_coupon` for a converted coupon.

        The coupon code becomes the post title; the discount configuration is
        written separately in after_coupon_import. Records the id mapping and
        returns the new post id.
        """
        coupon_data = {
            'post_author': 1,
            'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_content': '',
            'post_title': convert['code'] if convert['code'] else convert['title'],
            'post_excerpt': self.change_img_src_in_text(get_value_by_key_in_dict(convert, 'description', '')),
            'post_status': "publish" if convert['status'] else "draft",
            'comment_status': 'open',
            'ping_status': 'closed',
            'post_password': '',
            'post_name': self.strip_html_tag(convert['title']),
            'to_ping': '',
            'pinged': '',
            'post_modified': convert['updated_at'] if convert and convert['updated_at'] and '0000-00-00' not in convert['updated_at'] else get_current_time(),
            'post_modified_gmt': convert['updated_at'] if convert and convert['updated_at'] and '0000-00-00' not in convert['updated_at'] else get_current_time(),
            'post_content_filtered': '',
            'post_parent': 0,
            'guid': self._notice['target']['cart_url'] + "/?post_type=shop_coupon&p=",
            'menu_order': convert.get('menu_order', 0),
            'post_type': "shop_coupon",
            'post_mime_type': '',
            'comment_count': 0
        }
        coupon_query = self.create_insert_query_connector('posts', coupon_data)
        coupon_import = self.import_data_connector(coupon_query, 'coupons', convert['id'])
        if not coupon_import:
            return response_error()
        self.insert_map(self.TYPE_COUPON, convert['id'], coupon_import, convert['code'])
        return response_success(coupon_import)
def after_coupon_import(self, coupon_id, convert, coupon, coupons_ext):
all_queries = list()
product_ids = convert.get('products')
if product_ids:
product_id_map_arr = list()
for product_id in product_ids:
map_product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, product_id)
if map_product_id and map_product_id not in product_id_map_arr:
product_id_map_arr.append(to_str(map_product_id))
if product_id_map_arr:
product_ids = ','.join(product_id_map_arr)
else:
product_ids = None
category_ids = convert.get('categories')
cate_id_map_arr = list()
if category_ids:
for category_id in category_ids:
map_cate_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category_id)
if map_cate_id and map_cate_id not in cate_id_map_arr:
cate_id_map_arr.append(to_str(map_cate_id))
# if product_id_map_arr:
# product_ids = ','.join(cate_id_map_arr)
# else:
# product_ids = None
coupon_meta = {
'discount_type': 'percent' if convert['type'] == self.PERCENT else 'fixed_cart' if convert['type'] == self.FIXED else 'fixed_product',
'coupon_amount': convert['discount_amount'],
'usage_limit': convert['usage_limit'],
'usage_limit_per_user': convert['usage_per_customer'],
'free_shipping': 'yes' if 'simple_free_shipping' in convert and to_str(to_int(convert['simple_free_shipping'])) == '1' else 'no',
'usage_count': convert['times_used'],
'date_expires': convert['to_date'] if (convert['to_date'] and convert['to_date'] != '0000-00-00 00:00:00') else '',
'minimum_amount': convert['min_spend'],
'maximum_amount': convert['max_spend'],
'product_ids': product_ids if product_ids else None,
'product_categories': php_serialize(cate_id_map_arr) if cate_id_map_arr else '',
'customer_email': php_serialize(convert.get('customer')),
'limit_usage_to_x_items': convert.get('limit_usage_to_x_items', 0),
}
for meta_key, meta_value in coupon_meta.items():
meta_insert = {
'post_id': coupon_id,
'meta_key': meta_key,
'meta_value': str(meta_value).replace(')', '').replace(',', '').replace("'", '')
}
meta_query = self.create_insert_query_connector("postmeta", meta_insert)
all_queries.append(meta_query)
all_queries.append(self.create_update_query_connector('posts', {'guid': self._notice['target']['cart_url'] + "/?post_type=shop_coupon&p=" + to_str(coupon_id)}, {'ID': coupon_id}))
self.import_multiple_data_connector(all_queries, 'coupons')
return response_success()
def addition_coupon_import(self, convert, coupon, coupons_ext):
    """Extension hook invoked after the main coupon import; no-op in this target cart.

    Subclasses/other carts may override to migrate cart-specific coupon extras.
    Always reports success so the import pipeline continues.
    """
    return response_success()
def display_finish_target(self):
    """Finalize the migration on the WooCommerce target.

    Persists the migration notice, clears target caches/transients via the
    connector, recomputes review/term counters with raw SQL, and records the
    URL-rewrite mode option. Always returns a success response.
    """
    migration_id = self._migration_id
    # Save (insert or update) the serialized migration state for "recent migrations".
    recent_exist = self.select_row(TABLE_RECENT, {'migration_id': migration_id})
    notice = json.dumps(self._notice)
    if recent_exist:
        self.update_obj(TABLE_RECENT, {'notice': notice}, {'migration_id': migration_id})
    else:
        self.insert_obj(TABLE_RECENT, {'notice': notice, 'migration_id': migration_id})
    target_cart_type = self._notice['target']['cart_type']
    target_setup_type = self.target_cart_setup(target_cart_type)
    # if target_setup_type == 'connector':
    # Ask the connector endpoint to clear the target cart's cache.
    token = self._notice['target']['config']['token']
    url = self.get_connector_url('clearcache', token)
    self.get_connector_data(url)
    all_queries = list()
    # Drop cached/transient options so WooCommerce rebuilds them after import.
    all_queries.append({
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'product_cat_children'"
    })
    all_queries.append({
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_options` WHERE option_name = '_transient_wc_attribute_taxonomies'"
    })
    all_queries.append({
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_options` WHERE `option_name` LIKE '%_transient_timeout_wc_report_customers%'"
    })
    all_queries.append({
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_options` WHERE `option_name` LIKE '%_transient_wc_report_customers%'"
    })
    all_queries.append({
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'urlrewrite_type'"
    })
    # Recompute denormalized counters: approved comment counts per post ...
    all_queries.append({
        'type': 'query',
        'query': "UPDATE `_DBPRF_posts` SET `comment_count`= (SELECT COUNT(comment_ID) FROM `_DBPRF_comments` WHERE `_DBPRF_comments`.comment_post_ID = `_DBPRF_posts`.ID AND `_DBPRF_comments`.comment_approved = 1) WHERE `post_type` IN ('product', 'post')"
    })
    # ... WooCommerce review counts ...
    all_queries.append({
        'type': 'query',
        'query': "UPDATE `_DBPRF_postmeta` SET `meta_value`= (SELECT COUNT(comment_ID) FROM `_DBPRF_comments` WHERE `_DBPRF_comments`.comment_post_ID = `_DBPRF_postmeta`.post_id AND `_DBPRF_comments`.comment_approved = 1) WHERE `meta_key` = '_wc_review_count'"
    })
    # ... and average product ratings from review meta.
    all_queries.append({
        'type': 'query',
        'query': "UPDATE `_DBPRF_postmeta` SET `meta_value`= (SELECT AVG(cmta.`meta_value`) FROM `_DBPRF_comments` AS cmt LEFT JOIN `_DBPRF_commentmeta` AS cmta ON cmt.`comment_ID` = cmta.`comment_ID` WHERE cmt.`comment_post_ID` = `_DBPRF_postmeta`.`post_id` AND cmt.comment_approved = 1 AND cmta.`meta_key` = 'rating') WHERE `meta_key` = '_wc_average_rating'"
    })
    # all_queries.append({
    # 'type': 'query',
    # 'query': "UPDATE `_DBPRF_term_taxonomy` tt "
    # "SET tt.count = (SELECT COUNT( *) as total "
    # "FROM _DBPRF_term_relationships r JOIN _DBPRF_posts p ON r.object_id = p.ID "
    # "WHERE r.term_taxonomy_id = tt.term_taxonomy_id AND p.post_type = 'product' AND p.post_parent = '') "
    # "WHERE tt.taxonomy IN('product_cat', 'product_type', 'product_tag', 'product_brand')"
    # })
    # Refresh per-term product counts.
    all_queries.append({
        'type': 'query',
        'query': "UPDATE `_DBPRF_term_taxonomy` AS tt SET tt.count = (SELECT COUNT(1) AS total FROM _DBPRF_term_relationships AS tr WHERE tt.term_taxonomy_id = tr.term_taxonomy_id AND tr.object_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = 'product'))"
    })
    clear_cache = self.import_multiple_data_connector(all_queries)
    # Record which rewrite strategy was migrated (plain rewrite vs 301 redirects).
    option_data = {
        'option_name': 'urlrewrite_type',
        'option_value': 'urlrewrite',
        'autoload': 'yes'
    }
    if self._notice['support'].get('seo_301'):
        option_data = {
            'option_name': 'urlrewrite_type',
            'option_value': 'url301',
            'autoload': 'yes'
        }
    option_query = self.create_insert_query_connector('options', option_data)
    option_import = self.import_data_connector(option_query, 'options')
    return response_success()
def substr_replace(self, subject, replace, start, length):
    """Python port of PHP's ``substr_replace``.

    Replaces the slice of *subject* beginning at *start* with *replace*:
    - ``length is None``: everything from *start* to the end is replaced;
    - ``length < 0``: that many characters at the end of *subject* are kept;
    - otherwise: exactly *length* characters starting at *start* are replaced.

    Fix: compare against ``None`` with ``is`` (PEP 8) instead of ``==``,
    which could misfire on objects defining a broad ``__eq__``.
    """
    if length is None:
        return subject[:start] + replace
    elif length < 0:
        return subject[:start] + replace + subject[length:]
    else:
        return subject[:start] + replace + subject[start + length:]
def add_construct_default(self, construct):
    """Stamp the default site id and source-language id onto *construct*.

    Mutates the dict in place and returns it for call chaining.
    """
    construct.update({
        'site_id': 1,
        'language_id': self._notice['src']['language_default'],
    })
    return construct
def get_term_by_name(self, data):
    """Return the term_taxonomy_id of the 'product_visibility' term named *data*, or None.

    NOTE(review): *data* is concatenated directly into the SQL string — safe only
    if callers pass trusted/fixed visibility names ('exclude-from-catalog', ...);
    confirm no user-controlled value reaches this method.
    """
    query = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_term_taxonomy AS tt "
                 "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                 "WHERE tt.taxonomy = 'product_visibility' AND t.name = '" + data + "'"
    }
    product_taxonomy = self.select_data_connector(query)
    if product_taxonomy['result'] == 'success' and product_taxonomy['data']:
        # First match wins; visibility term names are unique in practice.
        return product_taxonomy['data'][0]['term_taxonomy_id']
    return None
def get_product_type(self, product_type):
    """Map a product-type slug ('simple', 'variable', ...) to its term_taxonomy_id.

    Lazily fills the ``self.product_types`` cache from the target DB on first
    call; unknown slugs fall back to id 2.
    """
    if not self.product_types:
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy AS tt "
                     "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                     "WHERE tt.taxonomy = 'product_type'"
        }
        product_types = self.select_data_connector(query)
        if product_types['result'] == 'success' and product_types['data']:
            for product_type_row in product_types['data']:
                self.product_types[product_type_row['slug']] = product_type_row['term_taxonomy_id']
    # Default 2 — presumably the 'simple' product type's id; TODO confirm on target schema.
    return self.product_types.get(product_type, 2)
def import_category_parent(self, convert_parent, lang_code = None):
    """Ensure a parent category exists on the target, importing it if needed.

    Returns the existing mapped id when the parent was already migrated;
    otherwise localizes the convert data (WPML) and runs the full category
    import + post-import hooks, returning that import's response.
    """
    category_type = self.TYPE_CATEGORY
    if convert_parent.get('is_blog'):
        category_type = self.TYPE_CATEGORY_BLOG
    parent_exists = self.get_map_field_by_src(category_type, convert_parent['id'], convert_parent['code'], lang_code)
    if parent_exists:
        return response_success(parent_exists)
    if self.is_wpml() and lang_code:
        convert_parent['language_code'] = lang_code
        # Overwrite the convert's text fields with the translation whose target
        # language matches lang_code (fall back to the untranslated data).
        for src_language_id, target_language_id in self._notice['map']['languages'].items():
            if to_str(lang_code) == to_str(target_language_id):
                lang_data = convert_parent
                if to_str(src_language_id) in convert_parent['languages'] and convert_parent['languages'][to_str(src_language_id)]:
                    lang_data = convert_parent['languages'][to_str(src_language_id)]
                convert_parent['name'] = lang_data['name']
                convert_parent['description'] = lang_data['description']
                convert_parent['short_description'] = lang_data['short_description']
                convert_parent['meta_title'] = lang_data['meta_title']
                convert_parent['meta_keyword'] = lang_data['meta_keyword']
                convert_parent['meta_description'] = lang_data['meta_description']
                convert_parent['url_key'] = lang_data.get('url_key', '')
    category = get_value_by_key_in_dict(convert_parent, 'category', dict())
    categories_ext = get_value_by_key_in_dict(convert_parent, 'categories_ext', dict())
    category_parent_import = self.category_import(convert_parent, category, categories_ext)
    self.after_category_import(category_parent_import['data'], convert_parent, category, categories_ext)
    return category_parent_import
def get_list_from_list_by_field_as_first_key(self, list_data, field = '', first_key = ''):
    """Filter *list_data* to entries whose *field* value starts with *first_key*.

    Accepts three shapes:
    - a dict of rows: values whose ``row[field]`` starts with *first_key*;
    - a list of rows with a *field* name given: same prefix test per row;
    - a plain list of strings (*field* empty): the strings themselves are tested.

    Improvements: ``str.startswith`` replaces the non-idiomatic
    ``.find(x) == 0`` (which also scans the whole string on a miss), and the
    redundant ``field and to_str(field) != ''`` truthiness check collapses to
    ``if field`` — equivalent for string field names.
    """
    result = list()
    if isinstance(list_data, dict):
        for key, row in list_data.items():
            if field in row and row[field].startswith(first_key):
                result.append(row)
    elif field:
        for row in list_data:
            if field in row and row[field].startswith(first_key):
                result.append(row)
    else:
        # Bare string entries: skip falsy rows (original behavior), prefix-test the rest.
        for row in list_data:
            if row and row.startswith(first_key):
                result.append(row)
    return result
def process_image_before_import(self, url, path):
    """Normalize an image's absolute URL and relative storage path before download.

    When *path* is empty it is derived from *url*; paths under
    ``/wp-content/uploads/`` are re-based to be relative to the uploads dir.
    Returns ``{'url': ..., 'path': ...}``.
    """
    if not path:
        full_url = url
        path = strip_domain_from_url(url)
    else:
        full_url = join_url_path(url, path)
    if path and path.find('/wp-content/uploads/') != -1:
        newpath = path.split('/wp-content/uploads/')
        if newpath and to_len(newpath) > 1:
            path = newpath[1]
    # NOTE(review): inside the character class, `.-_` is a *range* (0x2E-0x5F),
    # which also preserves '/', ':', ';', '@', uppercase letters, etc. — likely
    # the author meant literal `.`, `-`, `_`. But the range is what keeps '/'
    # directory separators alive (e.g. '2019/01/img.jpg'), so "fixing" it would
    # break multi-segment paths. Confirm intent before changing.
    path = re.sub(r"[^a-zA-Z0-9.-_()]", '', path)
    full_url = self.parse_url(full_url)
    return {
        'url': full_url,
        'path': path
    }
def wpml_attributes_to_in_condition(self, list_keys):
    """Render *list_keys* as a SQL ``IN (...)`` literal of ``'tax_<key>'`` names.

    Empty/None input yields ``"('null')"`` so the generated SQL stays valid.
    """
    if not list_keys:
        return "('null')"
    quoted = ",".join("'tax_" + str(key) + "'" for key in list_keys)
    return "(" + quoted + ")"
def brand_image_in_condition(self, term_ids):
    """Render *term_ids* as a SQL ``IN (...)`` literal of ``'brand_taxonomy_image<id>'`` option names.

    Empty/None input yields ``"('null')"`` so the generated SQL stays valid.
    """
    if not term_ids:
        return "('null')"
    quoted = ",".join("'brand_taxonomy_image" + str(term_id) + "'" for term_id in term_ids)
    return "(" + quoted + ")"
def detect_seo(self):
    """Name of the SEO-URL strategy for this target; this cart always uses the default."""
    return 'default_seo'
def categories_default_seo(self, category, categories_ext):
    """Build the single default SEO URL entry for a category: <category base>/<slug>."""
    base = self._notice['src']['config']['product_category_base'].strip('/')
    seo_entry = self.construct_seo_category()
    seo_entry['request_path'] = base + '/' + to_str(category['slug'])
    seo_entry['default'] = True
    return [seo_entry]
def products_default_seo(self, product, products_ext):
    """Build the default SEO URL entries for a product.

    When the source product base contains ``%product_cat%``, one entry is
    produced per linked category (base/<cat-slug>/<post-name>); otherwise a
    single base/<post-name> entry. A bare <post-name> entry is always appended
    when the product has a slug.

    NOTE(review): nesting reconstructed from mangled indentation — the `else`
    is bound to the outer `%product_cat%` test (not to `if category_src`);
    confirm against the original source.
    """
    result = list()
    if self._notice['src']['config']['product_base'].find('%product_cat%') != -1:
        term_relationship = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
        category_src = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_cat')
        if category_src:
            for product_category in category_src:
                seo_product = self.construct_seo_product()
                seo_product['request_path'] = self._notice['src']['config']['product_base'].strip('/') + '/' + to_str(product_category['slug']) + '/' + to_str(product['post_name'])
                seo_product['category_id'] = product_category['term_id']
                result.append(seo_product)
    else:
        seo_product = self.construct_seo_product()
        seo_product['request_path'] = self._notice['src']['config']['product_base'].strip('/') + '/' + to_str(product['post_name'])
        seo_product['default'] = True
        result.append(seo_product)
    if product['post_name']:
        seo_product = self.construct_seo_product()
        seo_product['request_path'] = to_str(product['post_name'])
        seo_product['default'] = True
        result.append(seo_product)
    return result
def get_order_status_label(self, order_status):
    """Turn a WooCommerce status slug (e.g. ``'wc-on-hold'``) into a display label (``'On hold'``).

    Falsy input yields an empty string.
    """
    if not order_status:
        return ''
    label = order_status.replace('wc-', '').replace('-', ' ')
    return label.capitalize()
def get_woo_attribute_id(self, pro_attr_code, attribute_name, language_code = None, language_attribute_data = None, attribute_type = 'select'):
    """Find or create the WooCommerce attribute taxonomy for *pro_attr_code*.

    Resolution order: migration map -> existing ``woocommerce_attribute_taxonomies``
    row -> insert a new one. With WPML enabled, also registers the translated
    attribute label as an icl string translation for *language_code*.
    Returns the attribute id (or falsy when creation failed).
    """
    # if to_str(pro_attr_code)[0:3] != 'pa_':
    # pro_attr_code = "pa_" + pro_attr_code
    # if self.is_wpml() and language_code != self._notice['target']['language_default']:
    # attribute_data_default = self.get_convert_data_language(language_attribute_data, None, self._notice['target']['language_default'], 'option_languages')
    # option_lang_name = attribute_data_default.get('option_name')
    # if not option_lang_name:
    # option_lang_name = attribute_data_default.get('attribute_name')
    # if option_lang_name:
    pro_attr_code = urllib.parse.unquote(pro_attr_code)
    woo_attribute_id = self.get_map_field_by_src(self.TYPE_ATTR, None, 'pa_' + pro_attr_code)
    # if woo_attribute_id:
    # return woo_attribute_id
    if not woo_attribute_id:
        # Not mapped yet: check for an existing taxonomy row of the same name/type.
        attribute_data = {
            'attribute_name': pro_attr_code,
            'attribute_type': attribute_type
        }
        attribute_result = self.select_data_connector(self.create_select_query_connector('woocommerce_attribute_taxonomies', attribute_data))
        woo_attribute_id = None
        if attribute_result and attribute_result['data']:
            woo_attribute_id = attribute_result['data'][0]['attribute_id']
        if not woo_attribute_id:
            # Still missing: create the attribute taxonomy.
            pro_attr_data = {
                'attribute_name': pro_attr_code,
                'attribute_label': attribute_name,
                'attribute_type': attribute_type,
                'attribute_orderby': "menu_order",
                'attribute_public': 0,
            }
            woo_attribute_id = self.import_data_connector(self.create_insert_query_connector('woocommerce_attribute_taxonomies', pro_attr_data), 'products')
        if woo_attribute_id:
            self.insert_map(self.TYPE_ATTR, None, woo_attribute_id, 'pa_' + pro_attr_code)
    if woo_attribute_id:
        if self.is_wpml():
            # Register the translated label if it differs from the default-language one.
            attribute_data_lang = self.get_convert_data_language(language_attribute_data, None, language_code, 'option_languages')
            option_lang_name = attribute_data_lang.get('option_name')
            if not option_lang_name:
                option_lang_name = attribute_data_lang.get('attribute_name')
            if option_lang_name != attribute_name:
                translate_id = self.get_map_field_by_src('translate', woo_attribute_id, None, language_code)
                if not translate_id:
                    translate_query = {
                        'icl_strings': self.create_select_query_connector('icl_strings', {'value': attribute_name, 'name': 'taxonomy singular name: ' + attribute_name}),
                        'icl_string_translations': {
                            'type': 'select',
                            'query': "select * from _DBPRF_icl_string_translations where string_id in (" + self.create_select_query_connector('icl_strings', {'value': attribute_name, 'name': 'taxonomy singular name: ' + attribute_name}, 'id')['query'] + ")"
                        }
                    }
                    select = self.select_multiple_data_connector(translate_query)
                    if select['result'] == 'success':
                        icl_string_id = None
                        is_tranlate = False
                        if not select['data']['icl_strings']:
                            # No base string registered yet: create it in the default language.
                            icl_strings_data = {
                                'language': self._notice['target']['language_default'],
                                'context': 'WordPress',
                                'name': 'taxonomy singular name: ' + attribute_name,
                                'value': attribute_name,
                                'string_package_id': None,
                                'wrap_tag': '',
                                'type': 'LINE',
                                'title': None,
                                'status': 2,
                                'gettext_context': '',
                                # NOTE(review): this stores the md5 *object*, not .hexdigest();
                                # whatever the connector serializes here is unlikely to be the
                                # intended hex digest — confirm.
                                'domain_name_context_md5': hashlib.md5(to_str('WordPresstaxonomy singular name: ' + attribute_name).encode()),
                                'translation_priority': 'optional',
                                'word_count': None
                            }
                            icl_string_id = self.import_product_data_connector(self.create_insert_query_connector('icl_strings', icl_strings_data))
                        else:
                            icl_string = select['data']['icl_strings'][0]
                            if icl_string['language'] != language_code:
                                icl_string_id = icl_string['id']
                                check = get_row_from_list_by_field(select['data']['icl_string_translations'], 'language', language_code)
                                is_tranlate = True if check else False
                            else:
                                is_tranlate = True
                        if icl_string_id and not is_tranlate:
                            icl_string_translations_data = {
                                'string_id': icl_string_id,
                                'language': language_code,
                                'status': 10,
                                'value': option_lang_name,
                                'translator_id': None,
                                'translation_service': '',
                                'batch_id': 0,
                                'translation_date': get_current_time()
                            }
                            icl_string_translation_id = self.import_product_data_connector(self.create_insert_query_connector('icl_string_translations', icl_string_translations_data))
                            if icl_string_translation_id:
                                self.insert_map('translate', woo_attribute_id, icl_string_translation_id, None, None, None, language_code)
    return woo_attribute_id
def get_woo_attribute_value(self, attribute_value, pro_attr_code, language_code = None, attribute_data = None, desc = ''):
    """Find or create the term (term_taxonomy_id) for one attribute value.

    Resolution order: migration map -> existing term of the same name under
    taxonomy ``pa_<code>`` -> insert a new term. With WPML enabled, also
    creates the translated term and links it via ``icl_translations``.
    Returns the term_taxonomy_id (possibly falsy on failure).
    """
    pro_attr_code = urllib.parse.unquote(pro_attr_code)
    if self.is_wpml():
        # Use the localized value name when a translation exists.
        value_data = self.get_convert_data_language(attribute_data, None, language_code, 'option_value_languages')
        if value_data:
            attribute_value = value_data['option_value_name']
    # WordPress terms.name is VARCHAR(200).
    attribute_value = to_str(attribute_value)[:200]
    slug_default = self.get_slug_attr(attribute_data)
    slug = self.get_slug_attr(attribute_data, language_code)
    opt_value_id = None
    # if opt_value_exist:
    # return opt_value_exist['id_desc']
    # opt_value_exist = self.select_map(self._migration_id, self.TYPE_ATTR_VALUE, None, None, 'pa_' + pro_attr_code, None, slug)
    opt_value_exist = self.select_map(self._migration_id, self.TYPE_ATTR_VALUE, None, None, 'pa_' + pro_attr_code, None, slug, language_code)
    if opt_value_exist:
        if not self.is_wpml() or not language_code or language_code == self._notice['target']['language_default']:
            return opt_value_exist['id_desc']
        else:
            opt_value_id = opt_value_exist['id_desc']
    if not opt_value_id:
        # Look for an existing term with the same name under this attribute taxonomy.
        query = {
            'type': 'select',
            'query': 'SELECT * FROM _DBPRF_terms AS term LEFT JOIN _DBPRF_term_taxonomy AS taxonomy ON term.term_id = taxonomy.term_id WHERE term.name = ' + self.escape(attribute_value) + " AND taxonomy.taxonomy = " + self.escape('pa_' + pro_attr_code)
        }
        attribute_result = self.select_data_connector(query)
        if attribute_result and attribute_result['data']:
            opt_value_id = attribute_result['data'][0]['term_taxonomy_id']
    if not opt_value_id:
        # Create the term; non-default-language slugs get a language suffix to stay unique.
        if self.is_wpml() and language_code != self._notice['target']['language_default']:
            new_slug = slug_default + '-' + to_str(language_code) if slug == slug_default else slug
        else:
            new_slug = slug_default
        value_term = {
            'name': attribute_value,
            'slug': new_slug,
            'term_group': 0,
        }
        term_id = self.import_product_data_connector(self.create_insert_query_connector('terms', value_term), 'products')
        value_term_taxonomy = {
            'term_id': term_id,
            'taxonomy': 'pa_' + pro_attr_code,
            'description': desc,
            'parent': 0,
            'count': 0
        }
        opt_value_id = self.import_product_data_connector(self.create_insert_query_connector('term_taxonomy', value_term_taxonomy), 'products')
        if opt_value_id:
            self.insert_map(self.TYPE_ATTR_VALUE, None, opt_value_id, 'pa_' + pro_attr_code, None, slug, language_code)
    if opt_value_id:
        if self.is_wpml():
            # If the localized name differs, create the translated term and tie
            # both terms together under one trid in icl_translations.
            attribute_data_lang = self.get_convert_data_language(attribute_data, None, language_code, 'option_value_languages')
            if attribute_data_lang['option_value_name'] != attribute_value:
                translate_query = {
                    'icl_translations': {
                        'type': 'select',
                        'query': 'select * from _DBPRF_icl_translations where trid in (select trid from wp_icl_translations where ' + self.dict_to_where_condition({'element_id': opt_value_id, 'element_type': 'tax_pa_' + pro_attr_code}) + ')'
                    },
                    'term': {
                        'type': 'select',
                        'query': 'SELECT * FROM _DBPRF_terms AS term LEFT JOIN _DBPRF_term_taxonomy AS taxonomy ON term.term_id = taxonomy.term_id WHERE term.name = ' + self.escape(attribute_data_lang['option_value_name']) + " AND taxonomy.taxonomy = " + self.escape('pa_' + pro_attr_code)
                    }
                }
                select = self.select_multiple_data_connector(translate_query)
                if select['result'] == 'success':
                    trid = None
                    is_tranlate = False
                    if not select['data']['icl_translations']:
                        # No translation group yet: register the default-language row.
                        trid = self.get_new_trid()
                        icl_translations_data = {
                            'language_code': self._notice['target']['language_default'],
                            'element_type': 'tax_pa_' + pro_attr_code,
                            'element_id': opt_value_id,
                            'trid': trid,
                            'source_language_code': None,
                        }
                        icl_translation_id = self.import_product_data_connector(self.create_insert_query_connector('icl_translations', icl_translations_data))
                    else:
                        icl_translations = select['data']['icl_translations'][0]
                        trid = icl_translations['trid']
                        check = get_row_from_list_by_field(select['data']['icl_translations'], 'language_code', language_code)
                        is_tranlate = True if check else False
                    if trid and not is_tranlate:
                        new_slug = slug_default + '-' + to_str(language_code) if slug != slug_default else slug_default
                        value_term = {
                            'name': attribute_data_lang['option_value_name'],
                            'slug': new_slug,
                            'term_group': 0,
                        }
                        term_id = self.import_product_data_connector(self.create_insert_query_connector('terms', value_term), 'products')
                        value_term_taxonomy = {
                            'term_id': term_id,
                            'taxonomy': 'pa_' + pro_attr_code,
                            'description': desc,
                            'parent': 0,
                            'count': 0
                        }
                        opt_value_id = self.import_product_data_connector(self.create_insert_query_connector('term_taxonomy', value_term_taxonomy), 'products')
                        if opt_value_id:
                            icl_translations_data = {
                                'language_code': language_code,
                                'element_type': 'tax_pa_' + pro_attr_code,
                                'element_id': opt_value_id,
                                'trid': trid,
                                'source_language_code': self._notice['target']['language_default'],
                            }
                            self.import_product_data_connector(self.create_insert_query_connector('icl_translations', icl_translations_data))
                            self.insert_map(self.TYPE_ATTR_VALUE, None, opt_value_id, 'pa_' + pro_attr_code, None, slug, language_code)
    return opt_value_id
def to_timestamp(self, value, str_format = '%Y-%m-%d %H:%M:%S'):
    """Parse *value* with *str_format* into a unix timestamp, falling back to "now".

    Keeps the original behavior of also falling back when the parsed timestamp
    is 0 (the epoch itself), since ``to_int(0)`` is falsy.

    Fix: the bare ``except:`` (which swallowed even KeyboardInterrupt/SystemExit)
    is narrowed to the exceptions ``strptime``/``mktime`` actually raise.
    """
    try:
        timestamp = to_int(time.mktime(time.strptime(value, str_format)))
        if timestamp:
            return timestamp
        return to_int(time.time())
    except (TypeError, ValueError, OverflowError):
        # Unparseable or out-of-range input: best-effort fallback to current time.
        return to_int(time.time())
def get_map_field_by_src(self, map_type = None, id_src = None, code_src = None, lang = None, field = 'id_desc'):
    """Look up one *field* (default: target id) from the migration map by source id/code.

    Language-aware override: delegates to the base implementation unless a
    multilingual plugin (WPML/Polylang) is active — image maps always use the
    base lookup. Returns False when nothing matches.
    """
    if not self.is_wpml() and not self.is_polylang() or map_type in [self.TYPE_PATH_IMAGE, self.TYPE_IMAGE]:
        return super().get_map_field_by_src(map_type, id_src, code_src, field)
    if not id_src and not code_src:
        return False
    _migration_id = self._migration_id
    # if id_src:
    # code_src = None
    # else:
    # code_src = None
    map_data = self.select_map(_migration_id, map_type, id_src, None, code_src, None, None, lang)
    if not map_data:
        return False
    return map_data.get(field, False)
def select_map(self, _migration_id = None, map_type = None, id_src = None, id_desc = None, code_src = None, code_desc = None, value = None, lang = None):
    """Fetch the first migration-map row matching the given (all-optional) filters.

    Language-aware override: without WPML/Polylang (or for image maps) the base
    implementation is used. Only entity types that are actually translated
    (category/product/attribute/attribute-value) are filtered by *lang*.
    Returns the row dict or None.
    """
    if not self.is_wpml() and not self.is_polylang() or map_type in [self.TYPE_PATH_IMAGE, self.TYPE_IMAGE]:
        return super().select_map(_migration_id, map_type, id_src, id_desc, code_src, code_desc, value)
    where = dict()
    if _migration_id:
        where['migration_id'] = _migration_id
    if map_type:
        where['type'] = map_type
    if id_src:
        where['id_src'] = id_src
    if id_desc:
        where['id_desc'] = id_desc
    if code_src:
        where['code_src'] = code_src
    if code_desc:
        where['code_desc'] = code_desc
    if value:
        where['value'] = value
    if (self.is_wpml() or self.is_polylang()) and map_type in [self.TYPE_CATEGORY, self.TYPE_PRODUCT, self.TYPE_ATTR, self.TYPE_ATTR_VALUE]:
        where['lang'] = lang
    if not where:
        # No filters at all: refuse rather than returning an arbitrary row.
        return None
    result = self.select_obj(TABLE_MAP, where)
    try:
        data = result['data'][0]
    except Exception as e:
        # Missing 'data' key / empty result set: treat as "no match".
        data = None
    return data
def insert_map(self, map_type = None, id_src = None, id_desc = None, code_src = None, code_desc = None, value = None, lang = None):
    """Persist one source->target mapping row for this migration.

    Returns the inserted row data, or False when the insert failed.
    """
    # Treat zero-ish source ids that are not the literal string '0' as absent.
    if to_int(id_src) == 0 and to_str(id_src) != '0':
        id_src = None
    row = {
        'migration_id': self._migration_id,
        'type': map_type,
        'id_src': id_src,
        'code_src': code_src,
        'id_desc': id_desc,
        'code_desc': code_desc,
        'value': value,
    }
    if self.is_wpml() or self.is_polylang():
        # The lang column only exists/matters for multilingual migrations.
        row['lang'] = lang
    insert = self.insert_obj(TABLE_MAP, row)
    if insert and insert['result'] == 'success':
        return insert['data']
    return False
def is_wpml(self):
    """True when the cart on the current side (src/target) has WPML support enabled."""
    return self._notice[self.get_type()]['support'].get('wpml')
def is_polylang(self):
    """True when the cart on the current side (src/target) has Polylang support enabled."""
    return self._notice[self.get_type()]['support'].get('polylang')
def get_convert_data_language(self, convert, src_language_id = None, target_language_id = None, key_language = 'languages'):
    """Overlay translated fields from ``convert[key_language]`` onto *convert*.

    The translation is picked either directly by *src_language_id* or by
    reverse-mapping *target_language_id* through the language map. Falsy
    translated values are skipped; multiselect option values are additionally
    split on ';' and indexed by ``position_option``. Mutates and returns
    *convert* (returned unchanged when no multilingual plugin or no match).
    """
    if not self.is_wpml() and not self.is_polylang():
        return convert
    list_language_data = convert.get(key_language)
    if not list_language_data:
        return convert
    language_data = None
    if src_language_id:
        if list_language_data.get(to_str(src_language_id)):
            language_data = list_language_data[to_str(src_language_id)]
    elif target_language_id:
        for src_id, data in list_language_data.items():
            if self._notice['map']['languages'].get(to_str(src_id)) == target_language_id:
                language_data = data
                break
    if not language_data:
        return convert
    for key_lang, value in language_data.items():
        if not value:
            continue
        if key_lang == 'option_value_name' and convert.get('option_type') == self.OPTION_MULTISELECT and 'position_option' in convert:
            # Multiselect values are stored ';'-joined: pick this option's slice.
            value_lang = to_str(value).split(';')
            if len(value_lang) > to_int(convert.get('position_option')):
                value = value_lang[to_int(convert.get('position_option'))]
        convert[key_lang] = value
    return convert
def get_pro_attr_code_default(self, option):
    """Derive the sanitized attribute slug (without the ``pa_`` prefix) for an option.

    Uses the default-language name (WPML), preferring an explicit option code.
    Non-ASCII codes are truncated to 14 chars before sanitizing (their
    URL-encoded form expands, hence the larger 200-char sanitize limit;
    ASCII codes use WooCommerce's effective 28-char limit).
    """
    if self.is_wpml():
        option = self.get_convert_data_language(option, None, self._notice['target']['language_default'], 'option_languages')
    pro_attr_code = to_str(option['option_name']).lower()
    # attribute_name = option['option_name']
    pro_attr_code = pro_attr_code.replace(' ', '_')
    if option['option_code']:
        pro_attr_code = to_str(option['option_code']).lower()
        pro_attr_code = pro_attr_code.replace(' ', '_')
    pro_attr_code_len = 28
    check_encode = chardet.detect(pro_attr_code.encode())
    if check_encode['encoding'] != 'ascii':
        pro_attr_code = pro_attr_code[0:14]
        pro_attr_code_len = 200
    pro_attr_code = self.sanitize_title(pro_attr_code, pro_attr_code_len)
    return pro_attr_code
def get_slug_attr(self, option_value, language_code = None):
    """Derive the term slug for one attribute value.

    An explicit ``option_value_code`` wins; otherwise the (optionally
    WPML-localized) value name is sanitized into a slug.
    """
    code = option_value['option_value_code']
    if code:
        return self.sanitize_title(to_str(code)).lower()
    name = option_value['option_value_name']
    if self.is_wpml():
        lang = language_code or self._notice['target']['language_default']
        localized = self.get_convert_data_language(option_value, None, lang, 'option_value_languages')
        if localized:
            name = localized['option_value_name']
    return self.sanitize_title(to_str(name).lower())
def get_key_check_default(self, attributes):
    """Build the ``name:value|name:value`` fingerprint of a variant's attribute set.

    With WPML enabled, each attribute is first resolved to the target's
    default-language names so fingerprints are comparable across languages.
    """
    parts = list()
    for attribute in attributes:
        if self.is_wpml():
            attribute = self.get_convert_data_language(attribute, None, self._notice['target']['language_default'], 'option_value_languages')
        parts.append(to_str(attribute['option_name']) + ':' + to_str(attribute['option_value_name']))
    return '|'.join(parts)
def lecm_rewrite_table_construct(self):
    """Schema descriptor for the plugin's ``lecm_rewrite`` URL-redirect table."""
    columns = {
        'id': 'INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY',
        'link': 'VARCHAR(255)',
        'type': 'VARCHAR(255)',
        'type_id': 'INT(11)',
        'redirect_type': 'SMALLINT(5)',
    }
    return {'table': '_DBPRF_lecm_rewrite', 'rows': columns}
def is_woo2woo(self):
    """True for same-platform migrations (source cart type equals target cart type)."""
    return self._notice['src']['cart_type'] == self._notice['target']['cart_type']
def check_sync_child(self, child, combination, check_any = False):
    """Check whether variant *child* matches every attribute in *combination*.

    Each attribute is looked up in *child* under its name and, failing that,
    under its code (spaces mapped to '-'); the stringified values must match.
    With ``check_any`` True, attributes absent from *child* (falsy under the
    name key) are skipped and only the code-keyed value is compared.
    """
    for attribute in combination:
        if not check_any:
            if to_str(child.get(attribute['option_name'])) != to_str(attribute['option_value_name']):
                # Name key mismatched/missing: fall back to the dash-joined code key.
                if to_str(child.get(to_str(attribute['option_code']).replace(' ', '-'))) != to_str(attribute['option_value_name']):
                    return False
        elif to_str(child.get(attribute['option_name'])) and to_str(child.get(to_str(attribute['option_code']).replace(' ', '-'))) != to_str(attribute['option_value_name']):
            return False
    return True
def select_all_category_map(self):
    """Return every mapped target category id recorded for this migration.

    Uses the blog-category map type while blog migration is running.
    """
    where = {
        'migration_id': self._migration_id,
        'type': self.TYPE_CATEGORY if not self.blog_running else self.TYPE_CATEGORY_BLOG,
    }
    result = self.select_obj(TABLE_MAP, where)
    rows = result['data'] if result['result'] == 'success' and result['data'] else list()
    return [row['id_desc'] for row in rows]
def create_file_variant_limit(self):
    """Create the per-migration variants.csv report file and write its header row.

    The file lives under media/<migration_id>/; the directory is created on
    demand. Opened in append mode, so calling this twice appends a second
    header — callers guard with os.path.isfile (see log_variant_limit).
    """
    file_path = get_pub_path() + '/media/' + to_str(self._migration_id)
    if not os.path.exists(file_path):
        os.makedirs(file_path, mode = 0o777)
    file_name = file_path + '/variants.csv'
    column = ['src_id', 'target_id', 'name', 'sku', 'variants']
    with open(file_name, mode = 'a') as employee_file:
        employee_writer = csv.writer(employee_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
        employee_writer.writerow(column)
    return
def warning_variant_limit(self, convert):
    """Emit the variant-limit warning for one product, labelled by id or by code."""
    label = "#" + to_str(convert['id']) if convert['id'] else ': ' + to_str(convert['code'])
    self.sleep_time(0, 'variant', True, msg = label)
def log_variant_limit(self, product_id, convert, variants):
    """Append one over-the-variant-limit product to the variants.csv report.

    Marks the migration as having hit the limit and lazily creates the report
    file (with header) on first use.
    """
    self.is_variant_limit = True
    file_name = get_pub_path() + '/media/' + to_str(self._migration_id) + '/variants.csv'
    if not os.path.isfile(file_name):
        self.create_file_variant_limit()
    # Source id preferred; fall back to the product code when id is absent.
    column = [convert['id'] if convert['id'] else convert['code'], product_id, convert['name'], convert['sku'], variants]
    with open(file_name, mode = 'a') as employee_file:
        employee_writer = csv.writer(employee_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
        employee_writer.writerow(column)
    return
def check_slug_exist(self, slug = None):
    """Return the term_id of an existing term with *slug*, or False when none exists."""
    lookup = self.select_data_connector(self.create_select_query_connector('terms', {'slug': slug}))
    try:
        return lookup['data'][0]['term_id']
    except Exception:
        # Missing 'data' key / empty result: no such slug.
        return False
def get_query_img_wpml(self, img_id, language_code):
    """Build the icl_translations insert query registering an attachment's WPML language row."""
    source_lang = self._notice['target']['language_default']
    target_lang = language_code
    if source_lang == target_lang:
        # The default-language row carries no source language in WPML.
        source_lang = None
    row = {
        'element_type': 'post_attachment',
        'element_id': img_id,
        'trid': self.get_new_trid(),
        'language_code': target_lang,
        'source_language_code': source_lang,
    }
    return self.create_insert_query_connector("icl_translations", row)
def check_exist_code_product(self, code_product):
    # NOTE(review): looks unfinished — `{'posttype'}` is a *set* literal (likely a
    # {'post_type': ...} where-dict was intended), the `code_product` argument is
    # never used, and the result is neither inspected nor returned (the method
    # implicitly returns None). Confirm intent before relying on this method.
    check = self.select_data_connector(self.create_select_query_connector('posts', {'posttype'}))
def _get_customer_lookup_id(self, user_id):
if not user_id:
return 0
select = {
'user_id': user_id,
}
customer_lookup_data = self.select_data_connector(self.create_select_query_connector('wc_customer_lookup', select))
try:
customer_lookup_id = customer_lookup_data['data'][0]['customer_id']
except Exception:
customer_lookup_id = 0
return customer_lookup_id | [
"noreply@github.com"
] | Popss2701.noreply@github.com |
59534247ee1449496330021da54fc527d05a14e3 | 34a043e6961639657e36e7ac9fd459ad5b1f6de1 | /openpathsampling/experimental/storage/test_mdtraj_json.py | f3c57c4ad31a103b69866649884b52ccf8542b6a | [
"MIT"
] | permissive | dwhswenson/openpathsampling | edaddc91e443e7ffc518e3a06c99fc920ad9d053 | 3d02df4ccdeb6d62030a28e371a6b4ea9aaee5fe | refs/heads/master | 2023-02-04T12:31:17.381582 | 2023-01-30T21:17:01 | 2023-01-30T21:17:01 | 23,991,437 | 3 | 1 | MIT | 2022-08-12T17:48:04 | 2014-09-13T10:15:43 | Python | UTF-8 | Python | false | false | 2,273 | py | from .mdtraj_json import *
import pytest
import numpy as np
import numpy.testing as npt
from ..simstore.custom_json import bytes_codec, numpy_codec, custom_json_factory
from ..simstore.test_custom_json import CustomJSONCodingTest
from openpathsampling.tests.test_helpers import data_filename
class MDTrajCodingTest(CustomJSONCodingTest):
    """Shared harness for MDTraj JSON-codec tests.

    Subclasses set ``self.codec``, ``self.objs`` and the expected ``self.dcts``
    in their own ``setup``. Skips entirely when mdtraj is not installed.
    """
    def setup(self):
        if not HAS_MDTRAJ:
            pytest.skip()
        self.filename = data_filename('ala_small_traj.pdb')
    def test_default(self):
        """Codec's ``default()`` must produce the expected dict for each object."""
        # custom for handling numpy: array-valued entries need npt comparison,
        # plain entries use ordinary equality.
        for (obj, dct) in zip(self.objs, self.dcts):
            default = self.codec.default(obj)
            numpy_attrs = [attr for attr, val in dct.items()
                           if isinstance(val, np.ndarray)]
            other_attrs = [attr for attr, val in dct.items()
                           if not isinstance(val, np.ndarray)]
            for attr in numpy_attrs:
                npt.assert_array_equal(default[attr], dct[attr])
            for attr in other_attrs:
                assert default[attr] == dct[attr]
    def test_round_trip(self):
        """Objects must survive encode -> decode with the combined codec stack."""
        codecs = [numpy_codec, bytes_codec] + mdtraj_codecs
        encoder, decoder = custom_json_factory(codecs)
        self._test_round_trip(encoder, decoder)
class TestTopologyCoding(MDTrajCodingTest):
    """JSON-codec round-trip tests for ``mdtraj.Topology``."""
    def setup(self):
        super(TestTopologyCoding, self).setup()
        self.codec = top_codec
        top = md.load(self.filename).topology
        # Expected serialized form mirrors Topology.to_dataframe(): the atom
        # table as a JSON string plus the raw bonds array.
        dataframe, bonds = top.to_dataframe()
        self.objs = [top]
        self.dcts = [{
            '__class__': 'Topology',
            '__module__': 'mdtraj.core.topology',
            'atoms': dataframe.to_json(),
            'bonds': bonds
        }]
class TestTrajectoryCoding(MDTrajCodingTest):
    """JSON-codec round-trip tests for ``mdtraj.Trajectory``."""
    def setup(self):
        super(TestTrajectoryCoding, self).setup()
        self.codec = traj_codec
        traj = md.load(self.filename)
        self.objs = [traj]
        # Expected serialized form: coordinates, topology, per-frame times,
        # and unit-cell parameters, straight off the Trajectory attributes.
        self.dcts = [{
            '__class__': 'Trajectory',
            '__module__': 'mdtraj.core.trajectory',
            'xyz': traj.xyz,
            'topology': traj.topology,
            'time': traj.time,
            'unitcell_lengths': traj.unitcell_lengths,
            'unitcell_angles': traj.unitcell_angles
        }]
| [
"dwhs@hyperblazer.net"
] | dwhs@hyperblazer.net |
3493381777ce41dcb975ad7f011e2b61b299f283 | 69d0deb5921edc82eea0ae184db99b87a0ca6900 | /catkin_ws/build/srrg2_solver_calib_addons/catkin_generated/pkg.installspace.context.pc.py | f987c6cfb6f21976c245dc0a57e1233ba5d4bbf7 | [
"MIT"
] | permissive | laaners/progetto-labiagi_pick_e_delivery | 8d4006e206cd15b90b7e2291876c2b201e314621 | 3453bfbc1dd7562c78ba06c0f79b069b0a952c0e | refs/heads/main | 2023-08-19T00:17:51.491475 | 2021-09-16T16:35:45 | 2021-09-16T16:35:45 | 409,192,385 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (consumed by catkin's pkg-config
# template when generating the install-space .pc file). Do not edit by hand —
# regenerate via the catkin build instead.
CATKIN_PACKAGE_PREFIX = ""
# "${prefix}" is expanded later by pkg-config itself.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsrrg2_solver_calib_utils_library".split(';') if "-lsrrg2_solver_calib_utils_library" != "" else []
PROJECT_NAME = "srrg2_solver_calib_addons"
PROJECT_SPACE_DIR = "/home/alessiohu/Desktop/progetto-labiagi/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"hu.183947@studenti.uniroma1.it"
] | hu.183947@studenti.uniroma1.it |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.