Dataset schema (column: type, per the dataset viewer):

- `blob_id`: string (40 chars)
- `directory_id`: string (40 chars)
- `path`: string (2–616 chars)
- `content_id`: string (40 chars)
- `detected_licenses`: list (0–69 items)
- `license_type`: string (2 classes)
- `repo_name`: string (5–118 chars)
- `snapshot_id`: string (40 chars)
- `revision_id`: string (40 chars)
- `branch_name`: string (4–63 chars)
- `visit_date`: timestamp[us]
- `revision_date`: timestamp[us]
- `committer_date`: timestamp[us]
- `github_id`: int64 (2.91k–686M, nullable)
- `star_events_count`: int64 (0–209k)
- `fork_events_count`: int64 (0–110k)
- `gha_license_id`: string (23 classes)
- `gha_event_created_at`: timestamp[us]
- `gha_created_at`: timestamp[us]
- `gha_language`: string (213 classes)
- `src_encoding`: string (30 classes)
- `language`: string (1 class)
- `is_vendor`: bool (2 classes)
- `is_generated`: bool (2 classes)
- `length_bytes`: int64 (2–10.3M)
- `extension`: string (246 classes)
- `content`: string (2–10.3M chars)
- `authors`: list (1 item)
- `author_id`: string (0–212 chars)

| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9986072f1abd4c08e2ba430e0d507c373a99150
|
f0abac6fa2ec8740a63d3ad1fffd778bd317de6c
|
/project1/parti_min_df=2.py
|
788f879e57d37fe944f54937ea66a2cea7712f62
|
[] |
no_license
|
ashwinkannan94/Large-Scale-Data-Mining
|
3beb89072cdba3ad27c2e279c8806f8be79d333d
|
51df9c573f7d6f92b6c8a05007f204c6f967d267
|
refs/heads/master
| 2021-05-09T00:39:32.973665
| 2018-02-21T07:00:26
| 2018-02-21T07:00:26
| 119,751,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
from sklearn.datasets import fetch_20newsgroups
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import NMF
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
from sklearn.linear_model import LogisticRegression
# part a
computer_categories = ['comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware']
recreational_categories = ['rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey']
computer_train = fetch_20newsgroups(subset='train', categories=computer_categories, shuffle=True, random_state=42)
computer_test = fetch_20newsgroups(subset='test', categories=computer_categories, shuffle=True, random_state=42)
recreational_train = fetch_20newsgroups(subset='train', categories=recreational_categories, shuffle=True, random_state=42)
recreational_test = fetch_20newsgroups(subset='test', categories=recreational_categories, shuffle=True, random_state=42)
train_and_test = computer_train.data + computer_test.data + recreational_train.data + recreational_test.data
stop_words = text.ENGLISH_STOP_WORDS
analyzer = CountVectorizer().build_analyzer()
stemmer = SnowballStemmer("english")
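# Tokenizer for the CountVectorizer below: run the default word analyzer, then stem each token.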
def stemmed_words(doc):
return (stemmer.stem(w) for w in analyzer(doc))
train_classification = [1] * len(computer_train.data) + [-1] * len(recreational_train.data)
test_classification = [1] * len(computer_test.data) + [-1] * len(recreational_test.data)
count_vect = CountVectorizer(analyzer='word', min_df=2, stop_words=stop_words, tokenizer=stemmed_words)
X_train_counts = count_vect.fit_transform(train_and_test)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# SVD LSI
svd = TruncatedSVD(n_components=50, random_state=42)
svd_lsi_tfidf = svd.fit_transform(X_train_tfidf)
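# Rows were stacked as [comp_train | comp_test | rec_train | rec_test] above,
# so slice the LSI features back apart into train and test sets.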
LSI_test_data = np.concatenate((svd_lsi_tfidf[len(computer_train.data):(len(computer_train.data) + len(computer_test.data))],
svd_lsi_tfidf[(len(computer_train.data) + len(computer_test.data) + len(recreational_train.data)):]))
LSI_train_data = np.concatenate((svd_lsi_tfidf[0:len(computer_train.data)], svd_lsi_tfidf[(len(computer_train.data) +
len(computer_test.data)):(len(computer_train.data) + len(computer_test.data) + len(recreational_train.data))]))
l1_accuracy = []
l2_accuracy = []
def logistic_regression(regularize, penalize):
logistic_regression_classifier = LogisticRegression(C=regularize, penalty=penalize)
logistic_regression_classifier.fit(LSI_train_data, train_classification)
class_which_was_predicted = logistic_regression_classifier.predict(LSI_test_data)
actual_class_passed = test_classification
predict_probability = logistic_regression_classifier.predict_proba(LSI_test_data[:])[:, 1]
print('Regularization term: ' + str(regularize))
print('Penalization term: ' + str(penalize))
print('Accuracy for LSI is: ' + str(metrics.accuracy_score(actual_class_passed, class_which_was_predicted)))
print('Precision for LSI is: ' + str(metrics.precision_score(actual_class_passed, class_which_was_predicted, average='macro')))
print('Recall for LSI is: ' + str(metrics.recall_score(actual_class_passed, class_which_was_predicted, average='macro')))
print('Confusion matrix for LSI is: ' + str(metrics.confusion_matrix(actual_class_passed, class_which_was_predicted)))
false_positive_rate_LSI, true_positive_rate_LSI, c = roc_curve(actual_class_passed, predict_probability)
plt.figure(1)
plt.plot(false_positive_rate_LSI, true_positive_rate_LSI)
plt.plot([0, 1], [0, 1])
plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
plt.title('ROC Curve of LSI Logistic Regression Classification With min_df=2')
return metrics.accuracy_score(actual_class_passed, class_which_was_predicted)
for x in range(-7, 7):
l1_accuracy.append(logistic_regression(pow(10, x), 'l1'))
l2_accuracy.append(logistic_regression(pow(10, x), 'l2'))
plt.figure(2)
x_labels = ['0.0000001', '0.000001', '0.00001', '0.0001', '0.001', '0.01', '0.1', '1', '10', '100', '1000', '10000', '100000', '1000000']
y_labels = ['0', '20%', '40%', '60%', '80%', '100%']
plt.plot(range(-7, 7), l1_accuracy, 's', label='l1 Norm Regularization', c='b')
plt.plot(range(-7, 7), l1_accuracy, c='b')
plt.plot(range(-7, 7), l2_accuracy, 'D', label='l2 Norm Regularization', c='g')
plt.plot(range(-7, 7), l2_accuracy, c='g')
plt.ylabel('Total Accuracy of Classification')
plt.xlabel('Regularization Term')
plt.title('Accuracy vs. Regularization Term')
plt.show()
|
[
"ashwinkumar.kannan@gmail.com"
] |
ashwinkumar.kannan@gmail.com
|
9eb8d914c3ecf4d460b4c1e5ab1915dfaaad58c3
|
b2db52eff7b186c16d83afc0fe0a7277648d8975
|
/kvak/functions/bot_updates.py
|
057fa8df6cbff2313b4cced205e8ac99beb9b1c4
|
[] |
no_license
|
HolyHelicopter/kvak
|
d99cc06c4b538f1a2f3bf632ff39cd85a32c219e
|
3a6f5db2c7c439f8ef88627a7c37d47b258d09eb
|
refs/heads/master
| 2023-01-06T10:04:06.893331
| 2020-11-03T14:35:34
| 2020-11-03T14:35:34
| 302,491,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,107
|
py
|
import requests
import random
import datetime
import time
TG_URL = "https://api.telegram.org/bot1221959365:AAHAZKkaa5hJF0bchJFfVM9uT9Hhv-jOfzg/"
BING_URL = "https://bing-image-search1.p.rapidapi.com/images/search?q="
BING_HEADERS = {
"x-rapidapi-host": "bing-image-search1.p.rapidapi.com",
"x-rapidapi-key": "fb3dd38f00msh738d9f9b25b29acp13d692jsn8859445253e3",
"useQueryString": 'true'
}
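# Fallback Bing queries (Russian): "frog meme", "frog cosplay", "frog fairy tale",
# "drawn frog", "funny frog", "Kermit frog".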
BACKUP_QUERIES = [
'лягушка мем',
'лягушка косплей',
'лягушка сказка',
'лягушка нарисованная',
'лягушка смешная',
'лягушка кермит'
]
def bot_updates(updates):
try:
for update in updates:
if 'message' in update:
message = update['message']
chat_id = message['chat']['id']
if 'text' in message and message['text']:
message_text = message['text']
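                    # Lowercase only the Cyrillic letters К, В, А so any casing of the trigger "квак" matches.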
lowercase_text = message_text.replace('К', 'к').replace('В', 'в').replace('А', 'а')
if 'квак' in lowercase_text:
words = message_text.split()
words_temp = []
for word in words:
if 'квак' not in word.replace('К', 'к').replace('В', 'в').replace('А', 'а'):
words_temp.append(word)
words = words_temp
query = 'лягушка'
for word in words:
query += ' ' + word
print(query)
found_images = requests.get(
BING_URL + query,
headers=BING_HEADERS
).json()['value']
if not len(found_images):
print('no results')
backup_index = random.randint(0, len(BACKUP_QUERIES) - 1)
backup_query = BACKUP_QUERIES[backup_index]
print(backup_query)
found_images = requests.get(
BING_URL + backup_query,
headers=BING_HEADERS
).json()['value']
if len(found_images):
image_index = random.randint(0, len(found_images) - 1)
image_url = found_images[image_index]['contentUrl']
try:
requests.post(
TG_URL + 'sendPhoto',
{
'chat_id': chat_id,
'photo': image_url
}
)
except Exception as e:
print(str(e))
image_index = random.randint(0, len(found_images) - 1)
image_url = found_images[image_index]['contentUrl']
try:
requests.post(
TG_URL + 'sendPhoto',
{
'chat_id': chat_id,
'photo': image_url
}
)
except Exception as e:
pass
# file_content = requests.get(image_url)
# file_content = file_content.content
# data = {
# 'chat_id': '811288345',
# }
# requests.post(
# "https://api.telegram.org/bot1221959365:AAHAZKkaa5hJF0bchJFfVM9uT9Hhv-jOfzg/sendPhoto",
# data=data,
# files={'photo': ('квак.jpg', file_content)}
# )
except Exception as e:
print(e)
current_time = datetime.datetime.now()
time_end = current_time + datetime.timedelta(hours=20)
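# Seed the long-poll offset from the newest pending update.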
updates_response = requests.post(TG_URL + 'getUpdates').json()
if 'result' in updates_response and len(updates_response['result']):
last_update_id = updates_response['result'][-1]['update_id']
offset = last_update_id
while current_time < time_end:
updates_response = requests.post(TG_URL + 'getUpdates', {'offset': offset}).json()
if 'result' in updates_response and len(updates_response['result']):
updates = updates_response['result']
bot_updates(updates)
last_update_id = updates[-1]['update_id']
offset = last_update_id + 1
time.sleep(10)
current_time = datetime.datetime.now()
|
[
"holyhelicopter@yandex.ru"
] |
holyhelicopter@yandex.ru
|
a1f924be1664e8f0574715c1ec97c9fe4238ee03
|
814a5ccb1e6275b604cae965cd34f4b857b198cc
|
/opencv-start/morphologicalTransformations.py
|
3d7c91d72db6a27d6cbe01edd1dabd45366345ac
|
[] |
no_license
|
Arijit02/opencv-start
|
ea9adfca4185ba5307000a856b7229c34f094ce0
|
5e3562488adc6072593b5042fdb3c8f875206cf2
|
refs/heads/master
| 2022-11-30T02:45:38.973852
| 2020-08-12T03:24:05
| 2020-08-12T03:24:05
| 264,355,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
import cv2
import os
import numpy as np
from matplotlib import pyplot as plt
filePath = os.path.dirname(__file__)
imagePath = os.path.join(filePath, "../images/smarties.png")
img = cv2.imread(imagePath, 0)
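# Inverse binary threshold: pixels darker than 220 become white (foreground) in the mask.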
_, mask = cv2.threshold(img, 220, 255, cv2.THRESH_BINARY_INV)
kernel = np.ones([3, 3], np.uint8)
# kernel2 = np.ones([2, 2], np.uint8)
dilation = cv2.dilate(mask, kernel, iterations=2)
erosion = cv2.erode(mask, kernel, iterations=2)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=2)
mg = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel, iterations=2)
th = cv2.morphologyEx(mask, cv2.MORPH_TOPHAT, kernel, iterations=2)
titles = ['Original Image', 'Mask', 'Dilation',
          'Erosion', 'Opening', 'Closing', 'Gradient', 'Top Hat']
images = [img, mask, dilation, erosion, opening, closing, mg, th]
for i in range(8):
plt.subplot(2, 4, i+1), plt.imshow(images[i], "gray")
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
|
[
"arijitjuite23@gmail.com"
] |
arijitjuite23@gmail.com
|
046559d003fa8dab31306f89a50e26d810e7dbb2
|
0a3fd2d1f27712271903a593fb8acce711efe44b
|
/actions/admin.py
|
c0c12b255d956f944120c5fef81c5e5f92f42a1e
|
[] |
no_license
|
SanjarRakhmonov/qwertyuiiiiiiur
|
018c6a60212dae6eeb4a667aeaa6a9c3320b1225
|
1bcf32b3d6f26cf985da0b3ccab4aaec64e93262
|
refs/heads/master
| 2021-01-19T17:25:31.827933
| 2017-01-28T07:34:02
| 2017-01-28T07:34:02
| 82,455,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
from django.contrib import admin
from .models import Action
class ActionAdmin(admin.ModelAdmin):
list_display = ('user', 'verb', 'target', 'date')
list_filter = ('date',)
search_fields = ('verb',)
admin.site.register(Action, ActionAdmin)
|
[
"sanjarbekraxmonov@gmail.com"
] |
sanjarbekraxmonov@gmail.com
|
a11c4102f7cd07bc3caf030828b40fd6cdbf9b43
|
784d300337af936cb0cd7e3a3fc95805a5ab8524
|
/nets/layers.py
|
f3816e6dec3aefb415c4cbb57bd93489cdd3f559
|
[] |
no_license
|
xiangchao2018/Keras-Mask-RCNN
|
cf16e3df4fad496be1f2737879c34cfef89835d4
|
8eac95cdc4e4049b13507f1bc29e09ddf27da52b
|
refs/heads/main
| 2023-02-06T16:11:46.793744
| 2020-12-27T16:41:56
| 2020-12-27T16:41:56
| 324,801,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,131
|
py
|
import tensorflow as tf
from keras.engine import Layer
import numpy as np
from utils import utils
#----------------------------------------------------------#
# Proposal Layer
#   Converts the anchor boxes (priors) into region proposals
#----------------------------------------------------------#
def apply_box_deltas_graph(boxes, deltas):
    # Compute the centers and the height/width of the anchor boxes
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
    # Apply the deltas to get the adjusted centers and height/width
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
    # Convert back to top-left and bottom-right corner coordinates
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(Layer):
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
# [rpn_class, rpn_bbox, anchors]
def call(self, inputs):
        # Foreground probability of each anchor [batch, num_rois, 1]
scores = inputs[0][:, :, 1]
        # Refinement deltas for each anchor [batch, num_rois, 4]
deltas = inputs[1]
        # Rescale the deltas by the std-dev factors [0.1 0.1 0.2 0.2]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
        # Select the top-scoring boxes (up to PRE_NMS_LIMIT, typically 6000)
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
        # Indices of those boxes
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
        # Gather their scores
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
        # Gather their refinement deltas
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
        # Gather the corresponding anchor boxes
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# [batch, N, (y1, x1, y2, x2)]
        # Decode the anchors into boxes
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# [batch, N, (y1, x1, y2, x2)]
        # Keep the boxes inside the image bounds
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
        # Non-maximum suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
            # If fewer proposals than requested survive NMS,
            # pad the result with zeros
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
#----------------------------------------------------------#
# ROIAlign Layer
#   Uses the proposal boxes to crop regions from the feature maps
#----------------------------------------------------------#
def log2_graph(x):
return tf.compat.v1.log(x) / tf.compat.v1.log(2.0)
def parse_image_meta_graph(meta):
"""
    Split the components packed into the image meta tensor.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
class PyramidROIAlign(Layer):
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs):
        # Proposal box coordinates
boxes = inputs[0]
        # image_meta carries the required image information
image_meta = inputs[1]
        # All of the feature maps [batch, height, width, channels]
feature_maps = inputs[2:]
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
        # Size of the original input image
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Pick the pyramid level for each proposal based on its size
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
# batch_size, box_num
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
        # Crop from each of P2-P5 in turn
for i, level in enumerate(range(2, 6)):
            # Boxes assigned to this feature level
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
box_to_level.append(ix)
            # Which image in the batch each box belongs to
box_indices = tf.cast(ix[:, 0], tf.int32)
            # Stop gradients from flowing through the boxes
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
pooled = tf.concat(pooled, axis=0)
        # Stack the crop order together with the image each box belongs to
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
        # box_to_level[:, 0] is the image index
        # box_to_level[:, 1] is the box index within that image
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        # Sort so that boxes from the same image are grouped together
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
        # Fetch the crop indices in that sorted order
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
        # Reshape back to the batched layout, i.e.
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
#----------------------------------------------------------#
# Detection Layer
#
#----------------------------------------------------------#
def refine_detections_graph(rois, probs, deltas, window, config):
"""细化分类建议并过滤重叠部分并返回最终结果探测。
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
    # Class with the highest score for each ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # (ROI index, class id) pairs
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    # Gather the winning class scores
class_scores = tf.gather_nd(probs, indices)
    # ...and the class-specific box deltas
deltas_specific = tf.gather_nd(deltas, indices)
    # Decode the boxes
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip to the 0-1 window
refined_rois = clip_boxes_graph(refined_rois, window)
    # Drop background ROIs (class id 0)
keep = tf.where(class_ids > 0)[:, 0]
    # Also drop low-scoring detections
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.compat.v1.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.compat.v1.sparse_tensor_to_dense(keep)[0]
    # What remains: non-background, high-scoring boxes with their classes and scores
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
    # 2. Apply per-class non-maximum suppression
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
    # 3. Collect the proposals that survived NMS
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.compat.v1.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.compat.v1.sparse_tensor_to_dense(keep)[0]
    # Keep the num_keep highest-scoring boxes
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.compat.v1.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
    # Pad with zeros if there are fewer than DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
def norm_boxes_graph(boxes, shape):
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
class DetectionLayer(Layer):
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
        # Express the window in normalized (fractional) coordinates
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
#----------------------------------------------------------#
# Detection Target Layer
#   This part takes the region proposals as input,
#   measures how well they overlap the ground-truth boxes,
#   keeps the proposals that actually contain objects,
#   encodes regression targets from the proposal/ground-truth pairs,
#   and reshapes the masks to match the prediction format
#----------------------------------------------------------#
def overlaps_graph(boxes1, boxes2):
"""
    Compute the IoU overlap between every box in boxes1 and boxes2.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns a matrix of shape [len(boxes1), len(boxes2)].
"""
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
    # Strip the zero padding added in earlier steps
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    # Overlap of each proposal with every ground-truth box [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
    # Overlap with the crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs overlap a ground-truth box with IoU >= 0.5
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs have IoU < 0.5 with every ground-truth box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
    # Balance the positive and negative samples:
    # keep at most 33% positives
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
    # Keep the positive/negative ratio
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather the positive and negative ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
    # Overlaps restricted to the positive ROIs
positive_overlaps = tf.gather(overlaps, positive_indices)
    # Check whether any ground-truth boxes exist
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_overlaps)[1], 0),
true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
)
    # Ground-truth box and class assigned to each positive ROI
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Compute the regression targets the network should predict
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
    # Transpose the masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the mask layer belonging to each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
    # Round so the resized masks stay binary (0 or 1)
masks = tf.round(masks)
    # Training normally uses config.TRAIN_ROIS_PER_IMAGE proposals;
    # pad with zeros if there are fewer
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
def trim_zeros_graph(boxes, name='trim_zeros'):
"""
    If the previous step yielded fewer than POST_NMS_ROIS_TRAINING proposals,
    the tensor was zero-padded; strip that padding here.
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
class DetectionTargetLayer(Layer):
"""找到建议框的ground_truth
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)]建议框
gt_class_ids: [batch, MAX_GT_INSTANCES]每个真实框对应的类
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]真实框的位置
gt_masks: [batch, height, width, MAX_GT_INSTANCES]真实框的语义分割情况
Returns:
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]内部真实存在目标的建议框
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]每个建议框对应的类
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]每个建议框应该有的调整参数
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]每个建议框语义分割情况
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
        # Encode targets from the ground-truth boxes
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
|
[
"424236969@qq.com"
] |
424236969@qq.com
|
27cf3570d3f770805865182d662d6edb2303990c
|
0a0bf0c955e98ffebf0bee81496291e984366887
|
/utils/config/model_config.py
|
994e3a34da5f03363479a1f0877879260ee3f709
|
[] |
no_license
|
MaxinAI/school-of-ai
|
11ee65c935638b8bb9f396f25c943bd6e8e7fc0f
|
3c8f11ae6cb61df186d4dfa30fa5aba774bfbeba
|
refs/heads/master
| 2023-01-22T17:24:33.208956
| 2023-01-20T14:49:40
| 2023-01-20T14:49:40
| 212,200,415
| 52
| 77
| null | 2020-04-10T07:15:06
| 2019-10-01T21:11:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 986
|
py
|
"""
Created on Apr 23, 2020
Configuration scripts for model
@author: Levan Tsinadze
"""
from argparse import Namespace
import torch
from torch import nn
from torch.jit import trace, ScriptModule
# Config Parameters
_DEF_DEVICE = 'cuda'
_CPU_DEVICE = 'cpu'
GPU = _DEF_DEVICE
CPU = _CPU_DEVICE
def init_device(conf: Namespace) -> str:
"""
    Initialize device to bind model and data
Args:
conf: configuration parameters
Returns:
device name
"""
return GPU if conf.gpu and torch.cuda.is_available() else CPU
@torch.no_grad()
def script_model(model: nn.Module, sizes: list) -> ScriptModule:
"""
    Convert the model to a traced script model
Args:
model: model to convert
sizes: sizes of input
Returns:
graph_model: converted model
"""
xs = tuple(torch.randn(1, 3, s, s, requires_grad=False) for s in sizes)
graph_model = trace(model.eval(), xs)
graph_model.eval()
return graph_model
|
[
"levantsinadze2gmail.com"
] |
levantsinadze2gmail.com
|
a147ddfe91435e2ad013884b37a36e5a95b9627b
|
01ad8a5868befae1530ab631ffde2b3965fe9776
|
/Maya/realtimeExp/Read_Csv_File_01.py
|
560b18161313f52730a7fe20da32d0ee9110df2c
|
[] |
no_license
|
hamin7/BCI_and_Face_Tracking
|
0b91a031e9b4d97a04ca00e66d0bc0a3c3134b51
|
efcb97fe2cae0fe2aa5d408a33151da1c1f3feb4
|
refs/heads/master
| 2023-01-20T06:53:27.902984
| 2019-11-01T12:12:49
| 2019-11-01T12:12:49
| 161,595,179
| 0
| 0
| null | 2023-01-05T21:54:20
| 2018-12-13T06:39:17
|
Python
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
import argparse, csv, sys
from settings import *
# command arguments
parser = argparse.ArgumentParser(description='csv to postgres',
                                 fromfile_prefix_chars="@")
parser.add_argument('file', help='csv file to import', action='store')
args = parser.parse_args()
csv_file = args.file
# open csv file
with open(csv_file, 'rb') as csvfile:
# get number of columns
for line in csvfile.readlines():
array = line.split(',')
first_item = array[0]
num_columns = len(array)
csvfile.seek(0)
reader = csv.reader(csvfile, delimiter=' ')
included_cols = [1, 2, 6, 7]
for row in reader:
content = list(row[i] for i in included_cols)
print content
|
[
"noreply@github.com"
] |
hamin7.noreply@github.com
|
47a0f815d211b046ead0420671156e07fefd4628
|
64c4350ac51ed53035e870f3af182f07d3b5d1b7
|
/numpy_trigo1.py
|
97df81739c60f8b5f6c08e3a04bea5f642a2a7f2
|
[
"MIT"
] |
permissive
|
mercadder/python
|
5c9de1ec1eb8496f1031b4b47f1fb84435598e9e
|
85a005fee24a613ac5bb33847cccc8e32f1ceebd
|
refs/heads/master
| 2021-05-10T13:28:16.684954
| 2018-01-22T16:14:58
| 2018-01-22T16:14:58
| 118,475,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#%%
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
x = np.linspace(0, 30, 100)
plt.plot(x * np.pi / 180, np.sin(x))
plt.xlabel('Angle [rad]')
plt.ylabel('sin(x)')
plt.axis('tight')
plt.show()
|
[
"noreply@github.com"
] |
mercadder.noreply@github.com
|
22309c4be0e78b1e73713c16f8bf4caed4d6134c
|
3682b03a42deb68f33a23ae3cdf3c3b1d09b9e5c
|
/project/project/urls.py
|
b32b28e32aa890bd4970c7b121603c007cfbd00f
|
[] |
no_license
|
hfaezeh/test
|
9616683f906a35d0ef4cc52c58d5f9d935df80c4
|
68b610b67f561d70b72f1b130ebb2167a9914f8d
|
refs/heads/master
| 2020-03-12T04:43:38.185605
| 2018-04-30T11:09:15
| 2018-04-30T11:09:15
| 130,450,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import include, url
from django.contrib import admin
from app.views import *
from users.views import *
from polls.views import *
from project.routers import HybridRouter
# We use a single global DRF Router that routes views from all apps in project
router = HybridRouter()
# app views and viewsets
router.register(r'tool', ToolViewSet, r"tool")
router.add_api_view(r'author', url(r'^author/(?P<pk>.*)$',AuthorViewSet.as_view(), name=r"author"))
router.register(r'book', BookViewSet, r"book")
router.register(r'user', UserViewSet, r"user")
router.register(r'user_test', UserModelViewSet, r'user_test')
router.add_api_view(r'auth', url(r'^auth/$', ObtainAuthToken.as_view(), name=r"auth"))
router.add_api_view(r'file', url(r'^file/(?P<pk>.*)$', FileViewSet.as_view(), name=r'file'))
urlpatterns = [
# default django admin interface (currently unused)
url(r'^admin/', include(admin.site.urls)),
# root view of our REST api, generated by Django REST Framework's router
url(r'^api/', include(router.urls, namespace='api')),
# index page should be served by django to set cookies, headers etc.
url(r'^$', index_view, {}, name='index'),
#url(r'^api/upload', upload_file, {}, name='upload'),
]
# let django built-in server serve static and media content
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"faezehhaghbayan@faezehs-MacBook-Pro.local"
] |
faezehhaghbayan@faezehs-MacBook-Pro.local
|
2be9b6c0d13edf15e6972abee4e9f80dd8778976
|
d8711e79bc7784af91f1807a49842ffaf80280cd
|
/src/BluetoothPoller.py
|
7278d37253be9e6e8936bc0bb4a67e0a66305762
|
[] |
no_license
|
mehdilauters/wifiScanMap
|
0b401f7bceac53896434e95df42fa9b455f0f5b8
|
9adcd08e43db5dcb7700450abed00c97e140ef21
|
refs/heads/master
| 2020-12-15T03:57:02.244177
| 2017-07-27T10:50:46
| 2017-07-27T10:50:46
| 53,189,162
| 141
| 33
| null | 2016-12-11T17:26:07
| 2016-03-05T07:20:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
from threading import Lock
import threading
import time
import subprocess
import re
import PrctlTool
class BluetoothPoller(threading.Thread):
def __init__(self, app):
threading.Thread.__init__(self)
self.application = app
self.lock = Lock()
self.stations = []
self.running = True #setting the thread running to true
self.major_device_description = {
            0b00000: 'miscellaneous',
0b00001: 'computer',
0b00010: 'mobile',
0b00011: 'lan',
0b00100: 'audio',
0b00101: 'peripheral',
0b00110: 'imaging',
0b00111: 'wearable',
0b01000: 'toy',
0b01001: 'health',
0b11111: 'unknown',
}
if self.application.args.sleep is not None:
self.sleep = int(self.application.args.sleep)
else:
self.sleep = 1
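    # The major device class occupies bits 8-12 of the Bluetooth class-of-device field.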
def parse_class(self, _class):
return (_class >> 8 & 0b0000000000011111)
def get_major_device_description(self, major):
try:
return self.major_device_description[major]
except:
self.application.log('bluetooth', 'invalid class %s'%major)
def run(self):
PrctlTool.set_title('bluetooth poller')
try:
while self.running:
cmd = ['hcitool', 'inq']
pos = self.application.getPosition()
fix = pos is not None
if fix:
lon, lat, source = pos
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
(stdoutdata, stderrdata) = process.communicate();
res = re.findall("\s(.*)\sclock.*\sclass:\s(.*)", stdoutdata)
stations = []
if res is not None:
for row in res:
station = {}
if fix:
station["latitude"] = lat
station["longitude"] = lon
station["gps"] = source == 'gps'
station['bssid'] = row[0].strip()
station['manufacturer'] = self.application.getManufacturer(station['bssid'])
station['class'] = int(row[1].strip(), 0)
station['class_description'] = self.get_major_device_description(self.parse_class(station['class']))
cmd = ['hcitool', 'name', station['bssid']]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
(stdoutdata, stderrdata) = process.communicate();
station['name'] = stdoutdata
stations.append(station)
with self.lock:
self.stations = stations
time.sleep(self.sleep)
except:
self.application.log('bluetooth', 'error')
def getNetworks(self):
with self.lock:
            return self.stations
def stop(self):
self.running = False
|
[
"mlauters@fly-n-sense.com"
] |
mlauters@fly-n-sense.com
|
fb0d6fd04de3f5e3c01fd84c22bf7d97878deb39
|
6ee2af4e2e453927030a7ce88f246ec948536f01
|
/build/catkin_generated/generate_cached_setup.py
|
36a6a9b17749c2fde23d770c6666c6560646026a
|
[] |
no_license
|
abhishekbalu/rosqt-publisher
|
67b980104cbca541bc7f318b6abde5d71f03bd0d
|
c636e14316631c888066d47d7e582812de1d2ded
|
refs/heads/master
| 2021-01-18T22:14:00.983005
| 2016-10-30T14:33:36
| 2016-10-30T14:33:36
| 72,354,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/jade/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/jade/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/jade".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/abhishek/jade_workspace/abhishek22/build/devel/env.sh')
output_filename = '/home/abhishek/jade_workspace/abhishek22/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"abhisheklokesh6008@gmail.com"
] |
abhisheklokesh6008@gmail.com
|
6c0d5bbd04735a5cb28455fee37f5ad5beb791d7
|
bd1362c60313784c90013dfc9f0169e64389bf27
|
/scripts/asos/wind_chill_hours.py
|
aaccb84c833aa21cf7bbb32f0946b0e7ddf8734a
|
[] |
no_license
|
ForceCry/iem
|
391aa9daf796591909cb9d4e60e27375adfb0eab
|
4b0390d89e6570b99ca83a5fa9b042226e17c1ad
|
refs/heads/master
| 2020-12-24T19:04:55.517409
| 2013-04-09T14:25:36
| 2013-04-09T14:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
from pyIEM import iemdb
import mx.DateTime, sys
i = iemdb.iemdb()
asos = i['asos']
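# For each cold season (Oct 1 to Apr 1), count hours with wind chill below -30, -20, -10 and 0 F.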
for yr in range(1973,2009):
hrs = [0,0,0,0]
sql = "SELECT to_char(valid, 'YYYY-MM-DD HH24') as d, min(wind_chill(tmpf,sknt)) as wc from t%s WHERE \
station = '%s' and tmpf < 32 and tmpf > -50 and sknt > 0 \
and valid > '%s-10-01' GROUP by d" % (yr, sys.argv[1], yr)
rs = asos.query(sql).dictresult()
for i in range(len(rs)):
wc = float( rs[i]['wc'] )
if (wc < -30):
hrs[0] += 1
if (wc < -20):
hrs[1] += 1
if (wc < -10):
hrs[2] += 1
if (wc < 0):
hrs[3] += 1
sql = "SELECT to_char(valid, 'YYYY-MM-DD HH24') as d, min(wind_chill(tmpf,sknt)) as wc from t%s WHERE \
station = '%s' and tmpf < 32 and tmpf > -50 and sknt > 0 \
and valid < '%s-04-01' GROUP by d" % (yr+1, sys.argv[1], yr+1)
rs = asos.query(sql).dictresult()
for i in range(len(rs)):
wc = float( rs[i]['wc'] )
if (wc < -30):
hrs[0] += 1
if (wc < -20):
hrs[1] += 1
if (wc < -10):
hrs[2] += 1
if (wc < 0):
hrs[3] += 1
print "%s,%s,%s,%s,%s" % (yr, hrs[0],hrs[1],hrs[2],hrs[3])
|
[
"akrherz@95f8c243-6001-0410-b151-932e6a9ed213"
] |
akrherz@95f8c243-6001-0410-b151-932e6a9ed213
|
73528192301a76efed6e1f6ff204022615c0be6f
|
6f7a8d28be6af8116b5876df4c804bfc1997580c
|
/async_reduce/__init__.py
|
974e22118be551a6120882fd983cb46b09ad6109
|
[
"MIT"
] |
permissive
|
tzoiker/async-reduce
|
375c12ad0c5976e4062215f4338377a8336055de
|
2ece769e09628bf00cb35af32044b55d1946828a
|
refs/heads/master
| 2020-05-05T05:12:41.177854
| 2019-03-31T12:53:54
| 2019-04-04T17:10:51
| 179,743,101
| 0
| 0
|
MIT
| 2019-04-05T19:32:28
| 2019-04-05T19:32:28
| null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from .async_reducer import AsyncReducer
from .async_reduceable import async_reduceable
__all__ = 'async_reduce', 'async_reduceable',
async_reduce = AsyncReducer()
|
[
"sirkonst@gmail.com"
] |
sirkonst@gmail.com
|
0a1230cf13d2fbd86cd3dce52b2abb63f19c142a
|
451e9ea8a8c4317bc03b4832d3093b8317a12e08
|
/weather/views.py
|
a2219087a28327d064e96b4040998d4873281d84
|
[] |
no_license
|
MehediHasanNasim/Weather-Checking-API
|
db73f36b1e6a800694e97ae5b0e3595945c5d0ad
|
49dcda33cb58371f2bc6b506cf072951d717c49d
|
refs/heads/main
| 2023-06-03T19:56:10.091400
| 2021-06-24T19:12:15
| 2021-06-24T19:12:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
from django.shortcuts import render, redirect, HttpResponseRedirect, get_object_or_404
# Create your views here.
def home(request):
import json
import requests
if request.method == "POST":
zipcode = request.POST['zipcode']
#return render(request, 'home.html', {'zipcode': zipcode})
api_request = requests.get("https://www.airnowapi.org/aq/observation/zipCode/current/?format=application/json&zipCode=" + zipcode + "&distance=5&API_KEY=B2910B43-3265-49E4-BE07-40DB21B3DDDE")
#https://www.airnowapi.org/aq/observation/zipCode/current/?format=application/json&zipCode=20001&distance=5&API_KEY=B2910B43-3265-49E4-BE07-40DB21B3DDDE
try:
api = json.loads(api_request.content)
except Exception as e:
api = "Error...."
if api[0]['Category']['Name'] == "Good":
category_description = "(0-50) Air quality is considered satisfactory"
category_color = "good"
        elif api[0]['Category']['Name'] == "Moderate":
            category_description = "(51-100) Air quality is acceptable"
            category_color = "moderate"
        elif api[0]['Category']['Name'] == "Unhealthy for Sensitive Groups":
            category_description = "(101-150) Members of sensitive groups may experience health effects"
            category_color = "USG"
        elif api[0]['Category']['Name'] == "Unhealthy":
            category_description = "(151-200) Everyone may begin to experience health effects"
            category_color = "unhealthy"
        elif api[0]['Category']['Name'] == "Very Unhealthy":
            category_description = "(201-300) Health alert: everyone may experience more serious health effects"
            category_color = "veryunhealthy"
        elif api[0]['Category']['Name'] == "Hazardous":
            category_description = "(301-500) Health warning of emergency conditions"
            category_color = "hazardous"
diction= {
'api': api,
'category_description':category_description,
'category_color':category_color,}
return render(request, 'home.html', context= diction)
else:
return render(request, 'home.html', )
def about(request):
diction= {}
return render(request, 'about.html', context= diction)
|
[
"75909031+MehediHasanNasim@users.noreply.github.com"
] |
75909031+MehediHasanNasim@users.noreply.github.com
|
5bd01557125f2b6645afe7314023655b308fda80
|
0374289f671d93a0d1d2b14fd813b88a4dd81f6b
|
/chatbot-master/cali_main.py
|
7e3891c1524004352cf06a63dbf8fba2d1778c0e
|
[] |
no_license
|
dhruvbabbar/chatbot
|
9b62d4b1e0418926e020906952a7303eca018e5e
|
f6b27615452625d3da04e5a2207d5a514c0d71a7
|
refs/heads/master
| 2020-03-08T03:31:36.909623
| 2018-05-06T13:55:56
| 2018-05-06T13:55:56
| 127,893,610
| 0
| 0
| null | 2018-05-06T13:55:56
| 2018-04-03T10:48:43
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
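# NOTE: this snippet assumes Workouts and WorkoutCalendar are imported elsewhere (their modules are not shown here).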
def calendar(request, year, month):
my_workouts = Workouts.objects.order_by('my_date').filter(
my_date__year=year, my_date__month=month
)
cal = WorkoutCalendar(my_workouts).formatmonth(year, month)
return render_to_response('my_template.html', {'calendar': mark_safe(cal),})
|
[
"dhruvbabbar349@gmail.com"
] |
dhruvbabbar349@gmail.com
|
675c9c10775fd79f1259f821aa47ffad8afe99ba
|
28111c4fa919b14ff2f78be30035f7d90a08ab1e
|
/crawls/crawls/spiders/baldor.py
|
9e4dfdc079868243244e1595ee5d45d60340b8e5
|
[] |
no_license
|
HeraskoA/crawls
|
a4c07858075495062319d646861f734fa7201e38
|
887cd844847f33178709c3abdd8a770d94e899a6
|
refs/heads/master
| 2021-01-21T14:39:32.619133
| 2017-10-24T14:37:11
| 2017-10-24T14:37:11
| 95,320,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,977
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
import scrapy
from crawls.items import BaldorDodgeItem
import re
out = pd.read_csv("crawls/spiders/data/diff_baldor.csv", sep=',')
catalog = [str(item).strip() for item in list(out.catalog_number)]
description = list(out.description)
ids = list(out.id)
catalog_descr = dict(zip(catalog, description))
catalog_ids = dict(zip(catalog, ids))
class Mcr(scrapy.Spider):
name = "baldor_dodge"
def start_requests(self):
for row in catalog:
yield self.request(row)
def request(self, row):
url = 'http://www.baldor.com/catalog/' + row
return scrapy.Request(url=url,
callback=self.parse_item,
dont_filter=True,
meta={'row': row}
)
def create_item(self, row, img, doc_name, doc_url, specs):
item = BaldorDodgeItem()
item['ids'] = catalog_ids[row]
item['catalog_number'] = row
item['description'] = catalog_descr[row]
item['img'] = img
item['doc_name'] = doc_name
item['doc_url'] = doc_url
item['specs'] = specs
return item
def construct_table(self, table):
table = table.replace('<div class="section detail-table product-overview">', '<table>')
table = re.sub(r'</div>(\n|\s)+<div class="col span_1_of_2">', '', table)
table = table.replace('<div class="col span_1_of_2">', '')
table = re.sub(r'</div>(\n|\s)+</div>(\n|\s)+</div>(\n|\s)+</div>', '</div></table></div>', table)
table = re.sub(r'</div>(\n|\s)+<div>', '</tr><tr>', table)
table = table.replace('</div></table>', '</tr></table>')
table = table.replace('<div>', '<tr>')
table = table.replace('<span class="label">', '<td>').replace('<span class="value">', '<td>')
table = table.replace('</span>', '</td>')
return table
def custom_extractor(self, response, expression):
data = response.xpath(expression).extract_first()
return data if data else ''
def parse_item(self, response):
row = response.meta['row']
img = self.custom_extractor(response, '//*[@id="catalog-detail"]/img/@data-src')
img = 'http://www.baldor.com' + img + '?bc=white&as=1&h=256&w=256' if img != '/api/images/451' else ''
specs = self.custom_extractor(response, '//div[@data-tab="specs"]')
specs = self.construct_table(specs) if specs != '' else ''
key = response.xpath('//*[@id="nav-desktop-breadcrumb"]/ul/li/a/text()').extract()[-1]
key_tire = 0
try:
int(key.split()[-1])
except Exception:
pass
else:
key_tire = key.replace(' ', '-')
key_upper = key.upper()
doc_name, doc_url = '', ''
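        # Try progressively looser XPath matches for the breadcrumb key:
        # exact text, uppercase, hyphenated, prefix match, "Dodge "-prefixed,
        # plain substring, and finally fall back to the first document link.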
expression = '//a[@class="recordClick" and text()="%s"]' % key
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and text()="%s"]' % key_upper
item = response.xpath(expression)
if not item and key_tire:
expression = '//a[@class="recordClick" and text()="%s"]' % key_tire
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and starts-with(text(), "%s")]' % key
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and starts-with(text(), "%s")]' % key_upper
item = response.xpath(expression)
if not item and key_tire:
expression = '//a[@class="recordClick" and starts-with(text(), "%s")]' % key_tire
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and text()="Dodge %s"]' % key
item = response.xpath(expression)
if not item and key_tire:
expression = '//a[@class="recordClick" and text()="Dodge %s"]' % key_tire
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and contains(text(), "Dodge") and contains(text(), "%s")]' % key
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and contains(text(), "Dodge") and contains(text(), "%s")]' % key_upper
item = response.xpath(expression)
if not item and key_tire:
expression = '//a[@class="recordClick" and contains(text(), "Dodge") and contains(text(), "%s")]' % key_tire
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and contains(text(), "%s")]' % key
item = response.xpath(expression)
if not item:
expression = '//a[@class="recordClick" and contains(text(), "%s")]' % key_upper
item = response.xpath(expression)
if not item and key_tire:
expression = '//a[@class="recordClick" and contains(text(), "%s")]' % key_tire
item = response.xpath(expression)
if not item:
key_split = key.split()
for part in key_split:
expression = '//a[@class="recordClick" and contains(text(), "%s")]' % part
item = response.xpath(expression)
if item:
break
if not item:
item = response.xpath('//ul[@class="list-icon-document"]/li[1]/a')
doc_name = self.custom_extractor(item, './text()')
doc_url = item.xpath('./@href').extract_first()
doc_url = response.urljoin(doc_url) if doc_url else ''
return self.create_item(row, img, doc_name, doc_url, specs)
|
[
"andrey.herasko@gmail.com"
] |
andrey.herasko@gmail.com
|
f0752a02362e4d75af1e71dddab5a542414153f0
|
fa87189eb0783c233e4d3cd52f790a59ec63445d
|
/Problem 2 2021 lab3.py
|
cbf1999a0654cd6cfc7b0710be6ae2ac8ba6424d
|
[] |
no_license
|
jkendall5490/HELLOWORLD
|
8223b8dd7da9e2235763f2f0c8ca92d87e3078d6
|
85f12632c025634ab6a69a7fe398b809a7cf62e6
|
refs/heads/main
| 2023-03-24T23:01:18.978074
| 2021-03-18T10:26:49
| 2021-03-18T10:26:49
| 303,903,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
#shop.py
def check_money(total_cost, customer_money):
#Your code here
if customer_money - total_cost >=0:
return True
else:
return False
#This should print False
can_pay = check_money(107, 49)
print(can_pay)
#This should print True
can_pay = check_money(6, 88)
print(can_pay)
|
[
"noreply@github.com"
] |
jkendall5490.noreply@github.com
|
1deef8003d76f7e77fa621034bcf1085241dcd05
|
f52e7443be4418eebc7ed51f56a879617b00f088
|
/7b.py
|
953487043bc15f1e7425b5ab966ea7d456e9c95a
|
[] |
no_license
|
nathanleiby/advent-of-code-2018
|
9fe43a97a80290338b8724e2c07c8560f16d9d3b
|
047ec34ad936e33fc5d530f8403aa59983325bbc
|
refs/heads/master
| 2020-04-09T10:05:31.905048
| 2018-12-14T06:01:58
| 2018-12-14T06:01:58
| 160,257,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
from toposort import toposort, toposort_flatten
ex_i = {
'A': {'C'},
'F': {'C'},
'B': {'A'},
'D': {'A'},
'E': {'B', 'D', 'F'},
}
ex_i_str ="""Step C must be finished before step A can begin.
Step C must be finished before step F can begin.
Step A must be finished before step B can begin.
Step A must be finished before step D can begin.
Step B must be finished before step E can begin.
Step D must be finished before step E can begin.
Step F must be finished before step E can begin."""
def res(inp, num_workers=2, duration_boost=0):
out = []
from copy import deepcopy
inp_c = deepcopy(inp)
available_work = get_available_work(inp_c)
ongoing_work = {} # { letter : remaining_seconds }
current_second = -1
while len(inp_c):
current_second += 1
# do work
to_delete = []
for k in ongoing_work:
ongoing_work[k] -= 1
if ongoing_work[k] == 0:
to_delete.append(k)
# remove complete work
for k in to_delete:
del(ongoing_work[k])
remove_from_graph(inp_c, k)
if len(to_delete):
# ONLY run this once, even if multiple deletions
# update available_work, in case k unblocked new work
available_work += get_available_work(inp_c)
available_work = sorted(list(set(available_work)))
for o in ongoing_work:
available_work.remove(o)
print("available_work", available_work)
# get more work
while len(ongoing_work) < num_workers and len(available_work) > 0:
next_item = available_work[0]
available_work = available_work[1:]
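            # Task duration: letter value (A=1 ... Z=26) plus duration_boost (60 in the real puzzle).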
duration = ord(next_item) - 64 + duration_boost
ongoing_work[next_item] = duration
# record order, more relevant to problem 7 but just in case
out += next_item
print("second = ", current_second)
print("ongoing_work = ", ongoing_work)
return ("".join(list(out)), current_second)
# gets next item and mutates underlying graph to remove it
def get_available_work(graph):
# { { 2 , 1 }, { 3 } , { 4 , 5 } } => { 1 , 2 }
o = list(toposort(graph))
if len(o):
return sorted(list(o[0]))
return []
def remove_from_graph(graph, item):
# remove as a top-level key
if item in graph:
del(graph[item])
# remove as a dep
for k in graph:
dep = graph[k]
if item in dep:
dep.remove(item)
def s_to_dag(s):
out = {}
lines = s.splitlines()
for l in lines:
first = l[5]
then = l[36]
if not out.get(then):
out[then] = set()
out[then].add(first)
return out
print(res(ex_i))
assert(res(ex_i) == ("CAFBDE", 15))
with open('./7-input', 'r') as f:
dag = s_to_dag(f.read())
#print(dag)
print("RESULT = ", res(dag, num_workers=5, duration_boost=60))
|
[
"nathan.leiby@clever.com"
] |
nathan.leiby@clever.com
|
19bb961a475c9140cdf1d0f5b1b933a281056798
|
4272f10ab532042daa1d7811e4c314db098f2093
|
/variables/ejercicio03.py
|
824cf433e2940fcce0a10a6cc24bae3b9685f2cd
|
[] |
no_license
|
bl00p1ng/ejercicios-basicos-python
|
780b7050184d75f9a9af5c641bd57e2c13357a4c
|
53b974257d0729a00b0ee57c5eb877845784f176
|
refs/heads/main
| 2023-03-14T06:30:24.925725
| 2021-03-05T21:53:30
| 2021-03-05T21:53:30
| 338,624,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
def run():
    # Exercise 3
    # Create the variables nombre, direction and teléfono and assign them the corresponding
    # values. Print them on screen so the program's output matches exercise 2.
name = 'Andrés Felipe López'
phone = '509-684-1752'
address = '2000 Calico Drive, Colville WA'
print('Nombre: ' + name)
print('Teléfono: ' + phone)
print('Dirección: ' + address)
if __name__ == '__main__':
run()
|
[
"blooping@protonmail.com"
] |
blooping@protonmail.com
|
bca55882f1cc1e823bbc9e5df57353da55294810
|
0850e1ed6c795a11efd5ded56451c2286578fc34
|
/app.py
|
ec3f4bcacdee7b894425124ef7ae30a26de59f00
|
[] |
no_license
|
ChanonVilaiyuk/app
|
2799e679dd536c3d5d6203da75ffbc4b2f6273c2
|
c7a537c60b93780db1836bc34793c0d6dd794eb6
|
refs/heads/master
| 2021-05-05T10:23:12.221159
| 2017-09-18T14:15:49
| 2017-09-18T14:15:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13
|
py
|
# v001
# v002
|
[
"ta.animator@gmail.com"
] |
ta.animator@gmail.com
|
afa71f64cc9f5d035f2c07e4d8927d8e7f62b598
|
fbfaf9c8047542efb383c8f480dd636437a76343
|
/testsuite/pnoise-gabor/run.py
|
af0d1e028fe263ad1e7033634440b256ed74bd6a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
jeremyselan/OpenShadingLanguage
|
1141bda4578fbacad80116ee1a9deb6d498ef9c9
|
3e2955686dc61bc8104ed9451bf172fc2d2348eb
|
refs/heads/master
| 2021-01-18T04:50:00.061446
| 2012-06-27T22:51:48
| 2012-06-27T22:51:48
| 5,102,217
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
#!/usr/bin/python
command = oslc("../common/shaders/testpnoise.osl")
command += testshade("-g 512 512 -od uint8 -o Cout out.tif -sparam noisename gabor testpnoise")
outputs = [ "out.txt", "out.tif" ]
|
[
"lg@larrygritz.com"
] |
lg@larrygritz.com
|
d719c1b237fa0f671e01bfbafb4c4a3785b95aa2
|
1e5bf4b7ac971ce824e9054c691e0cfdd9d01ee7
|
/98. Validate Binary Search Tree.py
|
c0c37505fd157fbbdd0818013acda9bcce8d5bde
|
[
"MIT"
] |
permissive
|
Nriver/leetcode
|
925bf551ea8d6ee3ab1d17a26544d09b6fa988c0
|
e0b30fae8a31513172fcbd13d67eded8922ba785
|
refs/heads/master
| 2021-06-08T20:38:24.731309
| 2021-05-10T02:02:35
| 2021-05-10T02:02:35
| 162,373,286
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
# -*- coding: utf-8 -*-
# @Author: Zengjq
# @Date: 2018-12-19 10:46:12
# @Last Modified by: Zengjq
# @Last Modified time: 2019-02-20 17:00:12
class Solution:
    # beats 100% - recursion with bounds is fastest
def isValidBST(self, root: 'TreeNode') -> 'bool':
        # note: the initial call must pass None for both the min and max bounds
return self.isValid(root, None, None)
def isValid(self, root, min, max):
        if root is None:
            return True
        if min is not None and root.val <= min:
            return False
        if max is not None and root.val >= max:
            return False
return self.isValid(root.left, min, root.val) and self.isValid(root.right, root.val, max)
# 58% solution
    # in-order traversal - slower
# def isValidBST(self, root: 'TreeNode') -> 'bool':
# ret = self.inorder(root)
# return ret == sorted(list(set(ret)))
# def inorder(self, root):
# if root is None:
# return []
# return self.inorder(root.left) + [root.val] + self.inorder(root.right)
# Tree definition found in here
# https://leetcode.com/problems/recover-binary-search-tree/discuss/32539/Tree-Deserializer-and-Visualizer-for-Python
class TreeNode:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def __repr__(self):
return 'TreeNode({})'.format(self.val)
def deserialize(string):
if string == '{}':
return None
nodes = [None if val.strip() == 'null' else TreeNode(int(val.strip()))
for val in string.strip('[]{}').split(',')]
kids = nodes[::-1]
root = kids.pop()
for node in nodes:
if node:
if kids:
node.left = kids.pop()
if kids:
node.right = kids.pop()
return root
test_cases = (deserialize('[2, 1, 3]'),
deserialize('[5, 1, 4, null, null, 3, 6]'),
)
solution = Solution()
for test_case in test_cases:
print(solution.isValidBST(test_case))
|
[
"junqing.zeng@gmail.com"
] |
junqing.zeng@gmail.com
|
34d43cfe3bc78b8ff679d3f0730e47094534d1ac
|
47122c110aae10880469e94c969f1d7a58815de2
|
/posts/admin.py
|
d8731ccd6ff032917a0c9e3d0e5fedf40a820251
|
[] |
no_license
|
derrickps/getsocialproject
|
1116586bb90132dc7efd247083a27e7ec1b05e97
|
dfaeb6185639d0d71233eb6b3f2d7c2ae40b3677
|
refs/heads/main
| 2023-05-30T14:26:57.013143
| 2021-06-19T08:01:23
| 2021-06-19T08:01:23
| 378,340,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
from django.contrib import admin
from .models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
# admin.site.register(Like)
# change in admin done in apps.py
|
[
"psderrickit@gmail.com"
] |
psderrickit@gmail.com
|
65a0d661df90d4e3726d1f5122c0dc3bfe463082
|
e3e60898ac87cd758dab7fbd318ad9a7a013d969
|
/maps/recommend.py
|
3e89d2b6aedd16c86a7926dba73835ece0ab7ebd
|
[] |
no_license
|
blvck-root/restaurant_ratings
|
97c38f4d224ffaae3e4eb4f23990db9e2872a1ec
|
0bdd702423b100fc6038c7d27103a33a6c7cc968
|
refs/heads/main
| 2023-08-17T12:19:04.782392
| 2020-12-08T17:43:00
| 2020-12-08T17:43:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,596
|
py
|
"""A Yelp-powered Restaurant Recommendation Program"""
from abstractions import *
from data import ALL_RESTAURANTS, CATEGORIES, USER_FILES, load_user_file
from ucb import main, trace, interact
from utils import distance, mean, zip, enumerate, sample
from visualize import draw_map
##################################
# Phase 2: Unsupervised Learning #
##################################
def find_closest(location, centroids):
"""Return the centroid in centroids that is closest to location.
If multiple centroids are equally close, return the first one.
>>> find_closest([3.0, 4.0], [[0.0, 0.0], [2.0, 3.0], [4.0, 3.0], [5.0, 5.0]])
[2.0, 3.0]
"""
# BEGIN Question 3
return min(centroids, key=lambda x: distance(location, x))
# END Question 3
def group_by_first(pairs):
"""Return a list of pairs that relates each unique key in the [key, value]
pairs to a list of all values that appear paired with that key.
Arguments:
pairs -- a sequence of pairs
>>> example = [ [1, 2], [3, 2], [2, 4], [1, 3], [3, 1], [1, 2] ]
>>> group_by_first(example)
[[2, 3, 2], [2, 1], [4]]
"""
keys = []
for key, _ in pairs:
if key not in keys:
keys.append(key)
return [[y for x, y in pairs if x == key] for key in keys]
def group_by_centroid(restaurants, centroids):
"""Return a list of clusters, where each cluster contains all restaurants
nearest to a corresponding centroid in centroids. Each item in
restaurants should appear once in the result, along with the other
restaurants closest to the same centroid.
"""
# BEGIN Question 4
pairs = [] # centroid-restaurant pairs
for restaurant in restaurants:
location = restaurant_location(restaurant) # restaurant location
centroid = find_closest(location, centroids) # closest centroid to restaurant location
pairs.append([centroid, restaurant])
return group_by_first(pairs)
# END Question 4
def find_centroid(cluster):
"""Return the centroid of the locations of the restaurants in cluster."""
# BEGIN Question 5
locations = list(map(restaurant_location, cluster))
latitudes = []
longitudes = []
for loc in locations:
latitudes.append(loc[0])
longitudes.append(loc[1])
return [mean(latitudes), mean(longitudes)]
# END Question 5
def k_means(restaurants, k, max_updates=100):
"""Use k-means to group restaurants by location into k clusters."""
assert len(restaurants) >= k, 'Not enough restaurants to cluster'
old_centroids, n = [], 0
# Select initial centroids randomly by choosing k different restaurants
centroids = [restaurant_location(r) for r in sample(restaurants, k)]
while old_centroids != centroids and n < max_updates:
old_centroids = centroids
# BEGIN Question 6
clusters = group_by_centroid(restaurants, centroids)
centroids = [find_centroid(cluster) for cluster in clusters]
# END Question 6
n += 1
return centroids
################################
# Phase 3: Supervised Learning #
################################
def find_predictor(user, restaurants, feature_fn):
"""Return a rating predictor (a function from restaurants to ratings),
for a user by performing least-squares linear regression using feature_fn
on the items in restaurants. Also, return the R^2 value of this model.
Arguments:
user -- A user
restaurants -- A sequence of restaurants
feature_fn -- A function that takes a restaurant and returns a number
"""
reviews_by_user = {review_restaurant_name(review): review_rating(review)
for review in user_reviews(user).values()}
xs = [feature_fn(r) for r in restaurants]
ys = [reviews_by_user[restaurant_name(r)] for r in restaurants]
# BEGIN Question 7
def sum_squares(list1, list2=None):
list2 = list1 if list2 is None else list2
mean1 = mean(list1)
mean2 = mean(list2)
return sum([(x - mean1) * (y - mean2) for x, y in zip(list1, list2)])
# sum squares
s_xx = sum_squares(xs)
s_yy = sum_squares(ys)
s_xy = sum_squares(xs, ys)
# regression coefficients and r_squared
b = s_xy / s_xx
a = mean(ys) - b * mean(xs)
r_squared = s_xy ** 2 / (s_xx * s_yy)
# END Question 7
def predictor(restaurant):
return b * feature_fn(restaurant) + a
return predictor, r_squared
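# Illustrative aside (not part of the assignment): a minimal, self-contained
# sketch of the same least-squares computation on plain Python lists, using
# only builtins. The names below are hypothetical.
def _least_squares_demo(xs, ys):
    n = len(xs)
    mean_x, mean_y = sum(xs) / n, sum(ys) / n
    s_xx = sum((x - mean_x) ** 2 for x in xs)
    s_yy = sum((y - mean_y) ** 2 for y in ys)
    s_xy = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys))
    b = s_xy / s_xx              # slope
    a = mean_y - b * mean_x      # intercept
    r_squared = s_xy ** 2 / (s_xx * s_yy)
    return a, b, r_squared
# e.g. _least_squares_demo([1, 2, 3], [2.0, 4.1, 5.9]) -> (0.1, 1.95, ~0.998)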
def best_predictor(user, restaurants, feature_fns):
"""Find the feature within feature_fns that gives the highest R^2 value
for predicting ratings by the user; return a predictor using that feature.
Arguments:
user -- A user
restaurants -- A list of restaurants
feature_fns -- A sequence of functions that each takes a restaurant
"""
reviewed = user_reviewed_restaurants(user, restaurants)
# BEGIN Question 8
predictors = [find_predictor(user, reviewed, fn) for fn in feature_fns]
return max(predictors, key=lambda x: x[1])[0]
# END Question 8
def rate_all(user, restaurants, feature_fns):
"""Return the predicted ratings of restaurants by user using the best
predictor based on a function from feature_fns.
Arguments:
user -- A user
restaurants -- A list of restaurants
feature_fns -- A sequence of feature functions
"""
predictor = best_predictor(user, ALL_RESTAURANTS, feature_fns)
reviewed = user_reviewed_restaurants(user, restaurants)
# BEGIN Question 9
def rate(restaurant):
if restaurant in reviewed:
return user_rating(user, restaurant_name(restaurant))
else:
return predictor(restaurant)
return {restaurant_name(r): rate(r) for r in restaurants}
# END Question 9
def search(query, restaurants):
"""Return each restaurant in restaurants that has query as a category.
Arguments:
query -- A string
restaurants -- A sequence of restaurants
"""
# BEGIN Question 10
return [r for r in restaurants if query in restaurant_categories(r)]
# END Question 10
def feature_set():
"""Return a sequence of feature functions."""
return [lambda r: mean(restaurant_ratings(r)),
restaurant_price,
lambda r: len(restaurant_ratings(r)),
lambda r: restaurant_location(r)[0],
lambda r: restaurant_location(r)[1]]
@main
def main(*args):
import argparse
parser = argparse.ArgumentParser(
description='Run Recommendations',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-u', '--user', type=str, choices=USER_FILES,
default='test_user',
metavar='USER',
help='user file, e.g.\n' +
'{{{}}}'.format(','.join(sample(USER_FILES, 3))))
parser.add_argument('-k', '--k', type=int, help='for k-means')
parser.add_argument('-q', '--query', choices=CATEGORIES,
metavar='QUERY',
help='search for restaurants by category e.g.\n'
'{{{}}}'.format(','.join(sample(CATEGORIES, 3))))
parser.add_argument('-p', '--predict', action='store_true',
help='predict ratings for all restaurants')
parser.add_argument('-r', '--restaurants', action='store_true',
help='outputs a list of restaurant names')
args = parser.parse_args()
# Output a list of restaurant names
if args.restaurants:
print('Restaurant names:')
for restaurant in sorted(ALL_RESTAURANTS, key=restaurant_name):
print(repr(restaurant_name(restaurant)))
exit(0)
# Select restaurants using a category query
if args.query:
restaurants = search(args.query, ALL_RESTAURANTS)
else:
restaurants = ALL_RESTAURANTS
# Load a user
assert args.user, 'A --user is required to draw a map'
user = load_user_file('{}.dat'.format(args.user))
# Collect ratings
if args.predict:
ratings = rate_all(user, restaurants, feature_set())
else:
restaurants = user_reviewed_restaurants(user, restaurants)
names = [restaurant_name(r) for r in restaurants]
ratings = {name: user_rating(user, name) for name in names}
# Draw the visualization
if args.k:
centroids = k_means(restaurants, min(args.k, len(restaurants)))
else:
centroids = [restaurant_location(r) for r in restaurants]
draw_map(centroids, restaurants, ratings)
|
[
"m.ndlovu@alustudent.com"
] |
m.ndlovu@alustudent.com
|
c8501aec9cf89872de343889e16ce6003850dba2
|
d322b0ee85c61bcb8d26ad53b947ee6bba5c8453
|
/HW1_code/HW1.py
|
bc5e3fbdaed2420f5f076fc01a8aa2f7f0d5f028
|
[] |
no_license
|
jwymanumich/SI650
|
17f15eddf08a6aedde014f9a7abc2063f7e79bb5
|
4e51a69ca7dfcf660da318ac38b5d64f6d8bd263
|
refs/heads/master
| 2020-07-30T14:36:52.437461
| 2019-09-23T23:02:44
| 2019-09-23T23:02:44
| 210,265,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,777
|
py
|
from collections import defaultdict
import math
import matplotlib.pyplot as plt
import nltk
from nltk.stem import *
import operator
import plotly.express as px
import plotly.graph_objects as go
import re
def remove_stopwords_from_collection(collection):
''' Return a new collection with no stop words
'''
collection_out = {}
collection_out['Name'] = collection['Name']
collection_out['documents'] = []
for document in collection['documents']:
out_document = {'line':document['line'], "words":[], 'POS':[], "STOP_WORD":[]}
for index, sw in enumerate(document['STOP_WORD']):
if(sw == False):
out_document['words'].append(document['words'][index])
out_document['POS'].append(document['POS'][index])
collection_out['documents'].append(out_document)
return collection_out
def remove_stopwords_from_inverted_index(inverted_index):
''' Return a new inverted index with no stop words
'''
out_inverted_index = {}
for cur_word in inverted_index:
if(inverted_index[cur_word]['STOP_WORD'] == "False"):
out_inverted_index[cur_word] = inverted_index[cur_word]
return out_inverted_index
def frequency_of_stopwords(inverted_index):
    '''Percentage of the word occurrences that are stopwords,
    counted from inverted_index and multiplied by occurrence count.'''
words_total = sum(inverted_index[item]['total_frequency'] for item in inverted_index)
    stop_words_total = sum(inverted_index[item]['total_frequency'] for item in inverted_index if inverted_index[item]['STOP_WORD'] == "True")
return [stop_words_total, words_total, float(stop_words_total)/words_total]
def percentage_of_capital_letters(collection):
''' Count the percentage of total characters that are upper case.
    This needs to use the collection to ensure that we are not losing
    case information in the inverted_index.
'''
upper_case_count = 0
lower_case_count = 0
for row in collection['documents']:
for c in row['line']:
            if c.islower():
                lower_case_count += 1
            elif c.isupper():
                upper_case_count += 1
return [upper_case_count, lower_case_count, float(upper_case_count)/(upper_case_count + lower_case_count)]
def average_number_of_characters_per_word(inverted_index):
''' Calculate the average number of characters per word
We can do this faster with the inverted_index and
multiplying value by total_frequency
'''
total_chars = 0
total_words = 0
for item in inverted_index:
inverted_index_item = inverted_index[item]
total_words += inverted_index_item['total_frequency']
total_chars += len(item) * inverted_index_item['total_frequency']
return [total_chars, total_words, float(total_chars)/total_words]
def percentage_of_nouns_adjectives_verbs_adverbs_pronouns(collection):
''' Count from collection to maintain contextual information in each location.
'''
noun_count = 0
adj_count = 0
verb_count = 0
adv_count = 0
pronoun_count = 0
total_words = 0
for document in collection['documents']:
total_words += len(document['words'])
for pos in document['POS']:
if(pos.startswith("N")):
noun_count += 1
elif(pos.startswith("J")):
adj_count += 1
elif(pos.startswith("V")):
verb_count += 1
elif(pos.startswith("RB")):
adv_count += 1
elif(pos.startswith("PR") or pos.startswith("WP")):
pronoun_count += 1
return {"Noun": [noun_count, total_words, float(noun_count)/total_words],
"Adjective": [adj_count, total_words, float(adj_count)/total_words],
"Verb": [verb_count, total_words, float(verb_count)/total_words],
"Adverb": [adv_count, total_words, float(adv_count)/total_words],
"Pronoun": [pronoun_count, total_words, float(pronoun_count)/total_words]}
def top_nouns_verbs_adjectives(collection):
''' Count most frequent occurences of noun, verb, adj
Use noun to maintain contextual information.
'''
counter = {'N':defaultdict(lambda: 0), "V":defaultdict(lambda: 0), "J":defaultdict(lambda: 0)}
for document in collection['documents']:
for index, word in enumerate(document['words']):
pos = document['POS'][index]
if(pos.startswith("N")):
counter['N'][word.lower()] += 1
elif(pos.startswith("V")):
counter['V'][word.lower()] += 1
elif(pos.startswith("J")):
counter['J'][word.lower()] += 1
response = {}
for pos in [["N", "Noun"], ["V", "Verb"], ["J", "Adjectives"]]:
sorted_words = sorted(counter[pos[0]].items(), key=lambda k_v: k_v[1], reverse=True) #2010
response[pos[1]] = sorted_words[0:10]
return response
def tfidf(collection, inverse_index):
    '''Compute TF-IDF scores for each word of the first 10 documents,
    using TF(t, d) = log(1 + c(t, d)) and IDF(t) = 1 + log(N / df(t)).
    '''
collection_tfidf = []
total_documents = len(collection['documents'])
for document in collection['documents'][0:10]:
document_tf = defaultdict(lambda: 0)
document_tfidf = {}
# get a list of document words and occurences
for index, word in enumerate(document['words']):
document_tf[word] += 1
# For each document word
for word in document_tf:
# Calculate the TF value
            # TF(t, d) = log(c(t, d) + 1)
tf_value = math.log(1 + document_tf[word])
            # IDF(t) = 1 + log(N / df(t))
document_frequency = len(set(inverse_index[word]['doc_ids']))
idf = 1 + math.log(total_documents/document_frequency)
# put TF and IDF for 'word' together
# and store by document
document_tfidf[word] = tf_value*idf
# Store document tfidf information by collection
collection_tfidf.append(document_tfidf)
return collection_tfidf
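# Illustrative aside: a tiny worked example of the same formulas,
# TF(t, d) = log(1 + c(t, d)) and IDF(t) = 1 + log(N / df(t));
# the numbers below are made up.
def _tfidf_demo():
    import math
    N = 4.0       # total documents
    count = 3     # occurrences of the term in this document
    df = 2        # documents containing the term
    tf_value = math.log(1 + count)    # ~1.386
    idf = 1 + math.log(N / df)        # 1 + log(2) ~ 1.693
    return tf_value * idf             # ~2.347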
def plot_data(name, inverted_index):
    ''' Plot the data on a linear graph and a log-log graph
'''
x = []
y = []
    # Count the number of words with each number of occurrences
value_count = defaultdict(lambda: 0)
for key in inverted_index:
value_count[int(inverted_index[key]['total_frequency'])] += 1
vocabulary = len(inverted_index)
#Use unique words
for k in sorted(value_count.items(), key=lambda k_v: k_v[0], reverse=True):
y.append(float(k[1])/vocabulary)
x.append(k[0])
plt.plot(x, y, "ro")
    plt.ylabel('occurrences')
plt.xlabel('rank order')
plt.title(name)
plt.show()
plt.plot(x, y, "ro")
    plt.ylabel('log occurrences')
plt.xlabel('log rank order')
plt.xscale('log')
plt.yscale('log')
plt.title(name)
plt.show()
def load_stop_words():
    ''' Load words from the file, and strip carriage returns'''
s = set()
for line in open('stoplist.txt', "r", encoding="utf-8").readlines():
s.add(line.strip('\n'))
return s
def load_data(file_name, stop_words):
''' Load words from file, skipping items matching values
in the provided set of stop_words'''
stemmer = PorterStemmer()
inverted_index = defaultdict(lambda: {'total_frequency' : 0, "POS":'', "STOP_WORD":'False', "doc_ids": [], "frequency":[]})
my_collection = {"Name":file_name, 'documents':[]}
cur_record = 1
for line in open(file_name, "r", encoding="utf-8").readlines():
document = {'line':line, "words":[], 'POS':[], "STOP_WORD":[]}
line_tok = nltk.word_tokenize(line)
for word_pos in nltk.pos_tag(line_tok):
cur_word = word_pos[0].lower()
s = stemmer.stem(cur_word)
x = re.search("[a-zA-Z]", s)
if(x is not None):
document['words'].append(cur_word)
inverted_index_item = inverted_index[cur_word]
inverted_index_item['total_frequency'] += 1
inverted_index_item['POS'] = word_pos[1]
document['POS'].append(word_pos[1])
document['STOP_WORD'].append(cur_word in stop_words)
if(cur_record not in inverted_index_item['doc_ids']):
inverted_index_item['doc_ids'].append(cur_record)
inverted_index_item['frequency'].append(1)
else:
index = inverted_index_item['doc_ids'].index(cur_record)
inverted_index_item['frequency'][index] += 1
my_collection['documents'].append(document)
cur_record += 1
for cur_word in inverted_index:
if(cur_word.lower() in stop_words):
inverted_index[cur_word]['STOP_WORD'] = "True"
return inverted_index, my_collection
global_stop_words = load_stop_words()
inverted_index_medhelp, collection_medhelp = load_data("medhelp.txt", global_stop_words)
inverted_index_ehr, collection_ehr = load_data("ehr.txt", global_stop_words)
collection_medhelp_no_stop_words = remove_stopwords_from_collection(collection_medhelp)
collection_ehr_no_stop_words = remove_stopwords_from_collection(collection_ehr)
inverted_index_medhelp_no_stop_words = remove_stopwords_from_inverted_index(inverted_index_medhelp)
inverted_index_ehr_no_stop_words = remove_stopwords_from_inverted_index(inverted_index_ehr)
plot_data("medhelp", inverted_index_medhelp_no_stop_words)
plot_data("ehr", inverted_index_ehr_no_stop_words)
print("Q2.2 stats on {} and {}".format(collection_medhelp['Name'], collection_ehr['Name']))
print("Q2.2a - Frequency of Stopwords.")
print("medhelp - {}".format(frequency_of_stopwords(inverted_index_medhelp)[2]))
print("ehr - {}".format(frequency_of_stopwords(inverted_index_ehr)[2]))
print("Q2.2b - Percentage of capital letters")
print("medhelp - {}".format(percentage_of_capital_letters(collection_medhelp)[2]))
print("ehr - {}".format(percentage_of_capital_letters(collection_ehr)[2]))
print("Q2.2c - Average Number of Characters per word")
print("medhelp - {}".format(average_number_of_characters_per_word(inverted_index_medhelp)[2]))
print("ehr - {}".format(average_number_of_characters_per_word(inverted_index_ehr)[2]))
print("Q2.2d - Percentage of Nouns, Adjectives, Verbs, Adverbs, and Pronouns")
r1 = percentage_of_nouns_adjectives_verbs_adverbs_pronouns(collection_medhelp)
r2 = percentage_of_nouns_adjectives_verbs_adverbs_pronouns(collection_ehr)
for key in list(r1):
print("{}\t{}\t{}".format(key, r1[key][2], r2[key][2]))
print("2.2e - The Top 10 Nouns, Top 10 Verbs, and Top 10 Adjectives.")
r1 = top_nouns_verbs_adjectives(collection_medhelp_no_stop_words)
r2 = top_nouns_verbs_adjectives(collection_ehr_no_stop_words)
for key in list(r1):
print("\n{}".format(key))
for item in range(0,10):
print("medhelp - {}".format(r1[key][item]))
for item in range(0,10):
print("ehr - {}".format(r2[key][item]))
print("Q2.3 TF-IDF top scores Medhelp")
for idx, document_tfidf in enumerate(tfidf(collection_medhelp_no_stop_words, inverted_index_medhelp_no_stop_words)):
print("Document {}".format(idx+1))
sorted_tfidf = sorted(document_tfidf.items(), key=lambda k_v: k_v[1], reverse=True)
for tfidf_item in sorted_tfidf[0:5]:
print("\tword:{} values:{}".format(tfidf_item[0], tfidf_item[1]))
print("Q2.3 TF-IDF top scores Ehr")
for idx, document_tfidf in enumerate(tfidf(collection_ehr_no_stop_words, inverted_index_ehr_no_stop_words)):
print("Document {}".format(idx+1))
sorted_tfidf = sorted(document_tfidf.items(), key=lambda k_v: k_v[1], reverse=True)
for tfidf_item in sorted_tfidf[0:5]:
print("\tword:{} values:{}".format(tfidf_item[0], tfidf_item[1]))
|
[
"jwyman@umich.edu"
] |
jwyman@umich.edu
|
a47f2449b19e9bc97040fc3bd4879370f4bb2788
|
82c013d796dd9b15dab7b33952aa68ebef62e23d
|
/amrequest.py
|
661c9e75684cab9c547547f5d26075b7df84405c
|
[] |
no_license
|
qwenghernaez/phil_steel
|
f4d92d75fa98e10c4a899285035466a5fa8bf14a
|
9ba5c808c4d4260cb58b7ff157a24ae195305a63
|
refs/heads/master
| 2021-01-21T08:46:33.135018
| 2017-05-18T05:54:26
| 2017-05-18T05:54:26
| 91,641,028
| 0
| 0
| null | 2017-05-18T05:33:34
| 2017-05-18T02:40:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,303
|
py
|
from odoo import models, fields, api
class AMRequests(models.Model):
_name = 'philsteel.amrequests'
customer = fields.Char(string='Customer')
status = fields.Selection([('new', 'New'), ('visited', 'Visited')], default='new', string='Request Status')
illustrations = fields.One2many(
'philsteel.amrimages', 'rfam', string="Illustrations")
request_number = fields.Char(string='Request Number')
location = fields.Text(string='Address')
name = fields.Many2one(
        'philsteel.projects', 'Project Name', ondelete='cascade', required=True
)
project_type = fields.Selection([('residential', 'Residential'), ('commercial', 'Commercial'), ('industrial', 'Industrial'), ('government', 'Government'), ('institutional', 'Institutional'), ('mass_housing', 'Mass Housing')], string='Type of Project')
    project_site_address = fields.Text(string='Complete Project Site Address', required=True)
general_contractor = fields.Many2one(
'philsteel.projectmanpower', 'Name of contractor', ondelete='cascade'
)
contact_person_at_site = fields.Many2many('philsteel.sitecontacts', string='Site Contact Person', ondelete='cascade')
jobsite_contact_number = fields.Char(string='Job Site Telephone or Mobile Number')
product_profile = fields.Char(string='Product Profile')
sc_number = fields.Char(string='SC NO')
ic_number = fields.Char(string='IC NO')
sq_number = fields.Char(string='SQ NO')
iq_number = fields.Char(string='IQ NO')
work_scope = fields.Many2many('philsteel.workscope', string='Scope of Work', ondelete='cascade')
frames_trusses_installed = fields.Char(string='% Frames / Trusses Installed')
purlins_installed = fields.Char(string='% Purlins Installed')
sogrod_installed = fields.Char(string='% Sagrod Installed')
beam_installed = fields.Char(string='% Beam Installed')
floors_available_for_measurement = fields.Char(string='% No. of Floors Available for Measurement')
rfm_quotation = fields.Boolean(string='Quotation')
    rfm_contract = fields.Boolean(string='Contract')
rfm_fabrication = fields.Boolean(string='Fabrication')
rfm_tech1assistance = fields.Boolean(string='Tech 1 Assistance')
rfm_others = fields.Text(string='Others')
ready_for_measurement_date = fields.Date(string='Date when structure ready for measurement')
accomplished_by = fields.Many2one(
'philsteel.projectmanpower', 'Accomplished By', ondelete='cascade'
)
date_filed = fields.Date(string='Date Filed')
approved_by = fields.Many2one(
        'philsteel.contacts', 'Approved By', ondelete='cascade'
)
assigned_by = fields.Many2one(
        'philsteel.android', 'Assigned By', ondelete='cascade', required=True
)
#image = fields.Binary()
statuss = fields.Selection([
('draft', 'Draft'),
('approved', 'Approved'),
('ongoing', 'Ongoing'),
('done', 'Done'),
], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
@api.onchange('name')
def get_proj_details(self):
for record in self:
record.customer = record.name.customer_name
record.ic_number = record.name.ic_no
record.sc_number = record.name.sc_no
record.location = record.name.location
record.project_type = record.name.types_of_project
@api.multi
def action_approved(self):
for visit in self:
visit.statuss = 'approved'
return True
@api.multi
def action_ongoing(self):
for visit in self:
visit.statuss = 'ongoing'
return True
@api.multi
def action_done(self):
for visit in self:
visit.statuss = 'done'
return True
# @api.model
# def create(self, values):
# """
# Create a new record for a model ModelName
# @param values: provides a data for new record
# @return: returns a id of new record
# """
# if values.get('request_number', 'New') == 'New':
# values['request_number'] = self.env['ir.sequence'].next_by_code('philsteel.amrequests') or 'New'
# result = super(AMRequests, self).create(values)
# return result
class AMRImages(models.Model):
_name = 'philsteel.amrimages'
name = fields.Binary(string='Image')
description = fields.Text(string='Description')
rfam = fields.Many2one('philsteel.amrequests',
ondelete='cascade', string="RFAM", required=True)
new_field = fields.Binary()
|
[
"noreply@github.com"
] |
qwenghernaez.noreply@github.com
|
bfe91a389a2793bab81af26924332598622638eb
|
14c9e16013866a59efd1981811083bd97f7ed780
|
/retrieve_similar_bugs.py
|
c2c8ee8caaa13af3ef8b28d00d3fe347ea20aae2
|
[] |
no_license
|
kunchengit/TriageRobot
|
186bf44dec776b14cad9acf7aad0ef86658c56f2
|
8ad86e330eb219831fcc8d9352662221b10bedad
|
refs/heads/master
| 2021-01-10T07:57:04.775020
| 2015-10-16T08:48:52
| 2015-10-16T08:48:52
| 44,372,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,689
|
py
|
import MySQLdb
import pandas
import itertools
import numpy as np
import bm25fe
import pickle
import subprocess
import jsd
from gensim import corpora
from gensim import matutils
from gensim.models import ldamulticore
import getpass
import os
def retrieve_similar_bugs(query_list, length_list, dictionary_address, topicmodel_address, rankmodel_address):
print 0, getpass.getuser(), os.getcwd()
conn = MySQLdb.connect(host='10.117.8.41', port=3306, user='root', passwd='vmware', db='bugfeature')
cur = conn.cursor()
print 1, getpass.getuser(), os.getcwd()
sql = '''SELECT * FROM bugs_cpdplatform_ff'''
bugs = pandas.io.sql.read_sql(sql, conn)
print 2, getpass.getuser(), os.getcwd()
dictionary = corpora.Dictionary.load_from_text(dictionary_address)
topicmodel = ldamulticore.LdaMulticore.load(topicmodel_address)
print 3, getpass.getuser(), os.getcwd()
num_terms = len(dictionary)
bugs['text'] = (bugs['short_desc'] +' '+ bugs['long_desc']).map(lambda x: dictionary.doc2bow(x.split()))
bugs['engineer'] = (bugs['assigned_to'].map(str)+' '+bugs['needinfo']).map(lambda x: x.split())
bugs.loc[:,'short_desc'] = bugs['short_desc'].map(lambda x: matutils.corpus2dense([dictionary.doc2bow(x.split())], num_terms, 1)[:,0])
bugs.loc[:,'long_desc'] = bugs['long_desc'].map(lambda x: matutils.corpus2dense([dictionary.doc2bow(x.split())], num_terms, 1)[:,0])
appearance = np.array(list(bugs['text'].map(lambda x: matutils.corpus2dense([x], num_terms, 1)[:,0]>0)))
df = appearance.sum(0)
idf = np.log(bugs.shape[0]/df)
avgfl = np.array([np.array(list(bugs['short_desc'])).sum(1).mean(), np.array(list(bugs['long_desc'])).sum(1).mean()])
bugs = bugs.set_index(['bug_id'])
print 4, getpass.getuser(), os.getcwd()
bm = bm25fe.bm25fe(K1=1.2, d_B=(0.75, 0.75), d_W = (2, 1), K3=1.2, q_B=(0.75, 0.75), q_W=(2, 1))
results = {}
lines = []
for item in query_list:
item = int(item)
bugs['score'] = bugs.apply(lambda x: bm.score(idf, avgfl, [x[13], x[14]],[bugs.loc[item,'short_desc'], bugs.loc[item,'long_desc']]), axis = 1)
bugs_sorted = bugs.sort(['score'], ascending = False).iloc[:100].reset_index()
results[item] = bugs_sorted.loc[:,['bug_id']]
# print results[item]
# idx = 0
# lines = []
for idx in xrange(100):
sim_title = bugs_sorted.iloc[idx]['short_desc'][bugs.loc[item,'short_desc']>0].sum()/max(bugs_sorted.iloc[idx]['short_desc'].sum(), 1)
score = bugs_sorted.iloc[idx]['score']
# cluster = topicmodel.inference([bugs_sorted.iloc[idx]['text'], bugs.loc[item['query'],'text']])
cluster = topicmodel.inference([bugs_sorted.iloc[idx]['text'], bugs.loc[item,'text']])[0]
dis_topic = jsd.JSD(cluster[0], cluster[1])
sim_hos = False
if (bugs_sorted.iloc[idx]['host_op_sys'] == bugs.loc[item,'host_op_sys']) and (bugs_sorted.iloc[idx]['host_op_sys'] != 'Unknown'):
sim_hos = True
sim_gos = False
if (bugs_sorted.iloc[idx]['guest_op_sys'] == bugs.loc[item,'guest_op_sys']) and (bugs_sorted.iloc[idx]['guest_op_sys'] != 'Unknown'):
sim_gos = True
sim_pd = False
if (bugs_sorted.iloc[idx]['product_id'] == bugs.loc[item,'product_id']):
sim_pd = True
sim_cg = False
if (bugs_sorted.iloc[idx]['category_id'] == bugs.loc[item,'category_id']):
sim_cg = True
sim_cp = False
if (bugs_sorted.iloc[idx]['component_id'] == bugs.loc[item,'component_id']):
sim_cp = True
sim_pr = False
if (bugs_sorted.iloc[idx]['priority'] == bugs.loc[item,'priority']):
sim_pr = True
sim_fi_pd = False
if (bugs_sorted.iloc[idx]['found_in_product_id'] == bugs.loc[item,'found_in_product_id']) and (bugs_sorted.iloc[idx]['found_in_product_id'] != 0):
sim_fi_pd = True
sim_fi_ver = False
if (bugs_sorted.iloc[idx]['found_in_version_id'] == bugs.loc[item,'found_in_version_id']) and (bugs_sorted.iloc[idx]['found_in_version_id'] != 0):
sim_fi_ver = True
sim_fi_ph = False
if (bugs_sorted.iloc[idx]['found_in_phase_id'] == bugs.loc[item,'found_in_phase_id']) and (bugs_sorted.iloc[idx]['found_in_phase_id'] != 0):
sim_fi_ph = True
if (bugs_sorted.iloc[idx]['cf_security'] == bugs.loc[item,'cf_security']) and (bugs_sorted.iloc[idx]['cf_security'] ==1):
sim_security = 2
elif (bugs_sorted.iloc[idx]['cf_security'] == bugs.loc[item,'cf_security']) and (bugs_sorted.iloc[idx]['cf_security'] ==0):
sim_security = 1
else:
sim_security = 0
sim_engineer = False
if (len(set(bugs_sorted.iloc[idx]['engineer']) & set(bugs.loc[item,'engineer'])) >0):
sim_engineer = True
lines.append(str(0)+' qid:'+str(item)+' 1:'+str(sim_title)+' 2:'+str(score)+' 3:'+str(dis_topic)+' 4:'+str(int(sim_hos))+' 5:'+str(int(sim_gos))+' 6:'+str(int(sim_pd))+' 7:'+str(int(sim_cg))+' 8:'+str(int(sim_cp))+' 9:'+str(int(sim_pr))+' 10:'+str(int(sim_fi_pd))+' 11:'+str(int(sim_fi_ver))+' 12:'+str(int(sim_fi_ph))+' 13:'+str(sim_security)+' 14:'+str(int(sim_engineer))+' # '+str(bugs_sorted.iloc[idx]['bug_id'])+'\n')
print 5, getpass.getuser(), os.getcwd()
f = open('/home/TriageRobot/query.txt', 'w')
f.writelines(lines)
f.close()
subprocess.call(('java', '-jar', '/root/chenkun/Duplicate-bugs-retrieval/RankLib-2.1-patched.jar', '-load', rankmodel_address, '-rank', '/home/TriageRobot/query.txt', '-score', '/home/TriageRobot/score.txt'))
# subprocess.call(('java', '-jar', 'RankLib-2.1-patched.jar', '-load', 'AdaRank.txt', '-rank', 'query.txt', '-score', 'score.txt'))
# subprocess.call(('java', '-jar', 'RankLib-2.1-patched.jar', '-load', 'RankNet.txt', '-rank', 'query.txt', '-score', 'score.txt'))
score_rank = []
qid = -1
f = open('/home/TriageRobot/score.txt', 'r')
for line in f:
if int(line.split()[0]) != qid:
if score_rank:
results[qid]['score_rank'] = score_rank
score_rank = []
qid = int(line.split()[0])
score_rank.append(float(line.split()[2]))
else:
score_rank.append(float(line.split()[2]))
results[qid]['score_rank'] = score_rank
f.close()
# print results
idx = 0
for key in results:
bugs_ranked = results[key].sort(['score_rank'], ascending = False).set_index(['bug_id'])
ranklist = []
i = 0
while len(ranklist) < int(length_list[idx]):
# print bugs_ranked.iloc[i]['bug_id']
if bugs_ranked.index[i] != key:
child = False
for j in xrange(len(ranklist)):
if bugs.loc[bugs_ranked.index[i],'summary'] == bugs.loc[ranklist[j],'summary']:
# if len(set([bugs_ranked.index[i]]) & set(item['rel'])) > 0:
# ranklist[j] = bugs_ranked.index[i]
child = True
break
if not child:
ranklist.append(bugs_ranked.index[i])
# ranklist.append(bugs_ranked.index[i])
i += 1
results[key] = ranklist
idx += 1
return results
def find_bug():
f = open('/home/TriageRobot/query.txt', 'w')
|
[
"root@TriageRobot-prod.(none)"
] |
root@TriageRobot-prod.(none)
|
2de8589e6440ac454067a5e013182e90329b0b7b
|
a3ac650399a159b5273005794bf54e0d1586004c
|
/Einarbeitung/Word2Vec/Test1/T4.py
|
fb9c8fa6b028cac34886c953ad3eb98bf73f083e
|
[] |
no_license
|
imbabaer/SemanticRelations
|
714c77320c7d26e7a6077d25d095d472037de838
|
13a8139e1cf6b2b70914bb64ad33e2654123eee7
|
refs/heads/master
| 2020-04-08T21:10:48.040347
| 2015-09-10T15:16:23
| 2015-09-10T15:16:23
| 32,916,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
import gensim, logging
import os
import math
folder="../../../Korpora/Wikipedia/"
preprocessedKorpus = folder+"enwiki-latest-pages-articles_clean.txt";
tmpPreprocessed = folder + "latest-pages-tmpPreprocessed/"
if not os.path.exists(tmpPreprocessed):
os.makedirs(tmpPreprocessed)
# split the preprocessed corpus into chunks
x = 0
y=0
tmp = []
with open(preprocessedKorpus) as infile:
for line in infile:
if x < 100000:
            tmp.append(line)
            x += 1
else:
f = open(tmpPreprocessed+'file'+str(y),'w')
for item in tmp:
f.write(item)
f.close()
print "created: preprocessed/file"+str(y)
tmp=[]
x=0
y+=1
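# NOTE: any lines remaining after the last complete 100000-line chunk are
# never written to a chunk file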
'''
lines= file.read()
lenght=len(lines)
numFiles = 10000
partitionSize = int(math.ceil(lenght/numFiles))
for x in range(0, numFiles):
f = open(tmpPreprocessed+'file'+str(x),'w')
for y in range (0,partitionSize):
f.write(lines[x*partitionSize +y])
f.close()
print "created: preprocessed/file"+str(x)
'''
print "done."
|
[
"rubenmueller90@gmail.com"
] |
rubenmueller90@gmail.com
|
a50adbea54a6894334b93d659ac790fc34a72e26
|
79c70e1bee028fb988fd77bd022cda19268f5bac
|
/Exercícios Resolvidos/The Huxley/248 - Última palavra de uma frase.py
|
f82167ad9ca2386e6d24cb424918f4df113b724e
|
[
"MIT"
] |
permissive
|
GuilhermeEsdras/Mamba-3
|
c2ff1c88a7a71f8aab8b57019ccb764e3a7aba47
|
b1b4352cad2961876f30e5ffe29b4cd19bddede5
|
refs/heads/master
| 2020-06-19T16:45:28.792035
| 2019-07-14T04:36:31
| 2019-07-14T04:36:31
| 196,789,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
frase = str(input()).split()
print(frase[-1])
|
[
"guilherme.esdras@outlook.com"
] |
guilherme.esdras@outlook.com
|
87fd206f0463a131023a42fd8e873662634b083b
|
b3339de21179652caeac639d3ed04613367f5b9e
|
/2021.1/Back_End/Semana_02/AppAnimal1.py
|
470f1fe7a95d3aeed50dbe75c684eeeb3681dc27
|
[
"MIT"
] |
permissive
|
felipegt56/IFPI-TDS
|
48ea243a11d13d88253e9858af358be5f022f1f5
|
b413f9e96eb54816bebc4692d3bfd90e350eac72
|
refs/heads/main
| 2023-06-06T18:55:05.954808
| 2021-07-09T22:28:13
| 2021-07-09T22:28:13
| 357,725,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List, Optional
from uuid import uuid4
app = FastAPI()
class Animais(BaseModel):
id: Optional[str]
nome: str
idade: int
sexo: str
cor: str
bd_animais: List[Animais] = []
@app.post('/animais')
def Sistema_cadastro(animal: Animais):
animal.id = str(uuid4())
bd_animais.append(animal)
    return animal  # return the created record so the client receives the generated id
@app.get('/animais')
def listar_animais():
return bd_animais
@app.get('/animais/{animal_id}')
def localizar_id(animal_id: str):
for animal in bd_animais:
if animal.id == animal_id:
return animal
return {'erro': 'Animal não encontrado'}
@app.delete('/animais/{animal_id}')
def deleta_id(animal_id: str):
pos = -1
for index, animal in enumerate(bd_animais):
if animal.id == animal_id:
pos = index
break
if pos != -1:
bd_animais.pop(pos)
return {'Mensagem': 'animal removido com sucesso!'}
else:
return {'Erro': 'Animal não encontrado!'}
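# Illustrative usage sketch (not part of the original file), exercising the
# endpoints above with FastAPI's test client:
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   client.post('/animais', json={'nome': 'Rex', 'idade': 3, 'sexo': 'M', 'cor': 'preto'})
#   animais = client.get('/animais').json()
#   client.delete('/animais/{}'.format(animais[0]['id']))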
|
[
"69603279+felipegt56@users.noreply.github.com"
] |
69603279+felipegt56@users.noreply.github.com
|
170c80b32c28e732b03d4aa88dca2ddfad488590
|
677002b757c0a1a00b450d9710a8ec6aeb9b9e9a
|
/tiago_public_ws/devel/lib/python2.7/dist-packages/pal_vision_msgs/msg/_LegDetections.py
|
795d7a6e5a0ddc8937c4f89b902f18ac6d08097e
|
[] |
no_license
|
mrrocketraccoon/tiago_development
|
ce686c86459dbfe8623aa54cf4279021342887fb
|
a0539bdcf21b67ab902a4649b516dcb929c54042
|
refs/heads/main
| 2023-06-16T19:39:33.391293
| 2021-07-08T21:20:03
| 2021-07-08T21:20:03
| 384,249,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
/tiago_public_ws/devel/.private/pal_vision_msgs/lib/python2.7/dist-packages/pal_vision_msgs/msg/_LegDetections.py
|
[
"ricardoxcm@hotmail.com"
] |
ricardoxcm@hotmail.com
|
d09de466a60c088bd29f62ed170d112b8a9c6c21
|
609ac0a3489abeddd04e24c35f757163ea044ec9
|
/ntpu_system/mysite/alumni/urls.py
|
58d3307d0ceaf3cc353deff320f4a988a79149b8
|
[] |
no_license
|
aron3312/ntpu_system
|
988748bc2d831de0a429d3e02a5d9338697d674d
|
b1f0729c3a018da31d0496eb0b45600069ad22e5
|
refs/heads/master
| 2022-04-29T16:52:11.273522
| 2022-03-28T07:36:06
| 2022-03-28T07:36:06
| 108,516,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
from django.conf.urls import url
from alumni import views
urlpatterns = [
url(r'^alumni/introduction$',views.alumni_introduction,name='alumni_introduction'),
url(r'^alumni/activity$',views.alumni_activity,name='alumni_activity'),
url(r'^alumni/network$',views.network,name='alumni_network')
]
|
[
"aron3313@gmail.com"
] |
aron3313@gmail.com
|
5945b1b91845f0686215e05e18fc21cadfc1fcc2
|
5832f65747e6142d1b8de9d46aa507092782aafc
|
/Codeforces/Educational Codeforces Round 62 (Rated for Div. 2) - 1140/1140A-Detective Book.py
|
4762feb138af0272fb549b4278310af587203de2
|
[] |
no_license
|
subhashreddykallam/Competitive-Programming
|
64cc42c5b23c03536187a1bb54e2b2ed82ee7844
|
973b66b4eb81352b98409ca52fa3aa75c28d8b6f
|
refs/heads/master
| 2022-05-28T21:07:43.012922
| 2020-05-05T20:34:20
| 2020-05-05T20:34:20
| 226,814,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
import math
from collections import deque, defaultdict
from sys import stdin, stdout
#input = stdin.readline
# print = stdout.write
listin = lambda : list(map(int, input().split()))
mapin = lambda : map(int, input().split())
n = int(input())
a = listin()
k = -1
count = 0
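# one reading session ends at page i when the furthest answer page seen so
# far (k, zero-based) equals i; count such sessions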
for i in range(n):
k = max(k, a[i]-1)
if k == i:
count+=1
print(count)
|
[
"42376739+Storm1seven@users.noreply.github.com"
] |
42376739+Storm1seven@users.noreply.github.com
|
71007b9129418fa6433dd591eb30efdc6a674d0b
|
78c2fd94694efdbeedec9a17665e0a261d7ab4a5
|
/app/config.py
|
6bc45ff88c2f51dd3d42088a28a6ba47fb2f6782
|
[
"MIT"
] |
permissive
|
pingpgeti/elliot-chat-client
|
93499a65c1b32501949c1dc983912e51d7b5d71a
|
4256dc78a514b93ef7a325d7ba5ad2674963d74f
|
refs/heads/master
| 2023-04-27T07:19:09.299344
| 2021-05-13T12:42:45
| 2021-05-13T12:42:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
from enum import Enum
from pathlib import Path
# server constants
SERVER_URL = "http://localhost:5000"
TABLE_SCHEMA_PATH = str(Path("app") / "database" / "schema.sql")
DEFAULT_DB_PATH = "user.db"
FETCH_DELAY_PERIOD = 5  # time period between each server data update
# other
PREFFERED_ENCODING = "utf-8"
# crypto constants
HASH_SALT = "made by wilkueti".encode(PREFFERED_ENCODING) # NEVER DO THIS!!!
MAX_ONE_TIME_KEYS = 15
# length of the keys is derived from the Signal documentation
SHARED_KEY_LENGTH = 32
RATCHET_STATE_KEY_LENGTH = 64
# according to crypto library docs nonce should have 96 bits
AEAD_NONCE = "SEG0PPiuHAFm".encode(PREFFERED_ENCODING)
BLOCK_SIZE = 128
class MainMenuOptions(Enum):
MESSAGE = 0
ADD_FRIEND = 1
CHANGE_CREDENTIALS = 2
REMOVE_ACCOUNT = 3
WAITROOM = 4
EXIT = 5
|
[
"michal.wilk0@yahoo.com"
] |
michal.wilk0@yahoo.com
|
5e76996ba10ca4cfef5a7a017be9aee004383cb6
|
99438c924fafc690ac3ee2836e0dd6884123ead7
|
/main.py
|
02d1cf86abe2b4d792827e5dfebc6c062f4f8245
|
[] |
no_license
|
emptyflash/twitter-scraping-v2
|
ddcf0e9b7206b097ac2648bc9f06366d3745134e
|
884401b175a19b6d58038a0733e822d8fb644002
|
refs/heads/master
| 2023-05-24T12:47:46.187197
| 2020-01-28T02:27:43
| 2020-01-28T02:27:43
| 236,641,932
| 0
| 0
| null | 2023-05-22T22:39:10
| 2020-01-28T02:28:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,058
|
py
|
import json
import datetime
import requests
from bs4 import BeautifulSoup
HEADERS = {
"authority": "twitter.com",
"accept":"application/json, text/javascript, */*; q=0.01",
"accept-language":"en-US,en;q=0.9",
"sec-fetch-mode":"cors",
"sec-fetch-site":"same-origin",
"x-asset-version":"42599c",
"x-push-state-request":"true",
"x-requested-with":"XMLHttpRequest",
"x-twitter-active-user":"yes",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
"cookie": '_twitter_sess=BAh7CSIKZmxhc2hJQzonQWN0aW9uQ29udHJvbGxlcjo6Rmxhc2g6OkZsYXNo%250ASGFzaHsABjoKQHVzZWR7ADoPY3JlYXRlZF9hdGwrCBm52dZvAToMY3NyZl9p%250AZCIlYmFkYTYxOWViNTdiM2M4MWY0OTVlOTA5MjdmOTRlOGM6B2lkIiU4ZjY2%250AYzI5YTdhZGE0NDI2MDNlMjA0M2IwMThlYmMyMw%253D%253D--a6151e23c827fa6018ae4213dbef144ff9966799; personalization_id="v1_iz/bqzAp5VjuGiJYpE9raQ=="; guest_id=v1%3A157985759055012300; ct0=a1e56bd7a069a92bc87d684d2ed82c4e; _ga=GA1.2.1641248728.1579857593; _gid=GA1.2.443586604.1579857593; tfw_exp=0; _gat=1',
}
def build_q(username, since, until):
return f"from:{username} since:{since.isoformat()} until:{until.isoformat()} include:retweets"
def extract_tweets(soup):
tweet_divs = soup.select("div.tweet")
tweets = []
for tweet in tweet_divs:
id = tweet["data-tweet-id"]
retweet_count = tweet.select_one(".ProfileTweet-action--retweet .ProfileTweet-actionCount")["data-tweet-stat-count"]
favorite_count = tweet.select_one(".ProfileTweet-action--favorite .ProfileTweet-actionCount")["data-tweet-stat-count"]
reply_count = tweet.select_one(".ProfileTweet-action--reply .ProfileTweet-actionCount")["data-tweet-stat-count"]
tweets.append({
"id": id,
"retweet_count": retweet_count,
"favorite_count": favorite_count,
"reply_count": reply_count,
})
return tweets
def timeline_search(q, max_position):
params = {
"vertical": "default",
"f": "tweets",
"q": q,
"src": "typd",
"include_available_features": "1",
"include_entities": "1",
"max_position": max_position,
"reset_error_state": False,
}
result = requests.get("https://twitter.com/i/search/timeline", params=params, headers=HEADERS).json()
soup = BeautifulSoup(result["items_html"], 'html.parser')
tweets = extract_tweets(soup)
min_position = result["min_position"]
has_more_items = result["has_more_items"]
return tweets, min_position, has_more_items
def init_search(q):
params = {
"src": "typd",
"f": "tweets",
"q": q
}
result = requests.get("https://twitter.com/search", params=params, headers=HEADERS).json()
soup = BeautifulSoup(result["page"], 'html.parser')
stream = soup.select_one("div.stream-container")
if stream is None:
print(f"No results found for {q}")
return set(), None
min_position = stream["data-min-position"]
tweets = extract_tweets(soup)
return tweets, min_position
def get_all_tweets(username, start, end, step=datetime.timedelta(days=90)):
since = start
tweets = []
while since != end:
until = since + step
if until > end:
until = end
q = build_q(username, since, until)
init_tweets, min_position = init_search(q)
tweets += init_tweets
print(init_tweets)
has_more_items = True
while has_more_items and min_position:
more_tweets, min_position, has_more_items = timeline_search(q, min_position)
print(more_tweets)
tweets += more_tweets
since = until
return tweets
if __name__ == "__main__":
username = "emptyflash"
start = datetime.date(2009, 9, 1)
end = datetime.date.today()
tweets = get_all_tweets(username, start, end)
import pdb; pdb.set_trace()
with open(f"{username}.json", 'w') as outfile:
json.dump(tweets, outfile)
|
[
"emptyflash@gmail.com"
] |
emptyflash@gmail.com
|
430164e8298b0a62a94c538521da7e1c71e9a6e4
|
0b354e25ca146869d2e7cabe6e950a91d3b70033
|
/my.py
|
349461c468d1fd3de4b6703cc1822ab8830a33fa
|
[] |
no_license
|
JBprojects/Tournament-Management-System
|
4975a8b2ebf0d7b97dac946c7c5dd821c90a97a7
|
5590b3dffe31f1e00445750e88524d61f2b2f721
|
refs/heads/master
| 2021-04-30T12:04:17.577780
| 2018-02-13T03:25:59
| 2018-02-13T03:25:59
| 121,266,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url='http://www.cricbuzz.com/cricket-match/live-scores'
uClient=uReq(my_url)
page_html=uClient.read()
uClient.close()
filename = "getcon.txt"
f=open(filename,"w")
f.write("")
page_soup=soup(page_html, "html.parser")
containers=page_soup.findAll("div",{"class":"cb-mtch-lst cb-col cb-col-100 cb-tms-itm"})
container=containers[0]
c=container.div.a
print("title " + c.text)
b=c.text
f.write(b+'\n')
type=container.findAll("div",{"class":"text-gray"})
for t in type:
t1=t.text
print(" " + t1)
f.write(t1+'\n')
match=page_soup.findAll("div",{"class":"cb-col-50 cb-col"})
m=match[0].text
print("status " + m)
f.write(m+'\n')
f.close()
|
[
"noreply@github.com"
] |
JBprojects.noreply@github.com
|
8220412e8380ae33458fd4830005ce9ca6d5ac81
|
3bb57eb1f7c1c0aced487e7ce88f3cb84d979054
|
/sgss_retro_senseaware/scripts/analyzers/closest/get_ambiguous_words.py
|
8e1da2dc3308fc7f03a6d62782ced0e68cc2ac32
|
[] |
no_license
|
ghpaetzold/phd-backup
|
e100cd0bbef82644dacc73a8d1c6b757b2203f71
|
6f5eee43e34baa796efb16db0bc8562243a049b6
|
refs/heads/master
| 2020-12-24T16:41:21.490426
| 2016-04-23T14:50:07
| 2016-04-23T14:50:07
| 37,981,094
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,182
|
py
|
import urllib2, re, gensim
from nltk.corpus import wordnet as wn
import numpy as np
from sklearn.decomposition import PCA
exp = re.compile('<BR>([^<]*)<BR>')
conn = urllib2.urlopen('http://www.enchantedlearning.com/wordlist/nounandverb.shtml')
html = conn.read()
ocs = [oc.strip() for oc in exp.findall(html) if len(oc.strip().split(' '))==1]
ocmap = {}
synmap = {}
for word in ocs:
syns = wn.synsets(word)
ants = set([])
for syn in syns:
for lemma in syn.lemmas():
ants.update(lemma.antonyms())
ocmap[word] = len(ants)
synmap[word] = len(syns)
words = sorted(ocmap.keys(), key=ocmap.__getitem__, reverse=True)
#for word in words:
# print(word + ': ' + str(synmap[word]) + ', ' + str(ocmap[word]))
print('Loading...')
wvmodel = '/export/data/ghpaetzold/word2vecvectors/models/word_vectors_all_100_cbow.bin'
wvrmodel = '/export/data/ghpaetzold/word2vecvectors/models/word_vectors_all_100_cbow_retrofitted.bin'
pwvmodel = '/export/data/ghpaetzold/word2vecvectors/models/word_vectors_all_generalized_100_cbow.bin'
pwvrmodel = '/export/data/ghpaetzold/word2vecvectors/models/word_vectors_all_generalized_100_cbow_retrofitted.bin'
m = gensim.models.word2vec.Word2Vec.load_word2vec_format(wvmodel, binary=True)
pm = gensim.models.word2vec.Word2Vec.load_word2vec_format(pwvmodel, binary=True)
mr = gensim.models.word2vec.Word2Vec.load_word2vec_format(wvrmodel, binary=True)
pmr = gensim.models.word2vec.Word2Vec.load_word2vec_format(pwvrmodel, binary=True)
#Select words to calculate PCA of:
simmap = {}
simmapr = {}
selected = []
all = []
X = []
i = 0
words = ['stand']
while len(selected)<1 and i<len(words):
word = words[i]
print(str(word))
nvec = pm[word+'|||N']
vvec = pm[word+'|||V']
TEMsim = m.most_similar(word, topn=10)
SEMsimn = pm.most_similar(word+'|||N', topn=5)
SEMsimv = pm.most_similar(word+'|||V', topn=5)
REMsim = mr.most_similar(word, topn=10)
RSEMsimn = pmr.most_similar(word+'|||N', topn=5)
RSEMsimv = pmr.most_similar(word+'|||V', topn=5)
#Add it to the selected list:
selected.append(word)
#Add them to the similarity map:
simmap[word] = TEMsim
simmap[word+'|||N'] = SEMsimn
simmap[word+'|||V'] = SEMsimv
simmapr[word] = REMsim
simmapr[word+'|||N'] = RSEMsimn
simmapr[word+'|||V'] = RSEMsimv
#Add them to list of words:
all.append(word)
all.append(word+'|||N')
all.append(word+'|||V')
temp = TEMsim + SEMsimn + SEMsimv
for simw in temp:
all.append(simw[0].strip())
all.append(word)
all.append(word+'|||N')
all.append(word+'|||V')
temp = REMsim + RSEMsimn + RSEMsimv
for simw in temp:
all.append(simw[0].strip())
#Add them to X matrix:
X.append(m[word])
X.append(nvec)
X.append(vvec)
for simw in TEMsim:
X.append(m[simw[0]])
for simw in SEMsimn:
X.append(pm[simw[0]])
for simw in SEMsimv:
X.append(pm[simw[0]])
X.append(mr[word])
X.append(pmr[word+'|||N'])
X.append(pmr[word+'|||V'])
for simw in REMsim:
X.append(mr[simw[0]])
for simw in RSEMsimn:
X.append(pmr[simw[0]])
for simw in RSEMsimv:
X.append(pmr[simw[0]])
i += 1
X = np.array(X)
print('X lines: ' + str(len(X)))
print('X columns: ' + str(len(X[0])))
print('All lines: ' + str(len(all)))
#Calculate PCA:
print('PCA...')
pca = PCA(n_components=2)
X = pca.fit_transform(X)
#Create vector map:
vecmap = {}
vecmapr = {}
for i in range(0, int(len(all)/2)):
word = all[i]
vec = X[i]
vecmap[word] = vec
for i in range(int(len(all)/2), len(all)):
word = all[i]
vec = X[i]
vecmapr[word] = vec
#Create files:
o1 = open('similar_map.txt', 'w')
o2 = open('vector_map.txt', 'w')
for word in simmap:
line = word + '\t'
for sim in simmap[word]:
line += sim[0].strip() + '\t'
o1.write(line.strip() + '\n')
o1.close()
for word in vecmap:
line = word + '\t' + str(vecmap[word][0]) + '\t' + str(vecmap[word][1]) + '\n'
o2.write(line)
o2.close()
o1 = open('similar_mapr.txt', 'w')
o2 = open('vector_mapr.txt', 'w')
for word in simmapr:
line = word + '\t'
for sim in simmapr[word]:
line += sim[0].strip() + '\t'
o1.write(line.strip() + '\n')
o1.close()
for word in vecmapr:
line = word + '\t' + str(vecmapr[word][0]) + '\t' + str(vecmapr[word][1]) + '\n'
o2.write(line)
o2.close()
|
[
"ghpaetzold@outlook.com"
] |
ghpaetzold@outlook.com
|
f0045ef4ff1340561ba46f011f3a5dcb1dacc65c
|
feee95b58b25527a1c962931c22427b3eaf98467
|
/ex4.py
|
b100b6c40a44ff8f13f6db3541c351d76b30ed80
|
[] |
no_license
|
odinokov7/3hw
|
d4638ba9f17e8c5dd628d5b2ff2288b5a68cc967
|
992c1d8591b4e06fd4042838766f4ee0f0f5f1cf
|
refs/heads/master
| 2023-03-02T21:26:48.476826
| 2021-02-10T16:55:28
| 2021-02-10T16:55:28
| 337,789,981
| 0
| 0
| null | 2021-02-10T16:57:42
| 2021-02-10T16:52:27
|
Python
|
UTF-8
|
Python
| false
| false
| 444
|
py
|
x = float(input('Введите x: '))
y = int(input('Введите y: '))
if y == 0:
    print('y не может быть 0')
    exit()
elif y > 0:
y = y * -1
def my_func_first_try(arg1, arg2):
return arg1 ** arg2
def my_func_second_try(arg1, arg2):
arg2 = abs(arg2)
otv = arg1
for i in range(1, arg2):
        otv = otv * arg1
return 1 / otv
print(my_func_first_try(x, y))
print(my_func_second_try(x, y))
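# Both functions compute x ** y for the (negative) y produced above:
# e.g. x = 2.0, y = -3 gives 0.125 from each (1 / 2 ** 3).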
|
[
"odinokov7@gmail.com"
] |
odinokov7@gmail.com
|
612cf026a12b0609b69d79e090a311dd47a04266
|
96e93c81addf58445f6f332f33430117f3f57306
|
/player.py
|
35d2e1515eceb20ca23a2857b7818490ca9b1e8a
|
[] |
no_license
|
faisal-ahmed/Balloon-Shooter-Game
|
69b38f4e6ad402a863d037ba0e66ac052f1b4107
|
cab9942602f4cce4680ce794e9b15e0b7b75ef80
|
refs/heads/master
| 2020-04-17T08:08:35.009798
| 2019-01-18T12:21:38
| 2019-01-18T12:21:38
| 166,399,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
#Player Management Package
from settings import *
class Player(object):
"""docstring for Player"""
PLAYER_IMAGE_URL = "myResources/images/shooter.png"
HEALTHBAR_IMAGE_URL = "myResources/images/healthbar.png"
HEALTH_IMAGE_URL = "myResources/images/health.png"
PLAYER_POSITION = [80, 80]
PLAYER_SIZE = (120, 120)
def __init__(self):
self.health_value = 194 #194
self.loaded_player = pygame.image.load(Player.PLAYER_IMAGE_URL)
self.transformed_player = pygame.transform.scale(self.loaded_player, Player.PLAYER_SIZE)
self.healthbar = pygame.image.load(Player.HEALTHBAR_IMAGE_URL)
self.health = pygame.image.load(Player.HEALTH_IMAGE_URL)
def rotatePlayer(self):
position = pygame.mouse.get_pos()
angle = math.atan2(position[1]-(Player.PLAYER_POSITION[1]+32), position[0]-(Player.PLAYER_POSITION[0]+26))
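        # 57.29 ~ 180/pi converts the atan2 result from radians to degrees;
        # 360 - degrees flips the sign for pygame's counter-clockwise rotation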
playerrot = pygame.transform.rotate(self.transformed_player, 360-angle*57.29)
playerpos1 = (Player.PLAYER_POSITION[0]-playerrot.get_rect().width/2, Player.PLAYER_POSITION[1]-playerrot.get_rect().height/2)
WINDOW.blit(playerrot, playerpos1)
def healthBar(self):
WINDOW.blit(self.healthbar, (5,5))
for health1 in range(self.health_value):
WINDOW.blit(self.health, (health1+8, 8))
def drawClock(self):
# 6.4 - Draw clock
font = pygame.font.Font(None, 24)
survivedtext = font.render(str((90000-pygame.time.get_ticks())/60000)+":"+str((90000-pygame.time.get_ticks())/1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright=[635,5]
WINDOW.blit(survivedtext, textRect)
|
[
"faisal.ahmed0001@gmail.com"
] |
faisal.ahmed0001@gmail.com
|
1ff59f385afc8cb760932bbdfb1aaee3163dc983
|
f2a3f57379cb375c33442afb03baef005b92f819
|
/이혜은/1027/멀쩡한 사각형.py
|
aef05f5d6bda02145921cae85e3c9cb11f466ecb
|
[] |
no_license
|
rubetyy/Algo-study
|
9e2d80b2edcd37c67c4c824f5e61b65be272cf06
|
d7165da60c98227d6f4abf18aa19cd79e006ea59
|
refs/heads/master
| 2023-09-02T05:52:41.517447
| 2021-11-23T04:12:04
| 2021-11-23T04:12:04
| 418,523,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def solution(w,h):
tmp = 0
if w <= h:
until = w
else:
until = h
for i in range(until, -1, -1):
if h%i == 0 and w%i == 0:
tmp = i
break
answer = w*h - (w+h-tmp)
return answer
|
[
"snflo16@naver.com"
] |
snflo16@naver.com
|
6725e52f3a46033891bb2ae6e8007889caaa26e6
|
81a9b528fbb79a6109a6c011cca9d59ff45dab92
|
/utils/sampler.py
|
7bacdd0f147134035948b3e1b0783f61e9cc2f55
|
[
"MIT"
] |
permissive
|
lconet/binary_quality_classification
|
a863ee4f3a0f37bbaa1dd310f1038beb853a82fb
|
72530438a9e0bd3d036fdf966f3ef9881d898e3d
|
refs/heads/master
| 2022-12-21T14:21:24.919972
| 2020-09-23T20:35:27
| 2020-09-23T20:35:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
import numpy as np


class Sampler(object):
    def __init__(self, sampler_type):
        # store the requested sampler type ("random" or "cartesian")
        self.sampler_type = sampler_type
    def __call__(self):
        if self.sampler_type == "random":
            samples = self.random_sampler()
        elif self.sampler_type == "cartesian":
            samples = self.cartesian_sampler()
        else:
            raise ValueError("unknown sampler_type: %s" % self.sampler_type)
        return samples
def cartesian_sampler(self):
pass
def random_sampler(self):
pass
@staticmethod
def normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:
"""Constructs the PMF in a Gaussian shape.
Args:
x (np.array): Random Variables.
mean (float): Mean of the Gaussian RV.
sigma (float): Standard deviation of the Gaussian RV.
Returns:
x (np.array): PMF in a Gaussian shape given the random variables and
parameters.
"""
x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)
x /= np.sqrt(2 * np.pi * sigma ** 2)
x /= x.sum()
return x
@staticmethod
def reduced_normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:
"""Constructs the PMF in a Gaussian shape.
PMF value of the mean value has been assigned to 0.
Args:
x (np.array): Random Variables.
mean (float): Mean of the Gaussian RV.
sigma (float): Standard deviation of the Gaussian RV.
Returns:
x (np.array): PMF in a Gaussian shape given the random variables and
parameters.
"""
x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)
x /= np.sqrt(2 * np.pi * sigma ** 2)
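        # NOTE: this assumes `mean` is a valid integer index into x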
x[mean] = 0.
x /= x.sum()
return x
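# Illustrative usage sketch (not part of the original module): build a
# discrete Gaussian PMF over 11 points centred at index 5 and check that
# it is normalized.
def _pmf_demo():
    xs = np.arange(11)
    pmf = Sampler.normal_pmf(xs, mean=5.0, sigma=2.0)
    assert abs(pmf.sum() - 1.0) < 1e-9
    return pmf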
|
[
"caneozester@gmail.com"
] |
caneozester@gmail.com
|
090e36be37fd0bcd41780a1de55f790c4c445f94
|
7a03201ccadf7ef3dcb6cd6676bc893bf412cedf
|
/lecture02/numbers2.py
|
9f198713886971782fb4ac521fd25137c7578e1e
|
[] |
no_license
|
uselesssparrow/pythonp_hw
|
732571321f0d1793c320d9673d7991d0007e15bb
|
27a3df0bc84cc74bfd9ec23a3441c7d06f4d31b0
|
refs/heads/master
| 2023-01-10T03:34:34.779957
| 2020-11-17T04:15:56
| 2020-11-17T04:15:56
| 298,477,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
st_in=input("Enter an integer from 0 to 99: ")
if not str.isdigit(st_in):
    print("The input is not a whole number or contains letters")
    exit()
st_in=int(st_in)
if (st_in<0) or (st_in>99): #range check
    print("The number is not in the range from 0 to 99")
    exit()
num=''
if (st_in//10==9):
    num+='ninety'
elif (st_in//10==8):
    num+='eighty'
elif (st_in//10==7):
    num+='seventy'
elif (st_in//10==6):
    num+='sixty'
elif (st_in//10==5):
    num+='fifty'
elif (st_in//10==4):
    num+='forty'
elif (st_in//10==3):
    num+='thirty'
elif (st_in//10==2):
    num+='twenty'
if (st_in%10==9):
    num+=' nine'
elif (st_in%10==8):
    num+=' eight'
elif (st_in%10==7):
    num+=' seven'
elif (st_in%10==6):
    num+=' six'
elif (st_in%10==5):
    num+=' five'
elif (st_in%10==4):
    num+=' four'
elif (st_in%10==3):
    num+=' three'
elif (st_in%10==2):
    num+=' two'
elif (st_in%10==1):
    num+=' one'
if (st_in==19):
    num='nineteen'
elif (st_in==18):
    num='eighteen'
elif (st_in==17):
    num='seventeen'
elif (st_in==16):
    num='sixteen'
elif (st_in==15):
    num='fifteen'
elif (st_in==14):
    num='fourteen'
elif (st_in==13):
    num='thirteen'
elif (st_in==12):
    num='twelve'
elif (st_in==11):
    num='eleven'
elif (st_in==10):
    num='ten'
if(st_in==0):
    num='zero'
print(num)
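# Examples: 42 prints 'forty two', 15 prints 'fifteen', 0 prints 'zero'.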
|
[
"71866532+uselesssparrow@users.noreply.github.com"
] |
71866532+uselesssparrow@users.noreply.github.com
|
f6b30be99b68743ed3946fedc0913932558006a1
|
3073334bbdf95403e07d41f66c21409fa70910fa
|
/pagerduty_trigger/__init__.py
|
96f6a607b35ab378eb4e2cd5d2ea3f099ba56c27
|
[] |
no_license
|
Bhanditz/pagerduty_trigger
|
aa9caa09794a4fa70f2441492d666a0970aaf3c1
|
69d5be3df9cb7b5f94f996b0441d85b7f0d3c01f
|
refs/heads/master
| 2023-04-14T13:40:17.658586
| 2015-12-11T16:19:48
| 2015-12-11T16:19:48
| 165,651,743
| 1
| 0
| null | 2023-04-04T00:14:26
| 2019-01-14T11:43:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,795
|
py
|
# -*- coding: utf-8 -*-
'''
Pagerduty actions
'''
from __future__ import absolute_import
import pygerduty
import logging
from pagerduty_trigger import broker
logger = logging.getLogger(__name__)
class IncidentKeyLocked(Exception):
'''
Exception for when the incident key has already been used recently
'''
pass
class IncidentKeyLock(object):
'''
Check for if an incident was already used
'''
_rconn = None
def __init__(self, incident_key, settings):
'''
Args:
incident_key (str): unique incident key to tie to error
settings (object): settings object
Returns:
None
'''
self.incident_key = incident_key
self.settings = settings
@property
def rconn(self):
'''
Redis connection object
'''
if self._rconn is None:
logger.info(broker)
self._rconn = broker.RedisClass(self.settings)
return self._rconn
def __enter__(self):
'''
Create a lock on redis to decrease the number of alerts to the pagerduty api
'''
logger.info('Check for redis lock: {0}'.format(self.incident_key))
# First check for a redis lock
rlock_status = self.rconn.set(self.incident_key, 'locked', ex=180, nx=True)
if rlock_status is None:
logger.info('Redis lock already exists for incident: {0}.'.format(self.incident_key))
raise IncidentKeyLocked("IncidentKey {0} is locked via Redis".format(self.incident_key), None)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
        Only clean up the lock if the alert failed.
        If there was no error, the lock is left to expire on its own, so that no
        extra calls go through for 180 seconds; this reduces how often we hit
        the pagerduty api.
'''
if exc_type is not None:
self.rconn.delete(self.incident_key)
class Pager(object):
pager = None
settings = None
def __new__(cls, settings):
'''
Cache base class
Args:
settings (object): pagerduty settings
Returns:
Pager (object): trigger incidents on pagerduty api
'''
if cls.pager is None:
cls.settings = settings
# api_token is not actually used for what we are doing, we don't
# need to auth only send to the service_key below
cls.pager = pygerduty.PagerDuty(cls.settings.PAGERDUTY_SUBDOMAIN, api_token='junk')
return super(Pager, cls).__new__(cls)
def trigger_incident(self, description, incident_key, details=None, client=None, client_url=None):
'''
Trigger an incident in the pagerduty api
Args:
description (str): Description on why alert is called
incident_key (str): unique string for incident
details (dict): dictionary with extra details
client (str): arbitrary product name
client_url (str): arbitrary product url
Returns:
bool:
True if the call succeeded
False if the call failed
None if no call was made
'''
if self.settings.PAGERDUTY_SERVICE_KEY is None:
return None
service_key = self.settings.PAGERDUTY_SERVICE_KEY
try:
with IncidentKeyLock(incident_key, self.settings):
self.pager.trigger_incident(service_key, description, "trigger",
details, incident_key,
client=client, client_url=client_url)
return True
except IncidentKeyLocked:
return None
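# Minimal usage sketch (not part of this module; the Settings class and its
# values are hypothetical stand-ins for the real settings object):
#
# class Settings:
#     PAGERDUTY_SUBDOMAIN = 'example'
#     PAGERDUTY_SERVICE_KEY = 'service-key'
#     # ...plus whatever redis settings broker.RedisClass expects...
#
# pager = Pager(Settings())
# sent = pager.trigger_incident('disk full on db01', incident_key='db01-disk')
# # True -> alert sent; None -> suppressed by the redis lock or no service key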
|
[
"danielwallace@gtmanfred.com"
] |
danielwallace@gtmanfred.com
|
654d6b5f0e13544c626b589ac4de64c67b3ca229
|
f48763f1080bf4e3a0efa69f1d937f6107ac8aca
|
/medbot_app/models.py
|
d8ff808779420e01cb498a6d1806fcb506636775
|
[] |
no_license
|
n1az/MedBot
|
4a71dcc41a5d06615f8aecc6d961b165ed840542
|
713fe1565ddf3d23d7958bf8fad521597676873e
|
refs/heads/master
| 2023-04-29T07:21:02.932495
| 2023-04-12T03:09:14
| 2023-04-12T03:09:14
| 275,631,345
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,803
|
py
|
from django.db import models
class Employee(models.Model):
pharmacy_id = models.BigAutoField(primary_key = True)
pharmacy_address = models.CharField(max_length = 254)
owner_name = models.CharField(max_length = 100)
pharmacy_name = models.CharField(max_length = 100)
pharmacy_reg_id = models.CharField(max_length = 100)
employee_password = models.CharField(max_length = 100)
pharmacy_rating = models.FloatField(max_length=3, null=True)
pharmacy_rating_count = models.PositiveIntegerField(default = 0)
employee_email = models.EmailField(max_length = 254)
employee_phone = models.CharField(max_length = 15)
employee_longT = models.CharField(max_length = 20, default= "90.40")
employee_latiT = models.CharField(max_length = 20, default= "20.40")
def __str__(self):
return self.pharmacy_name
class Inventory(models.Model):
"""docstring for Inventory"""
MUSTPRESCRIBED = 'MP'
MUSTNOTPRESCRIBE = 'MNP'
MEDICINESTATUS = [
(MUSTPRESCRIBED, 'Must be Prescribed'),
(MUSTNOTPRESCRIBE, 'Must not be Prescribed'),
]
GENERAL = 'A'
BLOOD = 'B'
DIGESTIVE = 'D'
EYE = 'F'
EAR = 'H'
CIRCULATORY = 'K'
MUSCULOSKELETAL = 'L'
NEUROLOGICAL = 'N'
PSYCHOLOGICAL = 'P'
RESPIRATORY = 'R'
SKIN = 'S'
ENDORCRINE = 'T'
UROLOGY = 'U'
PREGNANCY = 'W'
FEMALEGENITAL = 'X'
MALEGENITAL = 'Y'
SOCIALPROB = 'Z'
MEDICINECATAGORIES = [
(GENERAL, 'General and unspecified'),
(BLOOD, 'Blood, blood forming organs, lymphatics, spleen'),
(DIGESTIVE, 'Digestive'),
(EYE, 'Eye'),
(EAR, 'Ear'),
(CIRCULATORY, 'Circulatory'),
(MUSCULOSKELETAL, 'Musculoskeletal'),
        (NEUROLOGICAL, 'Neurological'),
(PSYCHOLOGICAL, 'Psychological'),
(RESPIRATORY, 'Respiratory'),
(SKIN, 'Skin'),
(ENDORCRINE, 'Endocrine, metabolic and nutritional'),
(UROLOGY, 'Urology'),
(PREGNANCY, 'Pregnancy, childbirth, family planning'),
(FEMALEGENITAL, 'Female genital system and breast'),
(MALEGENITAL, 'Male genital system'),
(SOCIALPROB, 'Social problems'),
]
med_name = models.CharField(max_length = 200)
med_id = models.BigAutoField(primary_key = True, serialize=False)
med_price = models.FloatField(max_length = 10)
med_quantity = models.PositiveIntegerField()
med_status = models.CharField(max_length = 3, choices = MEDICINESTATUS, default=MUSTPRESCRIBED)
med_catagory = models.CharField(max_length = 2, choices = MEDICINECATAGORIES, default = GENERAL)
med_generic = models.CharField(max_length = 100)
pharmacy_id = models.ForeignKey(Employee, on_delete = models.CASCADE)
    def requires_prescription(self):
        # True when this medicine may only be dispensed against a prescription
        return self.med_status == self.MUSTPRESCRIBED
def __str__(self):
return self.med_name
class Customer(models.Model):
customer_name = models.CharField(max_length = 100)
customer_id = models.BigAutoField(primary_key = True, serialize = False)
birthdate = models.DateField()
customer_address = models.CharField(max_length = 254)
customer_password = models.CharField(max_length = 100)
customer_email = models.EmailField(max_length = 254)
customer_phone = models.CharField(max_length = 15)
customer_longT = models.CharField(max_length = 20, default= "90.40")
customer_latiT = models.CharField(max_length = 20, default= "20.40")
def __str__(self):
return self.customer_name
class Delivery(models.Model):
DS_id = models.AutoField(primary_key = True)
DS_start_time = models.TimeField()
DS_stop_time = models.TimeField()
DS_capacity = models.IntegerField(default = 15)
DS_status = models.BooleanField(default = True)
def __str__(self):
return str(self.DS_id)
class Cart(models.Model):
cart_id = models.BigAutoField(primary_key = True)
pharmacy_id = models.ForeignKey(Employee, on_delete = models.CASCADE)
customer_id = models.ForeignKey(Customer, on_delete = models.CASCADE)
adding_quantity = models.IntegerField(default= 5)
med_id = models.ForeignKey(Inventory, on_delete = models.CASCADE)
def __str__(self):
return str(self.cart_id)
class OrderedCart(models.Model):
order_cart_id = models.BigAutoField(primary_key = True)
pharmacy_id = models.ForeignKey(Employee, on_delete = models.CASCADE)
customer_id = models.ForeignKey(Customer, on_delete = models.CASCADE)
adding_quantity = models.IntegerField(default= 5)
med_id = models.ForeignKey(Inventory, on_delete = models.CASCADE)
def __str__(self):
return str(self.order_cart_id)
class Order(models.Model):
ONPROCESS = 'OP'
ONTHEWAY = 'OTW'
DELIVERED = 'DV'
DELIVERYSTATUS = [
(ONPROCESS, 'Processing'),
(ONTHEWAY, 'On the way'),
(DELIVERED, 'Medicine Delivered')
]
CASHONDELVRY = 'COD'
BKASH = 'BKS'
ROCKET = 'RKT'
CARD = 'CRD'
PAYPAL = 'PPL'
PAYMENTOPTIONS = [
(CASHONDELVRY, 'Cash On Delivery'),
(BKASH, 'Bkash'),
(ROCKET, 'Rocket'),
(CARD, 'ATM Card'),
(PAYPAL, 'PayPal')
]
order_id = models.BigAutoField(primary_key = True)
order_date = models.DateTimeField(auto_now_add=True)
pharmacy_id = models.ManyToManyField(Employee)
customer_id = models.ForeignKey(Customer, on_delete = models.CASCADE)
delivery_id = models.ForeignKey(Delivery, on_delete = models.CASCADE)
delivery_status = models.CharField(max_length = 3, choices = DELIVERYSTATUS, default = ONPROCESS)
rating = models.IntegerField()
order_quantity = models.IntegerField(default= 5)
med_ids = models.ManyToManyField(Inventory)
order_status = models.BooleanField(default=False)
order_type = models.CharField(max_length = 3, choices = PAYMENTOPTIONS, default = CASHONDELVRY)
delivery_note = models.CharField(max_length = 100, default = "Call me when you arrive")
order_cost = models.CharField(max_length = 10, default = "10")
order_longT = models.CharField(max_length = 20, default= "90.40")
order_latiT = models.CharField(max_length = 20, default= "20.40")
orered_cart = models.ManyToManyField(OrderedCart)
def __str__(self):
return str(self.order_id)
class Admin(models.Model):
admin_name = models.CharField(max_length = 100)
admin_password = models.CharField(max_length = 100)
admin_id = models.AutoField(primary_key = True)
admin_designation = models.CharField(max_length = 50)
admin_phone = models.CharField(max_length = 15)
admin_email = models.EmailField(max_length = 254)
def __str__(self):
return self.admin_name
class Prescription(models.Model):
pres_id = models.BigAutoField(primary_key = True)
customer_id = models.ForeignKey(Customer, on_delete = models.CASCADE)
pres_status = models.BooleanField(default=False)
order_id = models.ForeignKey(Order, on_delete = models.CASCADE)
def __str__(self):
return str(self.pres_id)
class Coupon(models.Model):
coupon_id = models.BigAutoField(primary_key = True)
coupon_code = models.CharField(max_length = 10)
coupon_amount = models.IntegerField()
med_id = models.ForeignKey(Inventory, on_delete = models.CASCADE)
def __str__(self):
return self.coupon_code
|
[
"muhammadniazmorshed@gmail.com"
] |
muhammadniazmorshed@gmail.com
|
3c26872318ad21cb83b2b723de7ce8593642ae93
|
59d65cd3fa9e614bfd539aff88744cc2b450cf8b
|
/ex101 - funcao voto.py
|
75644343f216ad4c14cf5e36f25f2dc6fc52e3c1
|
[] |
no_license
|
gerolaleticia/Voting-function
|
89c5cca817869e8a9fc514a85f88b1db0ce98f76
|
53e35c3eeca7a4a1ced9c7a99db7d0731ab2c6df
|
refs/heads/master
| 2021-03-30T02:09:14.594967
| 2020-04-01T20:21:50
| 2020-04-01T20:21:50
| 248,005,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
def voto(ano):
    from datetime import date
    atual = date.today().year
    idade = atual - ano
    # voting is optional at ages 16-17 and from 65 on, forbidden under 16,
    # and mandatory otherwise
    if idade < 18 and idade >= 16:
        print(f'At {idade} years old, voting is OPTIONAL.')
    elif idade >= 65:
        print(f'At {idade} years old, voting is OPTIONAL.')
    elif idade < 16:
        print(f'At {idade} years old, the person CANNOT VOTE.')
    else:
        print(f'At {idade} years old, voting is MANDATORY.')
# main program
ano = int(input('What year was the person born? '))
voto(ano)
'''def voto(ano):
    from datetime import date
    atual = date.today().year
    idade = atual - ano
    if idade < 18 and idade >= 16:
        return (f'At {idade} years old, voting is OPTIONAL.')
    elif idade >= 65:
        return (f'At {idade} years old, voting is OPTIONAL.')
    elif idade < 16:
        return (f'At {idade} years old, the person CANNOT VOTE.')
    else:
        return (f'At {idade} years old, voting is MANDATORY.')
# main program
print(voto(2000))'''
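# Example runs (ages assume the current year is 2020):
# voto(2004)  # prints 'At 16 years old, voting is OPTIONAL.'
# voto(1990)  # prints 'At 30 years old, voting is MANDATORY.'
# voto(2010)  # prints 'At 10 years old, the person CANNOT VOTE.'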
|
[
"noreply@github.com"
] |
gerolaleticia.noreply@github.com
|
2925862530fc1d63e4d390624948e264adb3f2ca
|
104baf85a7fed4bbb738e66f5a308dcf5360a201
|
/VQCPCB/data_processor/bach_data_processor.py
|
a7fe2c2b39ec95fd9cfadda58d87d528889a2ce5
|
[] |
no_license
|
MGSong/vqcpc-bach
|
09403c217bdd5165a4609b511ef837f4a19f650a
|
36a772bf99a7a2aba462bd86d362b7180f08847a
|
refs/heads/master
| 2023-02-22T20:32:06.728063
| 2021-01-27T14:03:18
| 2021-01-27T14:03:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
import torch
from VQCPCB.data_processor.data_processor import DataProcessor
from VQCPCB.utils import flatten, to_numpy
class BachDataProcessor(DataProcessor):
def __init__(self, embedding_size, num_events, num_tokens_per_channel):
super(BachDataProcessor, self).__init__(embedding_size=embedding_size,
num_events=num_events,
num_tokens_per_channel=num_tokens_per_channel
)
|
[
"crestel.leopold@gmail.com"
] |
crestel.leopold@gmail.com
|
411a288382166b49c68b10168485d9a50dbbfaf3
|
d69d43fd29177e86f1ad697d158a7d4eb2c14d63
|
/my_dogs.py
|
df9572710fdd5389a08458f6babbea4034a0910c
|
[] |
no_license
|
sicaramacuya/superhero-team-dueler
|
3a8ccc764ce75b49730e300dfe1a2ff3336ecd97
|
1afe77b79d5ba54ce47cac2b49d172a0576fc584
|
refs/heads/main
| 2023-02-05T20:53:07.708087
| 2020-12-04T21:52:09
| 2020-12-04T21:52:09
| 315,133,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from dog import Dog
my_dog = Dog('Rex', 'SuperDog')
my_dog.bark()
my_other_dog = Dog('Annie', 'SuperDog')
print(my_other_dog.name)
amigo = Dog('Amigo', 'SuperSuperDog')
solo = Dog('Solo', 'SuperDuperDog')
mando = Dog('Mando', 'SuperSuperDuperDog')
amigo.sit()
solo.roll()
mando.bark()
|
[
"eric.morales-rodriguez@students.makeschool.com"
] |
eric.morales-rodriguez@students.makeschool.com
|
edd73bda636781493ea4aceee5f06ca00ecb80be
|
61602ef53c4a4a16df06e09e91763a155807c2dc
|
/tests/test_scxml.py
|
7bb8d78843e2c1ae821268280b8956dc0d06518d
|
[
"MIT"
] |
permissive
|
matEhickey/xstate-python
|
17265332e20caf039931900d7984860438f4e318
|
09f97897004d1e4f06e666fee3cdb0d55c9d91ce
|
refs/heads/master
| 2022-12-31T17:25:23.097920
| 2020-08-17T13:07:35
| 2020-08-17T13:07:35
| 288,171,262
| 0
| 0
|
MIT
| 2022-09-06T13:29:46
| 2020-08-17T12:13:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
import xml.etree.ElementTree as ET
from typing import Optional, Dict, List
import json
import pytest
from xstate.scxml import scxml_to_machine
test_dir = "node_modules/@scion-scxml/test-framework/test"
test_groups: Dict[str, List[str]] = {"actionSend": ["send1"]}
test_files = [
(f"{test_dir}/{k}/{vv}.scxml", f"{test_dir}/{k}/{vv}.json")
for k, v in test_groups.items()
for vv in v
]
@pytest.mark.parametrize("scxml_source,scxml_test_source", test_files)
def test_scxml(scxml_source, scxml_test_source):
machine = scxml_to_machine(scxml_source)
state = machine.initial_state
with open(scxml_test_source) as scxml_test_file:
scxml_test = json.load(scxml_test_file)
for event_test in scxml_test.get("events"):
event_to_send = event_test.get("event")
event_name = event_to_send.get("name")
next_configuration = event_test.get("nextConfiguration")
state = machine.transition(state, event_name)
assert [sn.key for sn in state.configuration] == next_configuration
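# Shape of a scion-scxml test file as consumed above (a sketch reconstructed
# from the .get() calls; no other fields are assumed):
# {
#     "events": [
#         {"event": {"name": "t"}, "nextConfiguration": ["b"]}
#     ]
# }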
|
[
"davidkpiano@gmail.com"
] |
davidkpiano@gmail.com
|
17627ea9e12d3290f0494d045472dbace5088570
|
3794d14d99ef737217f568234058811595d9ec61
|
/analysis_nlp.py
|
27dccef1a6c62935aaef8e726679be9f26664693
|
[] |
no_license
|
thepharmproject/set_of_scripts
|
ff109dda5556f1cf77fad0b87fe810439a8e776c
|
9c09a543924168424d44d3589fc3a34ea9c7c218
|
refs/heads/master
| 2023-01-28T04:16:05.990536
| 2020-12-11T08:09:31
| 2020-12-11T08:09:31
| 294,672,197
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80,402
|
py
|
''' This python file includes various nlp analysis methods '''
''' PYTHON SETUP '''
# the required packages are listed here as anaconda setup commands.
# conda install seaborn
# conda install scikit-learn
# conda install -c conda-forge parsedatetime
# conda install -c conda-forge dateparser
# conda install -c conda-forge datefinder
# conda install -c conda-forge textblob
# conda install -c conda-forge googletrans
# conda install -c conda-forge langdetect
# conda install -c conda-forge geopy
# conda install -c conda-forge jellyfish
# conda install -c conda-forge matplotlib
# conda install -c conda-forge spacy
# python -m spacy download en_core_web_sm
# python -m spacy download en_core_web_md
# python -m spacy download el_core_news_sm
# python -m spacy download el_core_news_md
# python -m spacy download es_core_news_sm
# python -m spacy download es_core_news_md
# python -m spacy download it_core_news_sm
# python -m spacy download it_core_news_md
''' LIBRARIES IMPORTED '''
import time, argparse, string, json, sys
from textblob import TextBlob
from googletrans import Translator
from langdetect import detect
import parsedatetime, dateparser, datefinder
from geopy.geocoders import Nominatim, GoogleV3
from difflib import SequenceMatcher
import jellyfish
import spacy
from spacy import displacy
from spacy.matcher import Matcher, PhraseMatcher
from spacy.lang.en import English  # tokenizer-only pipeline, used by analyze_sentiment
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from collections import Counter
import utilities as utils
''' ANALYSIS METHODS '''
# ******************************************************************************************
# detect language from text. a cascading fallback approach is adopted for improved
# robustness. textblob, google translate and langdetect services are used. if a service
# fails, the result from the next one is requested.
def detect_language(text):
print('* language detection')
lang = None
try:
lang = TextBlob(text[:100]).detect_language()
print('\tlanguage is (TextBlob):', lang)
except:
print('\tTextBlob failed')
try:
lang = Translator().detect(text[:100]).lang
print('\tlanguage is (Google Translator):', lang,)
except:
print('\tGoogle Translator failed')
try:
lang = detect(text[:100])
print('\tlanguage is (langdetect):', lang)
except:
print('\tlangdetect failed')
if lang is None: print('\tlanguage detection failed...')
return lang
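# Usage sketch (TextBlob and googletrans call remote services, so offline only
# langdetect is likely to respond):
# detect_language('This is a short English sentence.')  # -> 'en'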
# ******************************************************************************************
# detect datetime from metadata and text. the same cascading fallback approach is adopted
# here. the dateparser, datefinder and parsedatetime packages are exploited, ranked from
# higher accuracy to higher probability of returning a result. if the most accurate package
# fails to detect a datetime object, the next one is called, and so on. detection starts
# from the metadata, where date information is commonly present. if datetime detection
# fails for all packages on the metadata, the same workflow is applied to the text data.
def detect_datetime(text, meta, lang):
print('* datetime detection')
# initialize results
results = []
date = None
print('\tmeta:', meta)
if len(results) == 0:
try:
date = dateparser.parse(meta)
if date is not None:
print('\tdateparser meta:', date)
results.append(str(date))
except: print('\tdateparser meta failed')
if len(results) == 0:
print('\tdateparser meta: none')
try:
dates = datefinder.find_dates(meta)
for date_item in dates:
date = date_item
print('\tdatefinder meta:', date)
results.append(str(date))
break
except: print('\tdatefinder meta failed')
if len(results) == 0:
print('\tdatefinder meta: none')
try:
date = dateparser.parse(text)
if date is not None:
print('\tdateparser text:', date)
results.append(str(date))
except: print('\tdateparser text failed')
if len(results) == 0:
print('\tdateparser text: none')
try:
dates = datefinder.find_dates(text)
for date_item in dates:
date = date_item
print('\tdatefinder text:', date)
results.append(str(date))
break
except: print('\tdatefinder text failed')
if len(results) == 0:
print('\tdatefinder text: none')
print('\tno datetime information found in text')
# datetime = parsedatetime.Calendar().parse(meta)
date = ''
results.append(str(date))
return results[0]
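# Usage sketch of the underlying packages: dateparser parses explicit and
# relative expressions, while datefinder scans free text for date-like spans.
# dateparser.parse('12/12/12')  # -> datetime.datetime(2012, 12, 12, 0, 0)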
# ******************************************************************************************
# detect hate speech in text. three approaches are implemented (mode='strings', 'lemmas',
# 'vectors'). the first one is based on a dictionary of terms for four different
# languages: english, greek, italian and spanish. a language model is loaded (according to
# the language of the text), common practices are followed (lowercasing, lemmatization, stop
# word and punctuation removal), and the targeted terms are searched for in the text. if
# found, text segments are denoted as "hate speech". the second one matches on lemmas, and
# the third one is based on word vectors, allowing for a more semantic detection. the same
# workflow is followed for these methods as well (lemmatization etc.). if mode is set to
# "both", the union of the results from all methods is returned.
def detect_hate(text, meta, lang, mode='strings'):
print('* hate speech detection with mode \'{}\''.format(mode))
# initialize the results list
results = []
# load the appropriate language model
if mode == 'strings' or mode == 'lemmas': model_suffix = 'sm'
else: model_suffix = 'md'
if lang == 'en': nlp = spacy.load(lang + '_core_web_' + model_suffix)
elif lang == 'el' or lang == 'es' or lang == 'it': nlp = spacy.load(lang + '_core_news_' + model_suffix)
else: return ''
# load the filter terms from the dictionaries - safe words and phrases, secondary words and primary words
with open('Dictionaries\\dictionary_' + lang + '_s.txt', 'r', encoding='utf-8') as file: terms_list = file.read().splitlines()
with open('Dictionaries\\dictionary_' + lang + '_a.txt', 'r', encoding='utf-8') as file: terms_a = file.read().splitlines()
with open('Dictionaries\\dictionary_' + lang + '_b.txt', 'r', encoding='utf-8') as file: terms_b = file.read().splitlines()
# synthesize phrases
for term_a in terms_a:
for term_b in terms_b:
# find all suffixes and make all possible combinations
if term_a.find("/") > 0: term_a = term_a[:term_a.find("/")]
if term_b.find("/") > 0: term_b = term_b[:term_b.find("/")]
# if the suffix ends with a "-" join the words instead of making a phrase
if term_a[-1] =='-': terms_list.append(term_a[:-1] + term_b)
else: terms_list.append(term_a + ' ' + term_b)
#time_c = time.time()
# print('\tload dictionary:', time_c - time_b)
# find matches in text and search phrases
words_token = nlp(text)
# for each search phrase
for terms in terms_list:
# print('analyzing search term \'{}\''.format(terms))
matches = []
# for each word of the search phrase
for term_token in nlp(terms): #terms.split()
if len(matches) > 0 and matches[0] < 0:
#
break
word_pos = -1
matches.append(word_pos)
term_t = utils.clean_accent(term_token.text.lower())
term_tl = utils.clean_accent(term_token.lemma_.lower())
# for each word of the text
for word_token in words_token:
word_pos += 1
word_t = utils.clean_accent(word_token.text.lower())
word_tl = utils.clean_accent(word_token.lemma_.lower())
# string manipulation
if mode == 'strings' or mode == 'both':
score = SequenceMatcher(None, word_t, term_t).ratio()
if lang == 'el': match = word_t.find(term_t[:max(3, len(term_t)-3)])
else: match = word_t.find(term_t[:max(3, len(term_t)-1)])
if score > 0.5 and match == 0:
# print('\tstring manipulation for term \"{}\" and word \"{}\" with score {:.2f}'.format(term_token, word_token, score))
if not word_pos in matches: matches[len(matches)-1] = word_pos
break
# lemma manipulation
if mode == 'lemmas' or mode == 'both':
score = SequenceMatcher(None, word_tl, term_tl).ratio()
if score > 0.5:
# print('\tstring manipulation for term \"{}\" and word \"{}\" with score {:.2f}'.format(term_token, word_token, score))
if not word_pos in matches: matches[len(matches) - 1] = word_pos
break
# word vectors
if mode == 'vectors' or mode == 'both':
if word_token.has_vector and term_token.has_vector and len(word_token.text) > 5:
score = term_token.similarity(word_token)
if score > 0.8:
# print('\tword-vector for term \"{}\" and word \"{}\" with score {:.2f}:'.format(term_token, word_token, score))
if not word_pos in matches: matches[len(matches)-1] = word_pos
break
# confirm matches and locate text
match = True
for i in range(len(matches)):
if matches[i] < 0:
match = False
matches.sort()
if match == True:
# print('\tfound hate-speech for term \'{}\' positions are {}'.format(terms, matches))
# print the whole segment
# results.append('')
# for i in range(matches[0], matches[-1]+1): results[-1] += words_token[i].text + ' '
# print('\tpart of text:', results[-1])
# just print the word combination
results.append('(')
for i in range(len(matches)): results[-1] += words_token[matches[i]].text + ' '
results[-1] = results[-1][:-1] + ', ' + terms + ')'
#time_d = time.time()
#print('\tanalyze phrase: {:.2f}'.format(time_d - time_c))
# transform results to text
results_txt = ''
for result in results:
#
results_txt = results_txt + result + ", "
results_txt = results_txt[:-2]
print('\thate speech:', results_txt)
return results_txt
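# The string/lemma scores above are difflib similarity ratios, e.g.:
# SequenceMatcher(None, 'hateful', 'hate').ratio()  # -> 8/11, roughly 0.73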
# ******************************************************************************************
# a faster implementation of the aforementioned hate speech detection.
def detect_hate_fast(text, meta, lang, mode='strings'):
print('* hate speech detection')
# initialize the results list
results = []
# load the appropriate language model
if mode == 'strings' or mode == 'lemmas': model_suffix = 'sm'
else: model_suffix = 'md'
if lang == 'en': nlp = spacy.load(lang + '_core_web_' + model_suffix)
elif lang == 'el' or lang == 'es' or lang == 'it': nlp = spacy.load(lang + '_core_news_' + model_suffix)
else: return ''
# load the filter terms from the dictionaries - safe words and phrases, secondary words and primary words
with open('Dictionaries\\dictionary_' + lang + '_s.txt', 'r', encoding='utf-8') as file: terms_s = file.read().splitlines()
with open('Dictionaries\\dictionary_' + lang + '_a.txt', 'r', encoding='utf-8') as file: terms_a = file.read().splitlines()
with open('Dictionaries\\dictionary_' + lang + '_b.txt', 'r', encoding='utf-8') as file: terms_b = file.read().splitlines()
    # strip the suffix alternatives ("word/suffixes") from the dictionary entries;
    # write back by index, since rebinding the loop variable would not change the lists
    for i in range(len(terms_a)):
        if terms_a[i].find("/") > 0:
            terms_a[i] = terms_a[i][:terms_a[i].find("/")]
    for i in range(len(terms_b)):
        if terms_b[i].find("/") > 0:
            terms_b[i] = terms_b[i][:terms_b[i].find("/")]
# terms_b = list(dict.fromkeys(terms_b))
# find matches in text and search phrases
matches = []
words_token = nlp(text)
dict_pos = 0
for terms_t in [terms_a, terms_b]:
# for each term list
for terms in terms_t:
word_pos = -1
term_token = nlp(terms)[0]
term_t = utils.clean_accent(term_token.text.lower())
term_tl = utils.clean_accent(term_token.lemma_.lower())
# for each word of the text
for word_token in words_token:
word_pos += 1
word_t = utils.clean_accent(word_token.text.lower())
word_tl = utils.clean_accent(word_token.lemma_.lower())
# string manipulation
if mode == 'strings' or mode == 'both':
score = SequenceMatcher(None, word_t, term_t).ratio()
if lang == 'el': match = word_t.find(term_t[:max(3, len(term_t)-3)])
else: match = word_t.find(term_t[:max(3, len(term_t)-1)])
if score > 0.8 and match == 0:
# print('\tterm \"{}\" and word \"{}\" | score {:.2f} and position {}'.format(term_token, word_token, score, word_pos))
if not word_pos in matches:
matches.append(word_pos)
break
# lemma manipulation
if mode == 'lemmas' or mode == 'both':
score = SequenceMatcher(None, word_tl, term_tl).ratio()
if score > 0.75:
# print('\tstring manipulation for term \"{}\" and word \"{}\" with score {:.2f}'.format(term_token, word_token, score))
if not word_pos in matches:
matches.append(word_pos)
break
# word vectors
if mode == 'vectors' or mode == 'both':
if word_token.has_vector and term_token.has_vector and len(word_token.text) > 3:
score = term_token.similarity(word_token)
if score > 0.65:
# print('\tword-vector for term \"{}\" and word \"{}\" with score {:.2f}:'.format(term_token, word_token, score))
if not word_pos in matches:
matches.append(word_pos)
break
else:
continue
break
# confirm matches and locate text
if len(matches) == 2:
results.append('(')
for i in range(len(matches)): results[-1] += words_token[matches[i]].text + ' '
results[-1] = results[-1][:-1] + ')'
# transform results to text
results_txt = ''
for result in results:
#
results_txt = results_txt + result + ", "
results_txt = results_txt[:-2]
print('\thate speech:', results_txt)
return results_txt
# ******************************************************************************************
# an alternative method for implementing hate speech detection. it is based on spacy's
# phrase matcher.
def detect_hate_matcher(text, meta, lang, data_path):
    # experimental: unlike the other detectors, this method reads a whole
    # json-lines dataset (one record per line) from data_path and ignores
    # the text and meta arguments
    file = open(data_path, 'r', encoding='utf-8')
data = file.read().splitlines()
file.close()
counter = 0
for datum in data:
# load text
try:
datum = json.loads(datum)['text']
counter += 1
except:
print('JSON error')
continue
# analyze text
lang = Translator().detect(datum[:100]).lang
print('{}. {}'.format(counter, datum[:100]))
print('The language is: {}'.format(lang))
if lang == 'el': nlp = spacy.load('el_core_news_sm')
elif lang == 'es': nlp = spacy.load('es_core_news_sm')
elif lang == 'it': nlp = spacy.load('it_core_news_sm')
elif lang == 'en': nlp = spacy.load('en_core_web_sm')
else: continue
# load dictionary
file = open('Dictionaries\\dictionary_' + lang + '.txt', 'r', encoding='utf-8')
terms = file.read().splitlines()
file.close()
# token matcher
# matcher = Matcher(nlp.vocab)
# pattern = [{"LOWER": "κάνει μήνυση"}, {"IS_PUNCT": True}]
# matcher.add("HelloWorld", None, pattern)
        # phrase matcher (these hardcoded demo terms replace the dictionary loaded above)
        terms = ["Ερντογάν", "μήνυση", "μΗνες"]
for i in range(len(terms)):
for token in nlp(terms[i]):
terms[i] = utils.clean_accent(token.lemma_.lower())
print(terms)
# sys.exit()
matcher = PhraseMatcher(nlp.vocab)
patterns = [nlp.make_doc(text) for text in terms]
matcher.add("TerminologyList", None, *patterns)
datum_t = ''
for token in nlp(datum):
datum_t += utils.clean_accent(token.lemma_.lower()) + ' '
print(datum_t)
doc = nlp(datum_t)
matches = matcher(doc)
for match_id, start, end in matches:
string_id = nlp.vocab.strings[match_id] # Get string representation
span = doc[start:end] # The matched span
print(match_id, string_id, start, end, span.text)
# html = displacy.render(doc, style="ent", page=True, options={"ents": ["EVENT"]})
# print(terms_t)
# print(datum_t)
time.sleep(3)
# ******************************************************************************************
# a simple approach for suggesting frequent words found in texts. this can help expand the
# list of terms kept in the dictionaries used to filter data for hate speech. this method
# is meant for texts that have already been marked as "hate speech".
def detect_terms(text, meta, lang):
print('* term detection')
# initialize results
results = []
    # load the appropriate spacy model for the detected language
if lang == 'en': nlp = spacy.load('en_core_web_sm')
elif lang == 'el' or lang == 'es' or lang == 'it': nlp = spacy.load(lang + '_core_news_sm')
else: return
# remove stopwords, punctuation marks and make characters lowercase
words = [token.lemma_.lower() for token in nlp(text) if token.is_stop != True and token.is_punct != True]
# count frequency of words
word_freq = Counter(words)
common_words = word_freq.most_common(5)
# print('\t', common_words)
# filter frequent terms
for common_word in common_words:
if common_word[1] >= 3 and len(common_word[0]) >= 3:
results.append(common_word[0])
            # print('\tcommon word:', common_word[0])
# transform results to text
results_txt = ''
for result in results:
#
results_txt = results_txt + result + ", "
results_txt = results_txt[:-2]
    print('\tcommon words:', results_txt)
return results_txt
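# Usage sketch of the Counter-based ranking used above:
# Counter(['war', 'hate', 'war', 'war']).most_common(2)  # -> [('war', 3), ('hate', 1)]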
# ******************************************************************************************
# a method for detecting geolocation from text. geopy with the nominatim geocoder is used.
# entities in the following ranking are preferred: GPE (countries, cities, states), FAC
# (buildings, airports, highways etc.), ORG (companies, agencies, institutions etc.) and,
# finally, LOC (mountains, bodies of water).
def detect_location(text, meta, lang):
print('* location detection')
# initialize results
results = []
# load the nominatim geopy geocoder
n = Nominatim(user_agent="http")
# load the appropriate spacy model and isolate terms named entity gpe, loc, fac, org
if lang == 'en': nlp = spacy.load('en_core_web_sm')
elif lang == 'el' or lang == 'es' or lang == 'it': nlp = spacy.load(lang + '_core_news_sm')
else: return
ents = nlp(text).ents
# find gpe entities
if len(results) == 0:
for ent in ents:
# print(ent.text, ent.start_char, ent.end_char, ent.label_) #label_ -> ORG, GPE, MONEY
if ent.label_ == 'GPE':
geo = n.geocode(ent.text, language='en')
if geo is not None:
results.append([ent.text, geo.raw["display_name"].split(",")[-1], geo.raw["lat"], geo.raw["lon"]])
# print('\tpossible locations (GPE):', results[-1])
# try for fac entities
if len(results) == 0:
for ent in ents:
if ent.label_ == 'FAC':
geo = n.geocode(ent.text, language='en')
if geo is not None:
results.append([ent.text, geo.raw["display_name"].split(",")[-1], geo.raw["lat"], geo.raw["lon"]])
# print('\tpossible locations (FAC):', results[-1])
# try for org entities
if len(results) == 0:
for ent in ents:
if ent.label_ == 'ORG':
geo = n.geocode(ent.text, language='en')
if geo is not None:
results.append([ent.text, geo.raw["display_name"].split(",")[-1], geo.raw["lat"], geo.raw["lon"]])
# print('\tpossible locations (ORG):', results[-1])
# try for loc entities
if len(results) == 0:
for ent in ents:
if ent.label_ == 'LOC':
geo = n.geocode(ent.text, language='en')
if geo is not None:
results.append([ent.text, geo.raw["display_name"].split(",")[-1], geo.raw["lat"], geo.raw["lon"]])
# print('\tpossible locations (LOC):', results[-1])
# estimate only one location
words = []
for result in results: words.append(utils.clean_whitespaces(result[1]))
word_freq = Counter(words)
common_words = word_freq.most_common(5)
results = []
for common_word in common_words:
results.append(common_word[0])
# break
#print('\testimated location:', results)
# transform results to text
results_txt = ''
for result in results:
#
results_txt = results_txt + result + ", "
results_txt = results_txt[:-2]
print('\testimated locations:', results_txt)
return results_txt
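# Usage sketch (makes a live HTTP request to the Nominatim service): the code
# above keeps the last comma-separated segment of display_name, i.e. the country.
# Nominatim(user_agent="http").geocode('Athens', language='en').raw["display_name"]
# # -> '..., Greece'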
# ******************************************************************************************
# a pilot method for executing sentiment analysis. it will be used as the base for the
# upcoming sentiment analysis methods.
def analyze_sentiment(text, meta, lang):
nlp = English() # We only want the tokenizer, so no need to load a model
matcher = Matcher(nlp.vocab)
pos_emoji = ["😀", "😃", "😂", "🤣", "😊", "😍"] # Positive emoji
neg_emoji = ["😞", "😠", "😩", "😢", "😭", "😒"] # Negative emoji
# Add patterns to match one or more emoji tokens
pos_patterns = [[{"ORTH": emoji}] for emoji in pos_emoji]
neg_patterns = [[{"ORTH": emoji}] for emoji in neg_emoji]
# Function to label the sentiment
def label_sentiment(matcher, doc, i, matches):
match_id, start, end = matches[i]
if doc.vocab.strings[match_id] == "HAPPY": # Don't forget to get string!
doc.sentiment += 0.1 # Add 0.1 for positive sentiment
elif doc.vocab.strings[match_id] == "SAD":
doc.sentiment -= 0.1 # Subtract 0.1 for negative sentiment
matcher.add("HAPPY", label_sentiment, *pos_patterns) # Add positive pattern
matcher.add("SAD", label_sentiment, *neg_patterns) # Add negative pattern
# Add pattern for valid hashtag, i.e. '#' plus any ASCII token
matcher.add("HASHTAG", None, [{"ORTH": "#"}, {"IS_ASCII": True}])
doc = nlp("Hello world 😀 #MondayMotivation")
matches = matcher(doc)
for match_id, start, end in matches:
string_id = doc.vocab.strings[match_id] # Look up string ID
span = doc[start:end]
print(string_id, span.text)
# ******************************************************************************************
# a method for topic modeling. tfidf (term frequency-inverse document frequency) features
# are fed to nmf (non-negative matrix factorization), or raw term counts to lda (latent
# dirichlet allocation). a list of topics is created based on a corpus of text items.
# detected topics and most common terms are printed. 'mode' can be set to 'nmf' or 'lda'.
# enable 'plot' to get graphs for common terms found in texts.
def topic_modeling(corpus, mode='nmf', plot=False):
# detect language
language = detect_language(corpus[0])
print('* topic modeling')
# initialize results
results = []
results_txt = ''
# remove unwanted words
if language == 'en':
#
stop_words = 'english'
elif language == 'es':
#
stop_words = ["0","1","2","3","4","5","6","7","8","9","_","a","actualmente","acuerdo","adelante","ademas","además","adrede","afirmó","agregó","ahi","ahora","ahí","al","algo","alguna","algunas","alguno","algunos","algún","alli","allí","alrededor","ambos","ampleamos","antano","antaño","ante","anterior","antes","apenas","aproximadamente","aquel","aquella","aquellas","aquello","aquellos","aqui","aquél","aquélla","aquéllas","aquéllos","aquí","arriba","arribaabajo","aseguró","asi","así","atras","aun","aunque","ayer","añadió","aún","b","bajo","bastante","bien","breve","buen","buena","buenas","bueno","buenos","c","cada","casi","cerca","cierta","ciertas","cierto","ciertos","cinco","claro","comentó","como","con","conmigo","conocer","conseguimos","conseguir","considera","consideró","consigo","consigue","consiguen","consigues","contigo","contra","cosas","creo","cual","cuales","cualquier","cuando","cuanta","cuantas","cuanto","cuantos","cuatro","cuenta","cuál","cuáles","cuándo","cuánta","cuántas","cuánto","cuántos","cómo","d","da","dado","dan","dar","de","debajo","debe","deben","debido","decir","dejó","del","delante","demasiado","demás","dentro","deprisa","desde","despacio","despues","después","detras","detrás","dia","dias","dice","dicen","dicho","dieron","diferente","diferentes","dijeron","dijo","dio","donde","dos","durante","día","días","dónde","e","ejemplo","el","ella","ellas","ello","ellos","embargo","empleais","emplean","emplear","empleas","empleo","en","encima","encuentra","enfrente","enseguida","entonces","entre","era","erais","eramos","eran","eras","eres","es","esa","esas","ese","eso","esos","esta","estaba","estabais","estaban","estabas","estad","estada","estadas","estado","estados","estais","estamos","estan","estando","estar","estaremos","estará","estarán","estarás","estaré","estaréis","estaría","estaríais","estaríamos","estarían","estarías","estas","este","estemos","esto","estos","estoy","estuve","estuviera","estuvierais","estuvieran","estuvieras","estuvieron","estuviese","estuvieseis","estuviesen","estuvieses","estuvimos","estuviste","estuvisteis","estuviéramos","estuviésemos","estuvo","está","estábamos","estáis","están","estás","esté","estéis","estén","estés","ex","excepto","existe","existen","explicó","expresó","f","fin","final","fue","fuera","fuerais","fueran","fueras","fueron","fuese","fueseis","fuesen","fueses","fui","fuimos","fuiste","fuisteis","fuéramos","fuésemos","g","general","gran","grandes","gueno","h","ha","haber","habia","habida","habidas","habido","habidos","habiendo","habla","hablan","habremos","habrá","habrán","habrás","habré","habréis","habría","habríais","habríamos","habrían","habrías","habéis","había","habíais","habíamos","habían","habías","hace","haceis","hacemos","hacen","hacer","hacerlo","haces","hacia","haciendo","hago","han","has","hasta","hay","haya","hayamos","hayan","hayas","hayáis","he","hecho","hemos","hicieron","hizo","horas","hoy","hube","hubiera","hubierais","hubieran","hubieras","hubieron","hubiese","hubieseis","hubiesen","hubieses","hubimos","hubiste","hubisteis","hubiéramos","hubiésemos","hubo","i","igual","incluso","indicó","informo","informó","intenta","intentais","intentamos","intentan","intentar","intentas","intento","ir","j","junto","k","l","la","lado","largo","las","le","lejos","les","llegó","lleva","llevar","lo","los","luego","lugar","m","mal","manera","manifestó","mas","mayor","me","mediante","medio","mejor","mencionó","menos","menudo","mi","mia","mias","mientras","mio","mios","mis","misma","mismas","mismo","mismos","modo","momento","mucha","muchas",
"mucho","muchos","muy","más","mí","mía","mías","mío","míos","n","nada","nadie","ni","ninguna","ningunas","ninguno","ningunos","ningún","no","nos","nosotras","nosotros","nuestra","nuestras","nuestro","nuestros","nueva","nuevas","nuevo","nuevos","nunca","o","ocho","os","otra","otras","otro","otros","p","pais","para","parece","parte","partir","pasada","pasado","paìs","peor","pero","pesar","poca","pocas","poco","pocos","podeis","podemos","poder","podria","podriais","podriamos","podrian","podrias","podrá","podrán","podría","podrían","poner","por","por qué","porque","posible","primer","primera","primero","primeros","principalmente","pronto","propia","propias","propio","propios","proximo","próximo","próximos","pudo","pueda","puede","pueden","puedo","pues","q","qeu","que","quedó","queremos","quien","quienes","quiere","quiza","quizas","quizá","quizás","quién","quiénes","qué","r","raras","realizado","realizar","realizó","repente","respecto","s","sabe","sabeis","sabemos","saben","saber","sabes","sal","salvo","se","sea","seamos","sean","seas","segun","segunda","segundo","según","seis","ser","sera","seremos","será","serán","serás","seré","seréis","sería","seríais","seríamos","serían","serías","seáis","señaló","si","sido","siempre","siendo","siete","sigue","siguiente","sin","sino","sobre","sois","sola","solamente","solas","solo","solos","somos","son","soy","soyos","su","supuesto","sus","suya","suyas","suyo","suyos","sé","sí","sólo","t","tal","tambien","también","tampoco","tan","tanto","tarde","te","temprano","tendremos","tendrá","tendrán","tendrás","tendré","tendréis","tendría","tendríais","tendríamos","tendrían","tendrías","tened","teneis","tenemos","tener","tenga","tengamos","tengan","tengas","tengo","tengáis","tenida","tenidas","tenido","tenidos","teniendo","tenéis","tenía","teníais","teníamos","tenían","tenías","tercera","ti","tiempo","tiene","tienen","tienes","toda","todas","todavia","todavía","todo","todos","total","trabaja","trabajais","trabajamos","trabajan","trabajar","trabajas","trabajo","tras","trata","través","tres","tu","tus","tuve","tuviera","tuvierais","tuvieran","tuvieras","tuvieron","tuviese","tuvieseis","tuviesen","tuvieses","tuvimos","tuviste","tuvisteis","tuviéramos","tuviésemos","tuvo","tuya","tuyas","tuyo","tuyos","tú","u","ultimo","un","una","unas","uno","unos","usa","usais","usamos","usan","usar","usas","uso","usted","ustedes","v","va","vais","valor","vamos","van","varias","varios","vaya","veces","ver","verdad","verdadera","verdadero","vez","vosotras","vosotros","voy","vuestra","vuestras","vuestro","vuestros","w","x","y","ya","yo","z","él","éramos","ésa","ésas","ése","ésos","ésta","éstas","éste","éstos","última","últimas","último","últimos"]
elif language == 'it':
#
stop_words = ["a","abbastanza","abbia","abbiamo","abbiano","abbiate","accidenti","ad","adesso","affinché","agl","agli","ahime","ahimè","ai","al","alcuna","alcuni","alcuno","all","alla","alle","allo","allora","altre","altri","altrimenti","altro","altrove","altrui","anche","ancora","anni","anno","ansa","anticipo","assai","attesa","attraverso","avanti","avemmo","avendo","avente","aver","avere","averlo","avesse","avessero","avessi","avessimo","aveste","avesti","avete","aveva","avevamo","avevano","avevate","avevi","avevo","avrai","avranno","avrebbe","avrebbero","avrei","avremmo","avremo","avreste","avresti","avrete","avrà","avrò","avuta","avute","avuti","avuto","basta","ben","bene","benissimo","brava","bravo","buono","c","caso","cento","certa","certe","certi","certo","che","chi","chicchessia","chiunque","ci","ciascuna","ciascuno","cima","cinque","cio","cioe","cioè","circa","citta","città","ciò","co","codesta","codesti","codesto","cogli","coi","col","colei","coll","coloro","colui","come","cominci","comprare","comunque","con","concernente","conclusione","consecutivi","consecutivo","consiglio","contro","cortesia","cos","cosa","cosi","così","cui","d","da","dagl","dagli","dai","dal","dall","dalla","dalle","dallo","dappertutto","davanti","degl","degli","dei","del","dell","della","delle","dello","dentro","detto","deve","devo","di","dice","dietro","dire","dirimpetto","diventa","diventare","diventato","dopo","doppio","dov","dove","dovra","dovrà","dovunque","due","dunque","durante","e","ebbe","ebbero","ebbi","ecc","ecco","ed","effettivamente","egli","ella","entrambi","eppure","era","erano","eravamo","eravate","eri","ero","esempio","esse","essendo","esser","essere","essi","ex","fa","faccia","facciamo","facciano","facciate","faccio","facemmo","facendo","facesse","facessero","facessi","facessimo","faceste","facesti","faceva","facevamo","facevano","facevate","facevi","facevo","fai","fanno","farai","faranno","fare","farebbe","farebbero","farei","faremmo","faremo","fareste","faresti","farete","farà","farò","fatto","favore","fece","fecero","feci","fin","finalmente","finche","fine","fino","forse","forza","fosse","fossero","fossi","fossimo","foste","fosti","fra","frattempo","fu","fui","fummo","fuori","furono","futuro","generale","gente","gia","giacche","giorni","giorno","giu","già","gli","gliela","gliele","glieli","glielo","gliene","grande","grazie","gruppo","ha","haha","hai","hanno","ho","i","ie","ieri","il","improvviso","in","inc","indietro","infatti","inoltre","insieme","intanto","intorno","invece","io","l","la","lasciato","lato","le","lei","li","lo","lontano","loro","lui","lungo","luogo","là","ma","macche","magari","maggior","mai","male","malgrado","malissimo","me","medesimo","mediante","meglio","meno","mentre","mesi","mezzo","mi","mia","mie","miei","mila","miliardi","milioni","minimi","mio","modo","molta","molti","moltissimo","molto","momento","mondo","ne","negl","negli","nei","nel","nell","nella","nelle","nello","nemmeno","neppure","nessun","nessuna","nessuno","niente","no","noi","nome","non","nondimeno","nonostante","nonsia","nostra","nostre","nostri","nostro","novanta","nove","nulla","nuovi","nuovo","o","od","oggi","ogni","ognuna","ognuno","oltre","oppure","ora","ore","osi","ossia","ottanta","otto","paese","parecchi","parecchie","parecchio","parte","partendo","peccato","peggio","per","perche","perchè","perché","percio","perciò","perfino","pero","persino","persone","però","piedi","pieno","piglia","piu","piuttosto","più","po","pochissimo","poco","poi","poiche","possa","possedere","posteriore","posto","potreb
be","preferibilmente","presa","press","prima","primo","principalmente","probabilmente","promesso","proprio","puo","pure","purtroppo","può","qua","qualche","qualcosa","qualcuna","qualcuno","quale","quali","qualunque","quando","quanta","quante","quanti","quanto","quantunque","quarto","quasi","quattro","quel","quella","quelle","quelli","quello","quest","questa","queste","questi","questo","qui","quindi","quinto","realmente","recente","recentemente","registrazione","relativo","riecco","rispetto","salvo","sara","sarai","saranno","sarebbe","sarebbero","sarei","saremmo","saremo","sareste","saresti","sarete","sarà","sarò","scola","scopo","scorso","se","secondo","seguente","seguito","sei","sembra","sembrare","sembrato","sembrava","sembri","sempre","senza","sette","si","sia","siamo","siano","siate","siete","sig","solito","solo","soltanto","sono","sopra","soprattutto","sotto","spesso","sta","stai","stando","stanno","starai","staranno","starebbe","starebbero","starei","staremmo","staremo","stareste","staresti","starete","starà","starò","stata","state","stati","stato","stava","stavamo","stavano","stavate","stavi","stavo","stemmo","stessa","stesse","stessero","stessi","stessimo","stesso","steste","stesti","stette","stettero","stetti","stia","stiamo","stiano","stiate","sto","su","sua","subito","successivamente","successivo","sue","sugl","sugli","sui","sul","sull","sulla","sulle","sullo","suo","suoi","tale","tali","talvolta","tanto","te","tempo","terzo","th","ti","titolo","tra","tranne","tre","trenta","triplo","troppo","trovato","tu","tua","tue","tuo","tuoi","tutta","tuttavia","tutte","tutti","tutto","uguali","ulteriore","ultimo","un","una","uno","uomo","va","vai","vale","vari","varia","varie","vario","verso","vi","vicino","visto","vita","voi","volta","volte","vostra","vostre","vostri","vostro","è"]
elif language == 'el':
#
stop_words = ["ένα","έναν","ένας","αι","ακομα","ακομη","ακριβως","αληθεια","αληθινα","αλλα","αλλαχου","αλλες","αλλη","αλλην","αλλης","αλλιως","αλλιωτικα","αλλο","αλλοι","αλλοιως","αλλοιωτικα","αλλον","αλλος","αλλοτε","αλλου","αλλους","αλλων","αμα","αμεσα","αμεσως","αν","ανα","αναμεσα","αναμεταξυ","ανευ","αντι","αντιπερα","αντις","ανω","ανωτερω","αξαφνα","απ","απεναντι","απο","αποψε","από","αρα","αραγε","αργα","αργοτερο","αριστερα","αρκετα","αρχικα","ας","αυριο","αυτα","αυτες","αυτεσ","αυτη","αυτην","αυτης","αυτο","αυτοι","αυτον","αυτος","αυτοσ","αυτου","αυτους","αυτουσ","αυτων","αφοτου","αφου","αἱ","αἳ","αἵ","αὐτόσ","αὐτὸς","αὖ","α∆ιακοπα","βεβαια","βεβαιοτατα","γάρ","γα","γα^","γε","γι","για","γοῦν","γρηγορα","γυρω","γὰρ","δ'","δέ","δή","δαί","δαίσ","δαὶ","δαὶς","δε","δεν","δι","δι'","διά","δια","διὰ","δὲ","δὴ","δ’","εαν","εαυτο","εαυτον","εαυτου","εαυτους","εαυτων","εγκαιρα","εγκαιρως","εγω","ειθε","ειμαι","ειμαστε","ειναι","εις","εισαι","εισαστε","ειστε","ειτε","ειχα","ειχαμε","ειχαν","ειχατε","ειχε","ειχες","ει∆εμη","εκ","εκαστα","εκαστες","εκαστη","εκαστην","εκαστης","εκαστο","εκαστοι","εκαστον","εκαστος","εκαστου","εκαστους","εκαστων","εκει","εκεινα","εκεινες","εκεινεσ","εκεινη","εκεινην","εκεινης","εκεινο","εκεινοι","εκεινον","εκεινος","εκεινοσ","εκεινου","εκεινους","εκεινουσ","εκεινων","εκτος","εμας","εμεις","εμενα","εμπρος","εν","ενα","εναν","ενας","ενος","εντελως","εντος","εντωμεταξυ","ενω","ενός","εξ","εξαφνα","εξης","εξισου","εξω","επ","επί","επανω","επειτα","επει∆η","επι","επισης","επομενως","εσας","εσεις","εσενα","εστω","εσυ","ετερα","ετεραι","ετερας","ετερες","ετερη","ετερης","ετερο","ετεροι","ετερον","ετερος","ετερου","ετερους","ετερων","ετουτα","ετουτες","ετουτη","ετουτην","ετουτης","ετουτο","ετουτοι","ετουτον","ετουτος","ετουτου","ετουτους","ετουτων","ετσι","ευγε","ευθυς","ευτυχως","εφεξης","εχει","εχεις","εχετε","εχθες","εχομε","εχουμε","εχουν","εχτες","εχω","εως","εἰ","εἰμί","εἰμὶ","εἰς","εἰσ","εἴ","εἴμι","εἴτε","ε∆ω","η","ημασταν","ημαστε","ημουν","ησασταν","ησαστε","ησουν","ηταν","ητανε","ητοι","ηττον","η∆η","θα","ι","ιι","ιιι","ισαμε","ισια","ισως","ισωσ","ι∆ια","ι∆ιαν","ι∆ιας","ι∆ιες","ι∆ιο","ι∆ιοι","ι∆ιον","ι∆ιος","ι∆ιου","ι∆ιους","ι∆ιων","ι∆ιως","κ","καί","καίτοι","καθ","καθε","καθεμια","καθεμιας","καθενα","καθενας","καθενος","καθετι","καθολου","καθως","και","κακα","κακως","καλα","καλως","καμια","καμιαν","καμιας","καμποσα","καμποσες","καμποση","καμποσην","καμποσης","καμποσο","καμποσοι","καμποσον","καμποσος","καμποσου","καμποσους","καμποσων","κανεις","κανεν","κανενα","κανεναν","κανενας","κανενος","καποια","καποιαν","καποιας","καποιες","καποιο","καποιοι","καποιον","καποιος","καποιου","καποιους","καποιων","καποτε","καπου","καπως","κατ","κατά","κατα","κατι","κατιτι","κατοπιν","κατω","κατὰ","καὶ","κι","κιολας","κλπ","κοντα","κτλ","κυριως","κἀν","κἂν","λιγακι","λιγο","λιγωτερο","λογω","λοιπα","λοιπον","μέν","μέσα","μή","μήτε","μία","μα","μαζι","μακαρι","μακρυα","μαλιστα","μαλλον","μας","με","μεθ","μεθαυριο","μειον","μελει","μελλεται","μεμιας","μεν","μερικα","μερικες","μερικοι","μερικους","μερικων","μεσα","μετ","μετά","μετα","μεταξυ","μετὰ","μεχρι","μη","μην","μηπως","μητε","μη∆ε","μιά","μια","μιαν","μιας","μολις","μολονοτι","μοναχα","μονες","μονη","μονην","μονης","μονο","μονοι","μονομιας","μονος","μονου","μονους","μονων","μου","μπορει","μπορουν","μπραβο","μπρος","μἐν","μὲν","μὴ","μὴν","να","ναι","νωρις","ξανα","ξαφνικα","ο","οι","ολα","ολες","ολη","ολην","ολης","ολο","ολογυρα","ολοι","ολον","ολονεν","ολος","ολοτελα","ολου","ολους","ολων","ολως","ολως∆ιολου","ομως","ομωσ"
,"οποια","οποιαν","οποιαν∆ηποτε","οποιας","οποιας∆ηποτε","οποια∆ηποτε","οποιες","οποιες∆ηποτε","οποιο","οποιοι","οποιον","οποιον∆ηποτε","οποιος","οποιος∆ηποτε","οποιου","οποιους","οποιους∆ηποτε","οποιου∆ηποτε","οποιο∆ηποτε","οποιων","οποιων∆ηποτε","οποι∆ηποτε","οποτε","οποτε∆ηποτε","οπου","οπου∆ηποτε","οπως","οπωσ","ορισμενα","ορισμενες","ορισμενων","ορισμενως","οσα","οσα∆ηποτε","οσες","οσες∆ηποτε","οση","οσην","οσην∆ηποτε","οσης","οσης∆ηποτε","οση∆ηποτε","οσο","οσοι","οσοι∆ηποτε","οσον","οσον∆ηποτε","οσος","οσος∆ηποτε","οσου","οσους","οσους∆ηποτε","οσου∆ηποτε","οσο∆ηποτε","οσων","οσων∆ηποτε","οταν","οτι","οτι∆ηποτε","οτου","ου","ουτε","ου∆ε","οχι","οἱ","οἳ","οἷς","οὐ","οὐδ","οὐδέ","οὐδείσ","οὐδεὶς","οὐδὲ","οὐδὲν","οὐκ","οὐχ","οὐχὶ","οὓς","οὔτε","οὕτω","οὕτως","οὕτωσ","οὖν","οὗ","οὗτος","οὗτοσ","παλι","παντοτε","παντου","παντως","παρ","παρά","παρα","παρὰ","περί","περα","περι","περιπου","περισσοτερο","περσι","περυσι","περὶ","πια","πιθανον","πιο","πισω","πλαι","πλεον","πλην","ποια","ποιαν","ποιας","ποιες","ποιεσ","ποιο","ποιοι","ποιον","ποιος","ποιοσ","ποιου","ποιους","ποιουσ","ποιων","πολυ","ποσες","ποση","ποσην","ποσης","ποσοι","ποσος","ποσους","ποτε","που","πουθε","πουθενα","ποῦ","πρεπει","πριν","προ","προκειμενου","προκειται","προπερσι","προς","προσ","προτου","προχθες","προχτες","πρωτυτερα","πρόσ","πρὸ","πρὸς","πως","πωσ","σαν","σας","σε","σεις","σημερα","σιγα","σου","στα","στη","στην","στης","στις","στο","στον","στου","στους","στων","συγχρονως","συν","συναμα","συνεπως","συνηθως","συχνα","συχνας","συχνες","συχνη","συχνην","συχνης","συχνο","συχνοι","συχνον","συχνος","συχνου","συχνους","συχνων","συχνως","σχε∆ον","σωστα","σόσ","σύ","σύν","σὸς","σὺ","σὺν","τά","τήν","τί","τίς","τίσ","τα","ταυτα","ταυτες","ταυτη","ταυτην","ταυτης","ταυτο,ταυτον","ταυτος","ταυτου","ταυτων","ταχα","ταχατε","ταῖς","τα∆ε","τε","τελικα","τελικως","τες","τετοια","τετοιαν","τετοιας","τετοιες","τετοιο","τετοιοι","τετοιον","τετοιος","τετοιου","τετοιους","τετοιων","τη","την","της","τησ","τι","τινα","τιποτα","τιποτε","τις","τισ","το","τοί","τοι","τοιοῦτος","τοιοῦτοσ","τον","τος","τοσα","τοσες","τοση","τοσην","τοσης","τοσο","τοσοι","τοσον","τοσος","τοσου","τοσους","τοσων","τοτε","του","τουλαχιστο","τουλαχιστον","τους","τουτα","τουτες","τουτη","τουτην","τουτης","τουτο","τουτοι","τουτοις","τουτον","τουτος","τουτου","τουτους","τουτων","τούσ","τοὺς","τοῖς","τοῦ","τυχον","των","τωρα","τό","τόν","τότε","τὰ","τὰς","τὴν","τὸ","τὸν","τῆς","τῆσ","τῇ","τῶν","τῷ","υπ","υπερ","υπο","υποψη","υποψιν","υπό","υστερα","φετος","χαμηλα","χθες","χτες","χωρις","χωριστα","ψηλα","ω","ωραια","ως","ωσ","ωσαν","ωσοτου","ωσπου","ωστε","ωστοσο","ωχ","ἀλλ'","ἀλλά","ἀλλὰ","ἀλλ’","ἀπ","ἀπό","ἀπὸ","ἀφ","ἂν","ἃ","ἄλλος","ἄλλοσ","ἄν","ἄρα","ἅμα","ἐάν","ἐγώ","ἐγὼ","ἐκ","ἐμόσ","ἐμὸς","ἐν","ἐξ","ἐπί","ἐπεὶ","ἐπὶ","ἐστι","ἐφ","ἐὰν","ἑαυτοῦ","ἔτι","ἡ","ἢ","ἣ","ἤ","ἥ","ἧς","ἵνα","ὁ","ὃ","ὃν","ὃς","ὅ","ὅδε","ὅθεν","ὅπερ","ὅς","ὅσ","ὅστις","ὅστισ","ὅτε","ὅτι","ὑμόσ","ὑπ","ὑπέρ","ὑπό","ὑπὲρ","ὑπὸ","ὡς","ὡσ","ὥς","ὥστε","ὦ","ᾧ","∆α","∆ε","∆εινα","∆εν","∆εξια","∆ηθεν","∆ηλα∆η","∆ι","∆ια","∆ιαρκως","∆ικα","∆ικο","∆ικοι","∆ικος","∆ικου","∆ικους","∆ιολου","∆ιπλα","∆ιχως"]
# perform the analysis
no_features = 1000
no_top_words = 3
no_topics = 3
if mode == 'nmf':
# tfidf vectorizer and nmf
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)
tfidf = vectorizer.fit_transform(corpus)
feature_names = vectorizer.get_feature_names()
model = NMF(n_components=no_topics, random_state=1, alpha=.1, l1_ratio=.5, init='nndsvd').fit(tfidf)
elif mode == 'lda':
# count vectorizer and LDA
vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)
tf = vectorizer.fit_transform(corpus)
feature_names = vectorizer.get_feature_names()
model = LatentDirichletAllocation(n_components=no_topics, max_iter=5, learning_method='online', learning_offset=50., random_state=0).fit(tf)
else:
print('\tplease select a valid option for mode ("nmf" or "lda")')
return None, None
# display common words
if plot:
vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)
tf = vectorizer.fit_transform(corpus)
bigram_vectorizer = CountVectorizer(ngram_range=(2, 2), stop_words=stop_words)
bigrams = bigram_vectorizer.fit_transform(corpus)
trigram_vectorizer = CountVectorizer(ngram_range=(3, 3), stop_words=stop_words)
trigrams = trigram_vectorizer.fit_transform(corpus)
n_top = 10
plot_common_words(tf, vectorizer, n_top, 'words')
plot_common_words(bigrams, bigram_vectorizer, n_top, 'bigrams')
plot_common_words(trigrams, trigram_vectorizer, n_top, 'trigrams')
# print topics
for topic_idx, topic in enumerate(model.components_):
# print("\ttopic %d:" % (topic_idx+1), ', '.join([feature_names[i] for i in topic.argsort()[:-no_top_words - 1:-1]]))
results_txt = results_txt + '('
for i in topic.argsort()[:-no_top_words - 1:-1]:
results.append(feature_names[i])
results_txt = results_txt + feature_names[i] + ', '
results_txt = results_txt[:-2] + '), '
results_txt = results_txt[:-2]
print('\ttopics detected via {}: {}'.format(mode, results_txt))
return results, results_txt
# ******************************************************************************************
# a method for topic modeling along with named entity detection. common entities are
# returned. 'mode' can be set to 'nmf' or 'lda'.
def entity_modeling(corpus, mode='nmf'):
# detect language
language = detect_language(corpus[0])
print('* entity modeling')
# initialize results
results = []
results_txt = ''
# remove unwanted words
if language == 'en':
#
stop_words = 'english'
elif language == 'es':
#
stop_words = ["0","1","2","3","4","5","6","7","8","9","_","a","actualmente","acuerdo","adelante","ademas","además","adrede","afirmó","agregó","ahi","ahora","ahí","al","algo","alguna","algunas","alguno","algunos","algún","alli","allí","alrededor","ambos","ampleamos","antano","antaño","ante","anterior","antes","apenas","aproximadamente","aquel","aquella","aquellas","aquello","aquellos","aqui","aquél","aquélla","aquéllas","aquéllos","aquí","arriba","arribaabajo","aseguró","asi","así","atras","aun","aunque","ayer","añadió","aún","b","bajo","bastante","bien","breve","buen","buena","buenas","bueno","buenos","c","cada","casi","cerca","cierta","ciertas","cierto","ciertos","cinco","claro","comentó","como","con","conmigo","conocer","conseguimos","conseguir","considera","consideró","consigo","consigue","consiguen","consigues","contigo","contra","cosas","creo","cual","cuales","cualquier","cuando","cuanta","cuantas","cuanto","cuantos","cuatro","cuenta","cuál","cuáles","cuándo","cuánta","cuántas","cuánto","cuántos","cómo","d","da","dado","dan","dar","de","debajo","debe","deben","debido","decir","dejó","del","delante","demasiado","demás","dentro","deprisa","desde","despacio","despues","después","detras","detrás","dia","dias","dice","dicen","dicho","dieron","diferente","diferentes","dijeron","dijo","dio","donde","dos","durante","día","días","dónde","e","ejemplo","el","ella","ellas","ello","ellos","embargo","empleais","emplean","emplear","empleas","empleo","en","encima","encuentra","enfrente","enseguida","entonces","entre","era","erais","eramos","eran","eras","eres","es","esa","esas","ese","eso","esos","esta","estaba","estabais","estaban","estabas","estad","estada","estadas","estado","estados","estais","estamos","estan","estando","estar","estaremos","estará","estarán","estarás","estaré","estaréis","estaría","estaríais","estaríamos","estarían","estarías","estas","este","estemos","esto","estos","estoy","estuve","estuviera","estuvierais","estuvieran","estuvieras","estuvieron","estuviese","estuvieseis","estuviesen","estuvieses","estuvimos","estuviste","estuvisteis","estuviéramos","estuviésemos","estuvo","está","estábamos","estáis","están","estás","esté","estéis","estén","estés","ex","excepto","existe","existen","explicó","expresó","f","fin","final","fue","fuera","fuerais","fueran","fueras","fueron","fuese","fueseis","fuesen","fueses","fui","fuimos","fuiste","fuisteis","fuéramos","fuésemos","g","general","gran","grandes","gueno","h","ha","haber","habia","habida","habidas","habido","habidos","habiendo","habla","hablan","habremos","habrá","habrán","habrás","habré","habréis","habría","habríais","habríamos","habrían","habrías","habéis","había","habíais","habíamos","habían","habías","hace","haceis","hacemos","hacen","hacer","hacerlo","haces","hacia","haciendo","hago","han","has","hasta","hay","haya","hayamos","hayan","hayas","hayáis","he","hecho","hemos","hicieron","hizo","horas","hoy","hube","hubiera","hubierais","hubieran","hubieras","hubieron","hubiese","hubieseis","hubiesen","hubieses","hubimos","hubiste","hubisteis","hubiéramos","hubiésemos","hubo","i","igual","incluso","indicó","informo","informó","intenta","intentais","intentamos","intentan","intentar","intentas","intento","ir","j","junto","k","l","la","lado","largo","las","le","lejos","les","llegó","lleva","llevar","lo","los","luego","lugar","m","mal","manera","manifestó","mas","mayor","me","mediante","medio","mejor","mencionó","menos","menudo","mi","mia","mias","mientras","mio","mios","mis","misma","mismas","mismo","mismos","modo","momento","mucha","muchas",
"mucho","muchos","muy","más","mí","mía","mías","mío","míos","n","nada","nadie","ni","ninguna","ningunas","ninguno","ningunos","ningún","no","nos","nosotras","nosotros","nuestra","nuestras","nuestro","nuestros","nueva","nuevas","nuevo","nuevos","nunca","o","ocho","os","otra","otras","otro","otros","p","pais","para","parece","parte","partir","pasada","pasado","paìs","peor","pero","pesar","poca","pocas","poco","pocos","podeis","podemos","poder","podria","podriais","podriamos","podrian","podrias","podrá","podrán","podría","podrían","poner","por","por qué","porque","posible","primer","primera","primero","primeros","principalmente","pronto","propia","propias","propio","propios","proximo","próximo","próximos","pudo","pueda","puede","pueden","puedo","pues","q","qeu","que","quedó","queremos","quien","quienes","quiere","quiza","quizas","quizá","quizás","quién","quiénes","qué","r","raras","realizado","realizar","realizó","repente","respecto","s","sabe","sabeis","sabemos","saben","saber","sabes","sal","salvo","se","sea","seamos","sean","seas","segun","segunda","segundo","según","seis","ser","sera","seremos","será","serán","serás","seré","seréis","sería","seríais","seríamos","serían","serías","seáis","señaló","si","sido","siempre","siendo","siete","sigue","siguiente","sin","sino","sobre","sois","sola","solamente","solas","solo","solos","somos","son","soy","soyos","su","supuesto","sus","suya","suyas","suyo","suyos","sé","sí","sólo","t","tal","tambien","también","tampoco","tan","tanto","tarde","te","temprano","tendremos","tendrá","tendrán","tendrás","tendré","tendréis","tendría","tendríais","tendríamos","tendrían","tendrías","tened","teneis","tenemos","tener","tenga","tengamos","tengan","tengas","tengo","tengáis","tenida","tenidas","tenido","tenidos","teniendo","tenéis","tenía","teníais","teníamos","tenían","tenías","tercera","ti","tiempo","tiene","tienen","tienes","toda","todas","todavia","todavía","todo","todos","total","trabaja","trabajais","trabajamos","trabajan","trabajar","trabajas","trabajo","tras","trata","través","tres","tu","tus","tuve","tuviera","tuvierais","tuvieran","tuvieras","tuvieron","tuviese","tuvieseis","tuviesen","tuvieses","tuvimos","tuviste","tuvisteis","tuviéramos","tuviésemos","tuvo","tuya","tuyas","tuyo","tuyos","tú","u","ultimo","un","una","unas","uno","unos","usa","usais","usamos","usan","usar","usas","uso","usted","ustedes","v","va","vais","valor","vamos","van","varias","varios","vaya","veces","ver","verdad","verdadera","verdadero","vez","vosotras","vosotros","voy","vuestra","vuestras","vuestro","vuestros","w","x","y","ya","yo","z","él","éramos","ésa","ésas","ése","ésos","ésta","éstas","éste","éstos","última","últimas","último","últimos"]
elif language == 'it':
#
stop_words = ["a","abbastanza","abbia","abbiamo","abbiano","abbiate","accidenti","ad","adesso","affinché","agl","agli","ahime","ahimè","ai","al","alcuna","alcuni","alcuno","all","alla","alle","allo","allora","altre","altri","altrimenti","altro","altrove","altrui","anche","ancora","anni","anno","ansa","anticipo","assai","attesa","attraverso","avanti","avemmo","avendo","avente","aver","avere","averlo","avesse","avessero","avessi","avessimo","aveste","avesti","avete","aveva","avevamo","avevano","avevate","avevi","avevo","avrai","avranno","avrebbe","avrebbero","avrei","avremmo","avremo","avreste","avresti","avrete","avrà","avrò","avuta","avute","avuti","avuto","basta","ben","bene","benissimo","brava","bravo","buono","c","caso","cento","certa","certe","certi","certo","che","chi","chicchessia","chiunque","ci","ciascuna","ciascuno","cima","cinque","cio","cioe","cioè","circa","citta","città","ciò","co","codesta","codesti","codesto","cogli","coi","col","colei","coll","coloro","colui","come","cominci","comprare","comunque","con","concernente","conclusione","consecutivi","consecutivo","consiglio","contro","cortesia","cos","cosa","cosi","così","cui","d","da","dagl","dagli","dai","dal","dall","dalla","dalle","dallo","dappertutto","davanti","degl","degli","dei","del","dell","della","delle","dello","dentro","detto","deve","devo","di","dice","dietro","dire","dirimpetto","diventa","diventare","diventato","dopo","doppio","dov","dove","dovra","dovrà","dovunque","due","dunque","durante","e","ebbe","ebbero","ebbi","ecc","ecco","ed","effettivamente","egli","ella","entrambi","eppure","era","erano","eravamo","eravate","eri","ero","esempio","esse","essendo","esser","essere","essi","ex","fa","faccia","facciamo","facciano","facciate","faccio","facemmo","facendo","facesse","facessero","facessi","facessimo","faceste","facesti","faceva","facevamo","facevano","facevate","facevi","facevo","fai","fanno","farai","faranno","fare","farebbe","farebbero","farei","faremmo","faremo","fareste","faresti","farete","farà","farò","fatto","favore","fece","fecero","feci","fin","finalmente","finche","fine","fino","forse","forza","fosse","fossero","fossi","fossimo","foste","fosti","fra","frattempo","fu","fui","fummo","fuori","furono","futuro","generale","gente","gia","giacche","giorni","giorno","giu","già","gli","gliela","gliele","glieli","glielo","gliene","grande","grazie","gruppo","ha","haha","hai","hanno","ho","i","ie","ieri","il","improvviso","in","inc","indietro","infatti","inoltre","insieme","intanto","intorno","invece","io","l","la","lasciato","lato","le","lei","li","lo","lontano","loro","lui","lungo","luogo","là","ma","macche","magari","maggior","mai","male","malgrado","malissimo","me","medesimo","mediante","meglio","meno","mentre","mesi","mezzo","mi","mia","mie","miei","mila","miliardi","milioni","minimi","mio","modo","molta","molti","moltissimo","molto","momento","mondo","ne","negl","negli","nei","nel","nell","nella","nelle","nello","nemmeno","neppure","nessun","nessuna","nessuno","niente","no","noi","nome","non","nondimeno","nonostante","nonsia","nostra","nostre","nostri","nostro","novanta","nove","nulla","nuovi","nuovo","o","od","oggi","ogni","ognuna","ognuno","oltre","oppure","ora","ore","osi","ossia","ottanta","otto","paese","parecchi","parecchie","parecchio","parte","partendo","peccato","peggio","per","perche","perchè","perché","percio","perciò","perfino","pero","persino","persone","però","piedi","pieno","piglia","piu","piuttosto","più","po","pochissimo","poco","poi","poiche","possa","possedere","posteriore","posto","potreb
be","preferibilmente","presa","press","prima","primo","principalmente","probabilmente","promesso","proprio","puo","pure","purtroppo","può","qua","qualche","qualcosa","qualcuna","qualcuno","quale","quali","qualunque","quando","quanta","quante","quanti","quanto","quantunque","quarto","quasi","quattro","quel","quella","quelle","quelli","quello","quest","questa","queste","questi","questo","qui","quindi","quinto","realmente","recente","recentemente","registrazione","relativo","riecco","rispetto","salvo","sara","sarai","saranno","sarebbe","sarebbero","sarei","saremmo","saremo","sareste","saresti","sarete","sarà","sarò","scola","scopo","scorso","se","secondo","seguente","seguito","sei","sembra","sembrare","sembrato","sembrava","sembri","sempre","senza","sette","si","sia","siamo","siano","siate","siete","sig","solito","solo","soltanto","sono","sopra","soprattutto","sotto","spesso","sta","stai","stando","stanno","starai","staranno","starebbe","starebbero","starei","staremmo","staremo","stareste","staresti","starete","starà","starò","stata","state","stati","stato","stava","stavamo","stavano","stavate","stavi","stavo","stemmo","stessa","stesse","stessero","stessi","stessimo","stesso","steste","stesti","stette","stettero","stetti","stia","stiamo","stiano","stiate","sto","su","sua","subito","successivamente","successivo","sue","sugl","sugli","sui","sul","sull","sulla","sulle","sullo","suo","suoi","tale","tali","talvolta","tanto","te","tempo","terzo","th","ti","titolo","tra","tranne","tre","trenta","triplo","troppo","trovato","tu","tua","tue","tuo","tuoi","tutta","tuttavia","tutte","tutti","tutto","uguali","ulteriore","ultimo","un","una","uno","uomo","va","vai","vale","vari","varia","varie","vario","verso","vi","vicino","visto","vita","voi","volta","volte","vostra","vostre","vostri","vostro","è"]
elif language == 'el':
#
stop_words = ["ένα","έναν","ένας","αι","ακομα","ακομη","ακριβως","αληθεια","αληθινα","αλλα","αλλαχου","αλλες","αλλη","αλλην","αλλης","αλλιως","αλλιωτικα","αλλο","αλλοι","αλλοιως","αλλοιωτικα","αλλον","αλλος","αλλοτε","αλλου","αλλους","αλλων","αμα","αμεσα","αμεσως","αν","ανα","αναμεσα","αναμεταξυ","ανευ","αντι","αντιπερα","αντις","ανω","ανωτερω","αξαφνα","απ","απεναντι","απο","αποψε","από","αρα","αραγε","αργα","αργοτερο","αριστερα","αρκετα","αρχικα","ας","αυριο","αυτα","αυτες","αυτεσ","αυτη","αυτην","αυτης","αυτο","αυτοι","αυτον","αυτος","αυτοσ","αυτου","αυτους","αυτουσ","αυτων","αφοτου","αφου","αἱ","αἳ","αἵ","αὐτόσ","αὐτὸς","αὖ","α∆ιακοπα","βεβαια","βεβαιοτατα","γάρ","γα","γα^","γε","γι","για","γοῦν","γρηγορα","γυρω","γὰρ","δ'","δέ","δή","δαί","δαίσ","δαὶ","δαὶς","δε","δεν","δι","δι'","διά","δια","διὰ","δὲ","δὴ","δ’","εαν","εαυτο","εαυτον","εαυτου","εαυτους","εαυτων","εγκαιρα","εγκαιρως","εγω","ειθε","ειμαι","ειμαστε","ειναι","εις","εισαι","εισαστε","ειστε","ειτε","ειχα","ειχαμε","ειχαν","ειχατε","ειχε","ειχες","ει∆εμη","εκ","εκαστα","εκαστες","εκαστη","εκαστην","εκαστης","εκαστο","εκαστοι","εκαστον","εκαστος","εκαστου","εκαστους","εκαστων","εκει","εκεινα","εκεινες","εκεινεσ","εκεινη","εκεινην","εκεινης","εκεινο","εκεινοι","εκεινον","εκεινος","εκεινοσ","εκεινου","εκεινους","εκεινουσ","εκεινων","εκτος","εμας","εμεις","εμενα","εμπρος","εν","ενα","εναν","ενας","ενος","εντελως","εντος","εντωμεταξυ","ενω","ενός","εξ","εξαφνα","εξης","εξισου","εξω","επ","επί","επανω","επειτα","επει∆η","επι","επισης","επομενως","εσας","εσεις","εσενα","εστω","εσυ","ετερα","ετεραι","ετερας","ετερες","ετερη","ετερης","ετερο","ετεροι","ετερον","ετερος","ετερου","ετερους","ετερων","ετουτα","ετουτες","ετουτη","ετουτην","ετουτης","ετουτο","ετουτοι","ετουτον","ετουτος","ετουτου","ετουτους","ετουτων","ετσι","ευγε","ευθυς","ευτυχως","εφεξης","εχει","εχεις","εχετε","εχθες","εχομε","εχουμε","εχουν","εχτες","εχω","εως","εἰ","εἰμί","εἰμὶ","εἰς","εἰσ","εἴ","εἴμι","εἴτε","ε∆ω","η","ημασταν","ημαστε","ημουν","ησασταν","ησαστε","ησουν","ηταν","ητανε","ητοι","ηττον","η∆η","θα","ι","ιι","ιιι","ισαμε","ισια","ισως","ισωσ","ι∆ια","ι∆ιαν","ι∆ιας","ι∆ιες","ι∆ιο","ι∆ιοι","ι∆ιον","ι∆ιος","ι∆ιου","ι∆ιους","ι∆ιων","ι∆ιως","κ","καί","καίτοι","καθ","καθε","καθεμια","καθεμιας","καθενα","καθενας","καθενος","καθετι","καθολου","καθως","και","κακα","κακως","καλα","καλως","καμια","καμιαν","καμιας","καμποσα","καμποσες","καμποση","καμποσην","καμποσης","καμποσο","καμποσοι","καμποσον","καμποσος","καμποσου","καμποσους","καμποσων","κανεις","κανεν","κανενα","κανεναν","κανενας","κανενος","καποια","καποιαν","καποιας","καποιες","καποιο","καποιοι","καποιον","καποιος","καποιου","καποιους","καποιων","καποτε","καπου","καπως","κατ","κατά","κατα","κατι","κατιτι","κατοπιν","κατω","κατὰ","καὶ","κι","κιολας","κλπ","κοντα","κτλ","κυριως","κἀν","κἂν","λιγακι","λιγο","λιγωτερο","λογω","λοιπα","λοιπον","μέν","μέσα","μή","μήτε","μία","μα","μαζι","μακαρι","μακρυα","μαλιστα","μαλλον","μας","με","μεθ","μεθαυριο","μειον","μελει","μελλεται","μεμιας","μεν","μερικα","μερικες","μερικοι","μερικους","μερικων","μεσα","μετ","μετά","μετα","μεταξυ","μετὰ","μεχρι","μη","μην","μηπως","μητε","μη∆ε","μιά","μια","μιαν","μιας","μολις","μολονοτι","μοναχα","μονες","μονη","μονην","μονης","μονο","μονοι","μονομιας","μονος","μονου","μονους","μονων","μου","μπορει","μπορουν","μπραβο","μπρος","μἐν","μὲν","μὴ","μὴν","να","ναι","νωρις","ξανα","ξαφνικα","ο","οι","ολα","ολες","ολη","ολην","ολης","ολο","ολογυρα","ολοι","ολον","ολονεν","ολος","ολοτελα","ολου","ολους","ολων","ολως","ολως∆ιολου","ομως","ομωσ"
,"οποια","οποιαν","οποιαν∆ηποτε","οποιας","οποιας∆ηποτε","οποια∆ηποτε","οποιες","οποιες∆ηποτε","οποιο","οποιοι","οποιον","οποιον∆ηποτε","οποιος","οποιος∆ηποτε","οποιου","οποιους","οποιους∆ηποτε","οποιου∆ηποτε","οποιο∆ηποτε","οποιων","οποιων∆ηποτε","οποι∆ηποτε","οποτε","οποτε∆ηποτε","οπου","οπου∆ηποτε","οπως","οπωσ","ορισμενα","ορισμενες","ορισμενων","ορισμενως","οσα","οσα∆ηποτε","οσες","οσες∆ηποτε","οση","οσην","οσην∆ηποτε","οσης","οσης∆ηποτε","οση∆ηποτε","οσο","οσοι","οσοι∆ηποτε","οσον","οσον∆ηποτε","οσος","οσος∆ηποτε","οσου","οσους","οσους∆ηποτε","οσου∆ηποτε","οσο∆ηποτε","οσων","οσων∆ηποτε","οταν","οτι","οτι∆ηποτε","οτου","ου","ουτε","ου∆ε","οχι","οἱ","οἳ","οἷς","οὐ","οὐδ","οὐδέ","οὐδείσ","οὐδεὶς","οὐδὲ","οὐδὲν","οὐκ","οὐχ","οὐχὶ","οὓς","οὔτε","οὕτω","οὕτως","οὕτωσ","οὖν","οὗ","οὗτος","οὗτοσ","παλι","παντοτε","παντου","παντως","παρ","παρά","παρα","παρὰ","περί","περα","περι","περιπου","περισσοτερο","περσι","περυσι","περὶ","πια","πιθανον","πιο","πισω","πλαι","πλεον","πλην","ποια","ποιαν","ποιας","ποιες","ποιεσ","ποιο","ποιοι","ποιον","ποιος","ποιοσ","ποιου","ποιους","ποιουσ","ποιων","πολυ","ποσες","ποση","ποσην","ποσης","ποσοι","ποσος","ποσους","ποτε","που","πουθε","πουθενα","ποῦ","πρεπει","πριν","προ","προκειμενου","προκειται","προπερσι","προς","προσ","προτου","προχθες","προχτες","πρωτυτερα","πρόσ","πρὸ","πρὸς","πως","πωσ","σαν","σας","σε","σεις","σημερα","σιγα","σου","στα","στη","στην","στης","στις","στο","στον","στου","στους","στων","συγχρονως","συν","συναμα","συνεπως","συνηθως","συχνα","συχνας","συχνες","συχνη","συχνην","συχνης","συχνο","συχνοι","συχνον","συχνος","συχνου","συχνους","συχνων","συχνως","σχε∆ον","σωστα","σόσ","σύ","σύν","σὸς","σὺ","σὺν","τά","τήν","τί","τίς","τίσ","τα","ταυτα","ταυτες","ταυτη","ταυτην","ταυτης","ταυτο,ταυτον","ταυτος","ταυτου","ταυτων","ταχα","ταχατε","ταῖς","τα∆ε","τε","τελικα","τελικως","τες","τετοια","τετοιαν","τετοιας","τετοιες","τετοιο","τετοιοι","τετοιον","τετοιος","τετοιου","τετοιους","τετοιων","τη","την","της","τησ","τι","τινα","τιποτα","τιποτε","τις","τισ","το","τοί","τοι","τοιοῦτος","τοιοῦτοσ","τον","τος","τοσα","τοσες","τοση","τοσην","τοσης","τοσο","τοσοι","τοσον","τοσος","τοσου","τοσους","τοσων","τοτε","του","τουλαχιστο","τουλαχιστον","τους","τουτα","τουτες","τουτη","τουτην","τουτης","τουτο","τουτοι","τουτοις","τουτον","τουτος","τουτου","τουτους","τουτων","τούσ","τοὺς","τοῖς","τοῦ","τυχον","των","τωρα","τό","τόν","τότε","τὰ","τὰς","τὴν","τὸ","τὸν","τῆς","τῆσ","τῇ","τῶν","τῷ","υπ","υπερ","υπο","υποψη","υποψιν","υπό","υστερα","φετος","χαμηλα","χθες","χτες","χωρις","χωριστα","ψηλα","ω","ωραια","ως","ωσ","ωσαν","ωσοτου","ωσπου","ωστε","ωστοσο","ωχ","ἀλλ'","ἀλλά","ἀλλὰ","ἀλλ’","ἀπ","ἀπό","ἀπὸ","ἀφ","ἂν","ἃ","ἄλλος","ἄλλοσ","ἄν","ἄρα","ἅμα","ἐάν","ἐγώ","ἐγὼ","ἐκ","ἐμόσ","ἐμὸς","ἐν","ἐξ","ἐπί","ἐπεὶ","ἐπὶ","ἐστι","ἐφ","ἐὰν","ἑαυτοῦ","ἔτι","ἡ","ἢ","ἣ","ἤ","ἥ","ἧς","ἵνα","ὁ","ὃ","ὃν","ὃς","ὅ","ὅδε","ὅθεν","ὅπερ","ὅς","ὅσ","ὅστις","ὅστισ","ὅτε","ὅτι","ὑμόσ","ὑπ","ὑπέρ","ὑπό","ὑπὲρ","ὑπὸ","ὡς","ὡσ","ὥς","ὥστε","ὦ","ᾧ","∆α","∆ε","∆εινα","∆εν","∆εξια","∆ηθεν","∆ηλα∆η","∆ι","∆ια","∆ιαρκως","∆ικα","∆ικο","∆ικοι","∆ικος","∆ικου","∆ικους","∆ιολου","∆ιπλα","∆ιχως"]
# perform the analysis
no_features = 1000
no_topics = 5
no_top_words = 10
if mode == 'nmf':
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)
tfidf = tfidf_vectorizer.fit_transform(corpus)
feature_names = tfidf_vectorizer.get_feature_names()
model = NMF(n_components=no_topics, random_state=1, alpha=.1, l1_ratio=.5, init='nndsvd').fit(tfidf)
elif mode == 'lda':
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)
tf = tf_vectorizer.fit_transform(corpus)
feature_names = tf_vectorizer.get_feature_names()
model = LatentDirichletAllocation(n_components=no_topics, max_iter=5, learning_method='online', learning_offset=50., random_state=0).fit(tf)
else:
print('\tplease select a valid option for mode ("nmf" or "lda")')
return None, None
# form the intermediate results
for topic_idx, topic in enumerate(model.components_):
# print("\ttopic %d:" % (topic_idx+1), ', '.join([feature_names[i] for i in topic.argsort()[:-no_top_words - 1:-1]]))
for i in topic.argsort()[:-no_top_words - 1:-1]:
# results.append(feature_names[i])
results_txt = results_txt + ' ' + feature_names[i]
# print(results_txt)
# load the appropriate spaCy model and keep only terms whose named-entity label is GPE, LOC, FAC or ORG
if language == 'en': nlp = spacy.load('en_core_web_sm')
elif language in ['el', 'es', 'it']: nlp = spacy.load(language + '_core_news_sm')
else: return None, None
for ent in nlp(results_txt).ents:
#
if ent.label_ in ['GPE', 'FAC', 'ORG', 'LOC']: results.append(ent.text)
# reform results
results_txt = ''
for result in results: results_txt = results_txt + result + ', '
results_txt = results_txt[:-2]
print('\tcommon entities found:', results_txt)
return results, results_txt
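# usage sketch (illustrative; `docs` is an assumption, and the first function's
# exact signature is defined further up in this module):
#
#   docs = ["some raw text ...", "more raw text ..."]
#   topics, topics_txt = topic_modeling(docs, mode='nmf', plot=False)
#   entities, entities_txt = entity_modeling(docs, mode='lda')
#
# both functions return (results, results_txt): the first reports grouped topic
# terms, while entity_modeling feeds its topic terms through spaCy and keeps
# only the GPE/FAC/ORG/LOC entities among them.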
''' HELPER METHODS '''
# ******************************************************************************************
# a helper method for topic modeling methods to list the detected topics.
def print_topics(model, feature_names, no_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic %d:" % (topic_idx))
print(" ".join([feature_names[i]
for i in topic.argsort()[:-no_top_words - 1:-1]]))
# ******************************************************************************************
# a helper method for topic modeling methods to plot most common words.
def plot_common_words(count_data, count_vectorizer, n_top, n_grams_string):
sns.set_style('whitegrid')
words = count_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in count_data:
total_counts += t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[0:n_top]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(2, figsize=(15, 15 / 1.6180))
plt.subplot(title=str(n_top)+' most common '+n_grams_string)
sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
sns.barplot(x_pos, counts, palette='husl')
plt.xticks(x_pos, words, rotation=90)
plt.xlabel('words')
plt.ylabel('counts')
plt.show()
''' SUPPLEMENTARY METHODS '''
# ******************************************************************************************
# a method primarily for testing various workflows and techniques. it is mostly based on the
# spacy library for executing common nlp tasks.
def analyze_syntax(text):
print('Syntax analysis')
print(' ')
word = TextBlob(text)
lang = word.detect_language()
print('The language is:', lang)
if lang == 'el': nlp = spacy.load('el_core_news_sm')
elif lang == 'es': nlp = spacy.load('es_core_news_sm')
elif lang == 'it': nlp = spacy.load('it_core_news_sm')
else: nlp = spacy.load('en_core_web_sm')
nlp_text = nlp(text)
# Extract sentences
sentences = list(nlp_text.sents)
print(' ')
print('Sentences:', len(sentences))
for sentence in sentences:
#
print('#', sentence)
# Extract tokens
print(' ')
print('Tokens:', len(nlp_text))
print('Text | Lemma | POS | Dep | Shape | Alphabetic? | Stop?')
for token in nlp_text:
print('#', token.text, token.lemma_, token.pos_, token.dep_, token.shape_, token.is_alpha, token.is_stop)
# spacy.explain(token.tag_) gives a human-readable description of the fine-grained tag
# Noun chunks
print(' ')
print('Noun chunks analysis')
print('Chunk | Root | Dep | Head')
for chunk in nlp_text.noun_chunks:
print('#', chunk.text, ' | ', chunk.root.text, ' | ', chunk.root.dep_, ' | ', chunk.root.head.text)
print(' ')
print(' ')
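# usage sketch (illustrative; not part of the original module):
#
#   analyze_syntax("Rome wasn't built in a day. It took centuries to complete.")
#
# prints the detected language, the list of sentences, a per-token breakdown
# (text, lemma, POS, dependency, shape, alphabetic/stop flags) and the noun chunks.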
|
[
"noreply@github.com"
] |
thepharmproject.noreply@github.com
|
869b5fd5178646d0fc46c0a6bbeb3b3da0b6dd60
|
cadfe9016ef3e5c80864e43e362978a20e178635
|
/yonggari/bin/flask
|
e31de6272712f806fd3299bbb8098676b7c68e36
|
[] |
no_license
|
shd02137/Capsin
|
3dcd874739b2401939d532b45984478e61ddb954
|
f9c8f5ab4262c74f92319b7086c2dd12f87d4928
|
refs/heads/master
| 2020-06-06T23:50:18.742120
| 2019-06-22T03:10:15
| 2019-06-22T03:10:15
| 192,878,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
#!/home/ubuntu/yonggari/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ubuntu@ip-172-31-32-191.eu-central-1.compute.internal"
] |
ubuntu@ip-172-31-32-191.eu-central-1.compute.internal
|
|
f06554613a70283c660786e9291d533016e5d630
|
77c8c500d4077ad733fbfe2c6a85a1dd47bd3cb5
|
/chelseashin/SamsungProblems/swea/5648_원자소멸시뮬레이션.py
|
cc8bae929ee498ec7773632e1e400d457c44844c
|
[] |
no_license
|
chelseashin/AlgorithmStudy2021
|
786f03c4c17bc057518d428481e7d710d24ec98e
|
1a4744a621ed25715fc9060c5224f0b1092d9c00
|
refs/heads/master
| 2023-06-22T22:27:47.289806
| 2021-07-28T02:54:22
| 2021-07-28T02:54:22
| 326,441,667
| 1
| 5
| null | 2021-06-29T01:27:40
| 2021-01-03T15:44:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
import sys
sys.stdin = open("5648_input.txt")
# directions: up, down, left, right
dr = (-1, 1, 0, 0)
dc = (0, 0, -1, 1)
def bfs():
global Q
result = 0
while Q:
info = dict()
qlen = len(Q)
for _ in range(qlen):
r, c, d, k = Q.pop(0)
nr = r + dr[d] * 0.5
nc = c + dc[d] * 0.5
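# moving in half-cell steps means two atoms approaching each other from
# adjacent cells meet at the midpoint instead of swapping positions in a
# single full step and silently missing the collision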
# once an atom leaves the grid it can never collide again, so just drop it
if not (0 <= nr < 2001 and 0 <= nc < 2001):
continue
if (nr, nc) not in info: # first atom to reach this cell
info[(nr, nc)] = [d, k]
else: # another atom has already reached this cell
info[(nr, nc)][0] = -1
info[(nr, nc)][1] += k
for (r, c), (d, k) in info.items():
if d == -1: # atoms collided here: annihilate them and collect their energy
result += k
else:
Q.append((r, c, d, k))
return result
T = int(input())
for tc in range(T):
N = int(input())
Q = []
for i in range(N):
c, r, d, k = map(int, input().split())
# convert the mathematical (x, y) coordinates into ordinary 2-D array indices
r = 2000 - (1000 + r)
c = (1000 + c)
Q.append((r, c, d, k))
print("#{} {}".format(tc + 1, bfs()))
|
[
"chaewonshin95@gmail.com"
] |
chaewonshin95@gmail.com
|
27b84fae089803dd8cfa0f93fafc593876299e77
|
e404efdf471cb2644d8c913b10c3fd6596e3fa6a
|
/db_conservatory/spinner/spindocker.py
|
de4928f93b959d6d681307526bf2b49c5627e7be
|
[] |
no_license
|
atbaker/db-conservatory
|
ba064d91d072aea5662983c1d59fe05d5b24757e
|
1d13af0818160252935e5f60150d4adb9dd6e87c
|
refs/heads/master
| 2016-09-10T10:58:59.658846
| 2014-03-17T21:09:08
| 2014-03-17T21:09:08
| 14,132,172
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
from django.conf import settings
from urlparse import urljoin
import requests
from requests.exceptions import RequestException
import logging
endpoint = settings.SPIN_DOCKER_ENDPOINT
auth = (settings.SPIN_DOCKER_USERNAME, settings.SPIN_DOCKER_PASSWORD)
logger = logging.getLogger(__name__)
def make_request(method, resource, data=None):
url = urljoin(endpoint, resource)
try:
if method == 'GET':
r = requests.get(url,
auth=auth,
)
elif method == 'POST':
r = requests.post(url,
auth=auth,
data=data)
elif method == 'PATCH':
r = requests.patch(url,
auth=auth,
data=data)
elif method == 'DELETE':
r = requests.delete(url,
auth=auth)
else:
raise ValueError('Unsupported HTTP method: %s' % method)
except RequestException:
logger.error('Spin-docker error at resource: %s' % resource)
return None
try:
response = r.json()
except ValueError:
logger.error('Spin-docker returned invalid JSON: %s %s %s' % (resource, r.status_code, r.text))
return None
return response
def get(resource):
return make_request('GET', resource)
def post(resource, data):
return make_request('POST', resource, data)
def patch(resource, data):
return make_request('PATCH', resource, data)
def delete(resource):
return make_request('DELETE', resource)
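# usage sketch (illustrative; the resource paths below are assumptions, not part
# of the spin-docker API as documented here):
#
#   containers = get('/v1/containers')
#   created = post('/v1/containers', data={'image': 'postgres'})
#
# each helper returns the decoded JSON response, or None when the request failed
# or the body was not valid JSON (both cases are logged above).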
|
[
"andrew.tork.baker@gmail.com"
] |
andrew.tork.baker@gmail.com
|
cbba9fb914aea86720743a88dcb2e7acd88a728a
|
5873116e2b2b11506f7820aa25a118ce5e29485e
|
/test/constants.py
|
d1bbe08fd0e02c8dc233add002188903e7927296
|
[
"MIT"
] |
permissive
|
omegawulf417/macaddress
|
a7cf8722ebebafc16c4ce0e47ab447c0f1856810
|
748f8a8e86b864a9e53a1d7c2082b1aa6f4d0126
|
refs/heads/master
| 2023-07-08T04:35:29.680923
| 2021-08-01T01:56:34
| 2021-08-01T01:56:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,190
|
py
|
INVALID_OCTET = [
"f", # Too few digits
"fff", # Too many digits
"g" # Invalid digit
]
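# Each OCTET tuple appears to be laid out as:
# (input string, normalized lowercase form, integer value,
#  8-bit binary string, bit-reversed 8-bit binary string),
# e.g. "A0" -> 0xA0 == 160 == 0b10100000, reversed 0b00000101.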
OCTET = [
("A0", "a0", 160, "10100000", "00000101"),
("a0", "a0", 160, "10100000", "00000101"),
("B1", "b1", 177, "10110001", "10001101"),
("b1", "b1", 177, "10110001", "10001101"),
("C2", "c2", 194, "11000010", "01000011"),
("c2", "c2", 194, "11000010", "01000011"),
("D3", "d3", 211, "11010011", "11001011"),
("d3", "d3", 211, "11010011", "11001011"),
("E4", "e4", 228, "11100100", "00100111"),
("e4", "e4", 228, "11100100", "00100111"),
("F5", "f5", 245, "11110101", "10101111"),
("f5", "f5", 245, "11110101", "10101111")
]
INVALID_IDENTIFIER = [
"0a", # Too few digits
"0a1b2c3d4e5f6", # Too many digits
"0a1b2c3d4e5g", # Invalid digit
"-0a-1b-2c-3d-4e-5f", # Leading hyphen
"0a-1b-2c-3d-4e-5f-", # Trailing hyphen
"0a-1b-2c-3d-4e5f", # Missing hyphen
":0a:1b:2c:3d:4e:5f", # Leading colon
"0a:1b:2c:3d:4e:5f:", # Trailing colon
"0a:1b:2c:3d:4e5f", # Missing colon
".0a1b.2c3d.4e5f", # Leading dot
"0a1b.2c3d.4e5f.", # Trailing dot
"0a1b.2c3d4e5f" # Missing dot
]
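# Each EUI/ELI/NULL_EUI tuple appears to be laid out as:
# (input string, normalized lowercase form, integer value,
#  48-bit binary string, 48-bit binary string with the bits of each octet
#  reversed (LSB-first transmission order), (OUI, extension) 24/24-bit split,
#  36/12-bit split, then plain, hyphen, colon and dot notations),
# e.g. "a0b1c2d3e4f5" -> 176685338322165 with 24-bit OUI "a0b1c2".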
EUI = [
(
"a0b1c2d3e4f5", # Plain notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0B1C2D3E4F5", # Plain notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"a0-b1-c2-d3-e4-f5", # Hyphen notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0-B1-C2-D3-E4-F5", # Hyphen notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"a0:b1:c2:d3:e4:f5", # Colon notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0:B1:C2:D3:E4:F5", # Colon notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"a0b1.c2d3.e4f5", # Dot notation (lowercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
),
(
"A0B1.C2D3.E4F5", # Dot notation (uppercase)
"a0b1c2d3e4f5",
176685338322165,
"101000001011000111000010110100111110010011110101",
"000001011000110101000011110010110010011110101111",
("a0b1c2", "d3e4f5"),
("a0b1c2d3e", "4f5"),
"a0b1c2d3e4f5",
"a0-b1-c2-d3-e4-f5",
"a0:b1:c2:d3:e4:f5",
"a0b1.c2d3.e4f5"
)
]
ELI = [
(
"0a1b2c3d4e5f", # Plain notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A1B2C3D4E5F", # Plain notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0a-1b-2c-3d-4e-5f", # Hyphen notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A-1B-2C-3D-4E-5F", # Hyphen notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0a:1b:2c:3d:4e:5f", # Colon notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A:1B:2C:3D:4E:5F", # Colon notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0a1b.2c3d.4e5f", # Dot notation (lowercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
),
(
"0A1B.2C3D.4E5F", # Dot notation (uppercase)
"0a1b2c3d4e5f",
11111822610015,
"000010100001101100101100001111010100111001011111",
"010100001101100000110100101111000111001011111010",
("0a1b2c", "3d4e5f"),
("0a1b2c3d4", "e5f"),
"0a1b2c3d4e5f",
"0a-1b-2c-3d-4e-5f",
"0a:1b:2c:3d:4e:5f",
"0a1b.2c3d.4e5f"
)
]
NULL_EUI = [
(
"ffffffffffff", # Plain notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FFFFFFFFFFFF", # Plain notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"ff-ff-ff-ff-ff-ff", # Hyphen notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FF-FF-FF-FF-FF-FF", # Hyphen notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"ff:ff:ff:ff:ff:ff", # Colon notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FF:FF:FF:FF:FF:FF", # Colon notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"ffff.ffff.ffff", # Dot notation (lowercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
),
(
"FFFF.FFFF.FFFF", # Dot notation (uppercase)
"ffffffffffff",
281474976710655,
"111111111111111111111111111111111111111111111111",
"111111111111111111111111111111111111111111111111",
("ffffff", "ffffff"),
("fffffffff", "fff"),
"ffffffffffff",
"ff-ff-ff-ff-ff-ff",
"ff:ff:ff:ff:ff:ff",
"ffff.ffff.ffff"
)
]
INVALID_ADDRESS = INVALID_IDENTIFIER
BROADCAST = "ffffffffffff"
MULTICAST = "0180c2000000" # Link-Layer Discovery Protocol
UAA_UNICAST = "a0b1c2d3e4f5"
LAA_UNICAST = "aab1c2d3e4f5"
|
[
"37241479+critical-path@users.noreply.github.com"
] |
37241479+critical-path@users.noreply.github.com
|
546f4df214336ace4610fc7efce5fa322f8c0244
|
9a797f9a9c1c9fc8d94f7d87bbf49a021f7f499e
|
/paddlespeech/s2t/utils/profiler.py
|
3592157dc17eac5991bbaabdc1b757b7198827ef
|
[
"Apache-2.0"
] |
permissive
|
anniyanvr/DeepSpeech-1
|
38fb0764c18ef4ee54a5b4bcc1430b69b1434318
|
17854a04d43c231eff66bfed9d6aa55e94a29e79
|
refs/heads/develop
| 2023-09-01T20:02:07.336091
| 2023-08-14T02:11:45
| 2023-08-14T02:11:45
| 218,518,285
| 0
| 0
|
Apache-2.0
| 2023-09-13T09:54:36
| 2019-10-30T12:04:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,661
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
# A global variable to record the number of calling times for profiler
# functions. It is used to specify the tracing range of training steps.
_profiler_step_id = 0
# A global variable to avoid parsing from string every time.
_profiler_options = None
class ProfilerOptions(object):
'''
Use a string to initialize a ProfilerOptions.
The string should be in the format: "key1=value1;key2=value;key3=value3".
For example:
"profile_path=model.profile"
"batch_range=[50, 60]; profile_path=model.profile"
"batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"
ProfilerOptions supports following key-value pair:
batch_range - an integer list, e.g. [100, 110].
state - a string, the optional values are 'CPU', 'GPU' or 'All'.
sorted_key - a string, the optional values are 'calls', 'total',
'max', 'min' or 'ave'.
tracer_option - a string, the optional values are 'Default', 'OpDetail',
'AllOpDetail'.
profile_path - a string, the path to save the serialized profile data,
which can be used to generate a timeline.
exit_on_finished - a boolean.
'''
def __init__(self, options_str):
assert isinstance(options_str, str)
self._options = {
'batch_range': [10, 20],
'state': 'All',
'sorted_key': 'total',
'tracer_option': 'Default',
'profile_path': '/tmp/profile',
'exit_on_finished': True
}
self._parse_from_string(options_str)
def _parse_from_string(self, options_str):
if not options_str:
return
for kv in options_str.replace(' ', '').split(';'):
key, value = kv.split('=')
if key == 'batch_range':
value_list = value.replace('[', '').replace(']', '').split(',')
value_list = list(map(int, value_list))
if len(value_list) >= 2 and value_list[0] >= 0 and value_list[1] > value_list[0]:
self._options[key] = value_list
elif key == 'exit_on_finished':
self._options[key] = value.lower() in ("yes", "true", "t", "1")
elif key in [
'state', 'sorted_key', 'tracer_option', 'profile_path'
]:
self._options[key] = value
def __getitem__(self, name):
if self._options.get(name, None) is None:
raise ValueError(
"ProfilerOptions does not have an option named %s." % name)
return self._options[name]
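# illustrative example (not part of the original module):
# ProfilerOptions("batch_range=[50, 60]; profile_path=model.profile")
# yields _options with batch_range == [50, 60] and
# profile_path == 'model.profile'; every key not mentioned in the
# string keeps its default value.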
def add_profiler_step(options_str=None):
'''
Enable the operator-level timing using PaddlePaddle's profiler.
The profiler uses an independent variable to count the profiler steps.
One call of this function is treated as a profiler step.
Args:
options_str - a string used to initialize the ProfilerOptions.
Default is None, and the profiler is disabled.
'''
if options_str is None:
return
global _profiler_step_id
global _profiler_options
if _profiler_options is None:
_profiler_options = ProfilerOptions(options_str)
logger.info(f"Profiler: {options_str}")
logger.info(f"Profiler: {_profiler_options._options}")
if _profiler_step_id == _profiler_options['batch_range'][0]:
paddle.utils.profiler.start_profiler(_profiler_options['state'],
_profiler_options['tracer_option'])
elif _profiler_step_id == _profiler_options['batch_range'][1]:
paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
_profiler_options['profile_path'])
if _profiler_options['exit_on_finished']:
sys.exit(0)
_profiler_step_id += 1
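# usage sketch (illustrative; `train_step` is a hypothetical training function):
#
#   for batch in loader:
#       train_step(batch)
#       add_profiler_step("batch_range=[10, 20]; profile_path=/tmp/profile")
#
# profiling starts at step 10, stops and serializes the timeline at step 20,
# then exits the process unless "exit_on_finished=false" is included.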
|
[
"zhtclz@foxmail.com"
] |
zhtclz@foxmail.com
|
3ac1365a8800ed508c6f33e04084810988bc2508
|
b0948c315a879a6fd6ae5542ed4146e54ba4f0d2
|
/Some analytics/Analytics_diagram_all.py
|
d3a6a1b604bd360243b9207e06e065692e27586d
|
[] |
no_license
|
Borys1307/Borys
|
8fe05ee757c54f19d4f5ef82cf1dbb92264dd5cf
|
5a29299717d695609d960473f8ba4d9576f6384d
|
refs/heads/master
| 2020-06-14T02:13:04.886772
| 2020-03-23T12:40:26
| 2020-03-23T12:40:26
| 194,863,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import json
name = []
number = []
with open('C:/Users/chernyshov/Python/data.json', 'r') as fp:
resp = json.load(fp)
for i in resp['result']['StableDedicated']['Manual']['WhereReported'].items():
name.append(i[0])
number.append(i[1])
data = {'Tracker' : name, 'count':number }
cold_lost = pd.DataFrame(data)
total = (resp['result']['StableDedicated']['Manual']['Total'])
print(total)
cold_lost = (cold_lost.sort_values(by = ['count'], ascending = False))
cold_lost = cold_lost.reset_index(drop=True)
cold_lost.index = np.arange(1,len(cold_lost)+1)
percent = dict()
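# share of each tracker's count relative to the grand total, in percent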
for i in cold_lost['Tracker']:
percent[i] = cold_lost.loc[cold_lost['Tracker'] == i].iloc[0]['count']/total*100
print(percent)
# diagram for all trackers
answer = pd.DataFrame(percent, index = [0])
dpi = 80
plt.figure(dpi = dpi, figsize = (640 / dpi, 480 / dpi) )
plt.pie(answer.values[0], autopct='%.2f', radius=1.5)
plt.legend(
bbox_to_anchor = (-0.36, -0.17, 1.25, 0.25),
loc = 'lower left', labels = answer.keys())
plt.savefig('C:/Users/chernyshov/Python/Plots/Percentage_to_total.png')
plt.show()
print(cold_lost)
|
[
"noreply@github.com"
] |
Borys1307.noreply@github.com
|
818f9e6e2b44bf243ecb781eeedf33adb72cda9c
|
02ca588d6c8f5b4223071b80b27b98a0abd1f5ee
|
/woocommerce.py
|
ba1b1671e16ff162ec5a69e62cc67d1588f66d9c
|
[] |
no_license
|
phamjmanh/v32
|
00528b8799a6381529b8778e470c68d6cc4a5f83
|
e550d8e6167413523197592385c8e4b9a146f22e
|
refs/heads/main
| 2023-02-11T11:05:50.175775
| 2021-01-04T07:01:35
| 2021-01-04T07:01:35
| 326,593,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279,495
|
py
|
import copy
import csv
import re
from collections import defaultdict
from urllib.parse import unquote
import chardet
from cartmigration.libs.utils import *
from cartmigration.models.cart.wordpress import LeCartWordpress
# tested with woocommerce335
class LeCartWoocommerce(LeCartWordpress):
WARNING_VARIANT_LIMIT = 100
def __init__(self, data = None):
super().__init__(data)
self.product_types = dict()
self.is_variant_limit = False
def display_config_source(self):
parent = super().display_config_source()
url_query = self.get_connector_url('query')
self._notice['src']['language_default'] = 1
self._notice['src']['category_root'] = 1
storage_cat_data = dict()
storage_cat_data[self._notice['src']['language_default']] = 0
self._notice['src']['store_category'] = storage_cat_data
self._notice['src']['support']['site_map'] = False
self._notice['src']['support']['category_map'] = False
self._notice['src']['support']['attribute_map'] = False
self._notice['src']['support']['wpml'] = False
self._notice['src']['support']['yoast_seo'] = False
self._notice['src']['support']['manufacturers'] = False
self._notice['src']['support']['product_bundle'] = False
self._notice['src']['support']['customer_point_rewards'] = False
self._notice['src']['support']['addons'] = False
self._notice['src']['support']['plugin_pre_ord'] = False
self._notice['src']['support']['plugin_order_status'] = False
self._notice['src']['support']['custom_order_status'] = False
query_active_plugins = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
}
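# note: WordPress stores 'active_plugins' as a PHP-serialized array of plugin
# paths, e.g. (illustrative) a:1:{i:0;s:27:"woocommerce/woocommerce.php";},
# which is why the option value is passed through php_unserialize() below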
active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
active_langs = list()
if active_plugins and active_plugins['data']:
active_plugin = active_plugins['data'][0]
active_plugin_v = active_plugin['option_value']
if active_plugin_v:
active_plugin_v_data = php_unserialize(active_plugin_v)
if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
active_plugin_v_data = list(active_plugin_v_data.values())
if active_plugin_v_data:
if "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
self._notice['src']['support']['wpml'] = True
query_active_languages = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
}
options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if option_value and 'default_language' in option_value:
self._notice['src']['language_default'] = option_value['default_language']
active_langs = option_value['active_languages'].values()
else:
self._notice['src']['support']['wpml'] = False
if 'woocommerce-brand/main.php' in active_plugin_v_data or "wc-brand/woocommerce-brand.php" in active_plugin_v_data or 'woocommerce-brands/woocommerce-brands.php' in active_plugin_v_data or 'perfect-woocommerce-brands/perfect-woocommerce-brands.php' in active_plugin_v_data:
self._notice['src']['support']['manufacturers'] = True
if "wordpress-seo/wp-seo.php" in active_plugin_v_data:
self._notice['src']['support']['yoast_seo'] = True
if "woo-product-bundle-premium/index.php" in active_plugin_v_data or 'woo-product-bundle/index.php' in active_plugin_v_data:
self._notice['src']['support']['product_bundle'] = True
if "woocommerce-points-and-rewards/woocommerce-points-and-rewards.php" in active_plugin_v_data:
self._notice['src']['support']['customer_point_rewards'] = True
if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
self._notice['src']['support']['addons'] = True
if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers-pro.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers.php" in active_plugin_v_data)):
self._notice['src']['support']['plugin_pre_ord'] = True
if active_plugin_v_data and 'woocommerce-order-status-manager/woocommerce-order-status-manager.php' in active_plugin_v_data:
self._notice['src']['support']['plugin_order_status'] = True
if active_plugin_v_data and 'woocommerce-status-actions/woocommerce-status-actions.php' in active_plugin_v_data:
self._notice['src']['support']['custom_order_status'] = True
queries_config = {
'orders_status': {
'type': 'select',
# 'query': "SELECT * FROM `_DBPRF_term_taxonomy` AS term_taxonomy LEFT JOIN _DBPRF_terms AS terms ON term_taxonomy.term_id = terms.term_id WHERE term_taxonomy.taxonomy = 'shop_order_status'",
'query': "SELECT DISTINCT(`post_status`) FROM `_DBPRF_posts` WHERE `post_type` = 'shop_order'",
},
'permalink_structure': {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE option_name = 'woocommerce_permalinks' OR option_name = 'category_base'",
}
}
if self._notice['src']['support']['wpml']:
queries_config['wpml'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_icl_languages` WHERE code IN " + self.list_to_in_condition(active_langs)
}
queries_config['default_lang'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` o LEFT JOIN _DBPRF_icl_languages il ON o.option_value = il.default_locale WHERE o.`option_name` = 'WPLANG'"
}
if self._notice['src']['support']['plugin_order_status']:
queries_config['orders_status']['query'] = "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_order_status'"
if self._notice['src']['support']['custom_order_status']:
queries_config['orders_status']['query'] = "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_custom_statuses' AND `post_status` = 'publish'"
config = self.get_connector_data(url_query, {
'serialize': True,
'query': json.dumps(queries_config)
})
language_data = dict()
order_status_data = dict()
product_base = 'product'
product_category_base = 'product-category'
category_base = ''
if config and config['result'] == 'success':
if config['data']['orders_status']:
for order_status_row in config['data']['orders_status']:
# order_status_id = 'wc-' + order_status_row['name'].lower()
# order_status_data[order_status_id] = order_status_row['name']
if self._notice['src']['support']['custom_order_status']:
order_status_id = 'wc-' + to_str(order_status_row['post_name'])
order_status_data[order_status_id] = order_status_row['post_title']
elif self._notice['src']['support']['plugin_order_status']:
order_status_id = order_status_row['post_name']
order_status_data[order_status_id] = order_status_row['post_title']
else:
order_status_id = order_status_row['post_status']
order_status_data[order_status_id] = self.get_order_status_label(order_status_row['post_status'])
else:
order_status_data = {
'wc-pending': 'Pending payment',
'wc-processing': 'Processing',
'wc-on-hold': 'On hold',
'wc-completed': 'Completed',
'wc-cancelled': 'Cancelled',
'wc-refunded': 'Refunded',
'wc-failed': 'Failed'
}
if self._notice['src']['support']['wpml']:
if not self._notice['src']['language_default'] and 'default_lang' in config['data'] and config['data'][
'default_lang']:
for lang_default_row in config['data']['default_lang']:
if lang_default_row['code']:
self._notice['src']['language_default'] = lang_default_row['code']
if 'wpml' in config['data']:
if config['data']['wpml']:
for lang_row in config['data']['wpml']:
lang_id = lang_row["code"]
language_data[lang_id] = lang_row['english_name']
else:
lang_id = 'en'
language_data[lang_id] = "Default language"
else:
lang_id = 1
language_data[lang_id] = "Default language"
if config['data']['permalink_structure']:
product_base_data = get_row_from_list_by_field(config['data']['permalink_structure'], 'option_name', 'woocommerce_permalinks')
category_base_data = get_row_from_list_by_field(config['data']['permalink_structure'], 'option_name', 'category_base')
if product_base_data:
option_value_data = php_unserialize(product_base_data['option_value'])
if option_value_data:
product_base = get_value_by_key_in_dict(option_value_data, 'product_base', 'product')
product_category_base = get_value_by_key_in_dict(option_value_data, 'category_base', 'product-category')
if category_base_data:
category_base = category_base_data['option_value']
self._notice['src']['config']['category_base'] = product_category_base
self._notice['src']['config']['product_category_base'] = product_category_base
self._notice['src']['config']['product_base'] = product_base
self._notice['src']['support']['language_map'] = True
self._notice['src']['languages'] = language_data
self._notice['src']['order_status'] = order_status_data
self._notice['src']['support']['order_status_map'] = True
self._notice['src']['support']['country_map'] = False
self._notice['src']['support']['add_new'] = True
self._notice['src']['support']['site_map'] = False
self._notice['src']['support']['customer_group_map'] = False
self._notice['src']['support']['languages_select'] = True
self._notice['src']['support']['order_state_map'] = True
self._notice['src']['support']['seo'] = True
if self.is_woo2woo():
self._notice['src']['support']['cus_pass'] = False
else:
self._notice['src']['support']['cus_pass'] = True
self._notice['src']['support']['coupons'] = True
self._notice['src']['support']['pages'] = True
self._notice['src']['support']['seo_301'] = True
self._notice['src']['config']['seo_module'] = self.get_list_seo()
return response_success()
def display_config_target(self):
url_query = self.get_connector_url('query')
self._notice['target']['language_default'] = 1
self._notice['target']['category_root'] = 1
storage_cat_data = dict()
storage_cat_data[self._notice['target']['language_default']] = 0
self._notice['target']['store_category'] = storage_cat_data
self._notice['target']['support']['site_map'] = False
self._notice['target']['support']['category_map'] = False
self._notice['target']['support']['attribute_map'] = False
self._notice['target']['support']['wpml'] = False
self._notice['target']['support']['wpml_currency'] = False
self._notice['target']['support']['product_bundle'] = False
self._notice['target']['support']['yoast_seo'] = False
self._notice['target']['support']['addons'] = False
self._notice['target']['support']['customer_point_rewards'] = False
self._notice['target']['support']['polylang'] = False
self._notice['target']['support']['polylang_product'] = False
self._notice['target']['support']['polylang_category'] = False
self._notice['target']['support']['plugin_woo_admin'] = False
self._notice['target']['support']['custom_order_status'] = False
self._notice['target']['currency_map'] = dict()
query_active_plugins = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
}
active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
active_langs = list()
if active_plugins and active_plugins['data']:
active_plugin = active_plugins['data'][0]
active_plugin_v = active_plugin['option_value']
if active_plugin_v:
active_plugin_v_data = php_unserialize(active_plugin_v)
if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
active_plugin_v_data = list(active_plugin_v_data.values())
if active_plugin_v_data and "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
self._notice['target']['support']['wpml'] = True
query_active_languages = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
}
options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if option_value and 'default_language' in option_value:
self._notice['target']['language_default'] = option_value['default_language']
active_langs = option_value['active_languages'].values()
query_active_currency = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = '_wcml_settings'"
}
options_currency_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_currency)})
if options_currency_data and options_currency_data['data']:
currency_value = php_unserialize(options_currency_data['data'][0]['option_value'])
if currency_value and 'enable_multi_currency' in currency_value and to_int(currency_value['enable_multi_currency']) >= 2:
self._notice['target']['support']['wpml_currency'] = True
if 'default_currencies' in currency_value and currency_value['default_currencies']:
self._notice['target']['currency_map'] = currency_value['default_currencies']
else:
self._notice['target']['support']['wpml_currency'] = False
woo_brands = [
{'name': 'woocommerce-brand/main.php'},
{'name': 'wc-brand/woocommerce-brand.php'},
{'name': 'martfury-addons/martfury-addons.php', 'taxonomy': 'product_brand'},
{'name': 'woocommerce-brands/woocommerce-brands.php', 'taxonomy': 'product_brand'},
{'name': 'brands-for-woocommerce/woocommerce-brand.php', 'taxonomy': 'berocket_brand'},
{'name': 'perfect-woocommerce-brands/main.php', 'taxonomy': 'pwb-brand'},
{'name': 'perfect-woocommerce-brands/perfect-woocommerce-brands.php', 'taxonomy': 'pwb-brand'},
]
self._notice['target']['config']['brand_taxonomy'] = 'product_brand'
for brand in woo_brands:
if brand['name'] in active_plugin_v_data:
self._notice['target']['support']['plugin_manufacturers'] = True
if brand.get('taxonomy'):
self._notice['target']['config']['brand_taxonomy'] = brand['taxonomy']
break
# if ('woocommerce-brand/main.php' in active_plugin_v_data) or ("wc-brand/woocommerce-brand.php" in active_plugin_v_data) or ('woocommerce-brands/woocommerce-brands.php' in active_plugin_v_data) or ('brands-for-woocommerce/woocommerce-brand.php' in active_plugin_v_data):
# self._notice['target']['support']['manufacturers'] = True
if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers-pro.php" in active_plugin_v_data)):
self._notice['target']['support']['plugin_pre_ord'] = True
if active_plugin_v_data and "wordpress-seo/wp-seo.php" in active_plugin_v_data:
self._notice['target']['support']['yoast_seo'] = True
if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
self._notice['target']['support']['addons'] = True
if "leurlrewrite/leurlrewrite.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_seo'] = True
self._notice['target']['support']['plugin_seo_301'] = True
if "leprespass/leprespass.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_cus_pass'] = True
if "woocommerce-admin/woocommerce-admin.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_woo_admin'] = True
# query_check_seo = {
# 'type': 'select',
# 'query': "SHOW TABLES LIKE '_DBPRF_lecm_rewrite';"
# }
# check_table_exit = self.select_data_connector(query_check_seo, 'seo')
# if check_table_exit['result'] == 'success' and to_len(check_table_exit['data']) > 0:
# self._notice['target']['support']['seo_301'] = True
if "woo-product-bundle-premium/index.php" in active_plugin_v_data or 'woo-product-bundle/index.php' in active_plugin_v_data:
self._notice['target']['support']['product_bundle'] = True
if "woocommerce-points-and-rewards/woocommerce-points-and-rewards.php" in active_plugin_v_data:
self._notice['target']['support']['customer_point_rewards'] = True
# if 'polylang/polylang.php' in active_plugin_v_data and 'polylang-wc/polylang-wc.php' in active_plugin_v_data:
if 'polylang/polylang.php' in active_plugin_v_data:
self._notice['target']['support']['polylang'] = True
if 'woocommerce-status-actions/woocommerce-status-actions.php' in active_plugin_v_data:
self._notice['target']['support']['custom_order_status'] = True
queries_config = {
'orders_status': {
'type': 'select',
# 'query': "SELECT DISTINCT(`post_status`) FROM `_DBPRF_posts` WHERE `post_type` = 'shop_order'",
'query': "SELECT * FROM `_DBPRF_term_taxonomy` AS term_taxonomy LEFT JOIN _DBPRF_terms AS terms ON term_taxonomy.term_id = terms.term_id WHERE term_taxonomy.taxonomy = 'shop_order_status'",
},
}
if self._notice['target']['support']['wpml']:
queries_config['wpml'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_icl_languages` WHERE code IN " + self.list_to_in_condition(active_langs)
}
queries_config['default_lang'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` o LEFT JOIN _DBPRF_icl_languages il ON o.option_value = il.default_locale WHERE o.`option_name` = 'WPLANG' and o.`option_value` != '' "
}
if self._notice['target']['support']['polylang']:
queries_config['polylang'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_terms` as t LEFT JOIN `_DBPRF_term_taxonomy` as tx ON t.term_id = tx.term_id WHERE tx.taxonomy = 'language'"
}
queries_config['polylang_categories'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_terms` as t LEFT JOIN `_DBPRF_term_taxonomy` as tx ON t.term_id = tx.term_id WHERE tx.taxonomy = 'term_language'"
}
if self._notice['target']['support']['custom_order_status']:
queries_config['custom_order_status'] = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_custom_statuses' AND `post_status` = 'publish'"
}
config = self.select_multiple_data_connector(queries_config)
if 'polylang' in config['data'] and not config['data']['polylang']:
self._notice['target']['support']['polylang'] = False
language_data = dict()
order_status_data = dict()
polylang_products = dict()
polylang_categories = dict()
if config and config['result'] == 'success':
if self._notice['target']['support']['custom_order_status'] and config['data']['custom_order_status'] and to_len(config['data']['custom_order_status']) > 0:
for order_status_row in config['data']['custom_order_status']:
order_status_id = 'wc-' + to_str(order_status_row['post_name'])
order_status_data[order_status_id] = order_status_row['post_title']
elif config['data']['orders_status'] and to_len(config['data']['orders_status']) > 0:
for order_status_row in config['data']['orders_status']:
order_status_id = 'wc-' + to_str(order_status_row['name']).lower()
order_status_data[order_status_id] = order_status_row['name']
# order_status_id = order_status_row['post_status']
# order_status_data[order_status_id] = self.get_order_status_label(order_status_row['post_status'])
else:
order_status_data = {
'wc-pending': 'Pending payment',
'wc-processing': 'Processing',
'wc-on-hold': 'On hold',
'wc-completed': 'Completed',
'wc-cancelled': 'Cancelled',
'wc-refunded': 'Refunded',
'wc-failed': 'Failed'
}
if self._notice['target']['support']['wpml']:
if not self._notice['target']['language_default'] and 'default_lang' in config['data'] and config['data']['default_lang']:
for lang_default_row in config['data']['default_lang']:
if lang_default_row['code']:
self._notice['target']['language_default'] = lang_default_row['code']
if 'wpml' in config['data']:
if config['data']['wpml']:
for lang_row in config['data']['wpml']:
lang_id = lang_row["code"]
language_data[lang_id] = lang_row['english_name']
else:
lang_id = 'en'
language_data[lang_id] = "Default language"
elif self._notice['target']['support']['polylang']:
if not self._notice['target']['language_default'] and 'default_lang' in config['data'] and config['data']['default_lang']:
for lang_default_row in config['data']['default_lang']:
if lang_default_row['code']:
self._notice['target']['language_default'] = lang_default_row['code']
if 'polylang' in config['data']:
if config['data']['polylang']:
self._notice['target']['language_default'] = 'en'
for lang_row in config['data']['polylang']:
lang_id = lang_row['slug']
language_data[lang_id] = lang_row['name']
lang_product = lang_row['slug']
polylang_products[lang_product] = lang_row['term_taxonomy_id']
if config['data']['polylang_categories']:
for lang_row in config['data']['polylang_categories']:
lang_category = lang_row['slug'].replace('pll_', '')
polylang_categories[lang_category] = lang_row['term_taxonomy_id']
else:
lang_id = 'en'
language_data[lang_id] = "Default language"
else:
lang_id = 1
language_data[lang_id] = "Default language"
else:
order_status_data = {
'wc-pending': 'Pending payment',
'wc-processing': 'Processing',
'wc-on-hold': 'On hold',
'wc-completed': 'Completed',
'wc-cancelled': 'Cancelled',
'wc-refunded': 'Refunded',
'wc-failed': 'Failed'
}
lang_id = 1
language_data[lang_id] = "Default language"
self._notice['target']['support']['manufacturers'] = True
self._notice['target']['support']['check_manufacturers'] = True
# self._notice['target']['support']['yoast_seo'] = False
self._notice['target']['support']['pre_ord'] = True
self._notice['target']['support']['check_pre_ord'] = True
self._notice['target']['support']['seo'] = True
self._notice['target']['support']['check_seo'] = True
self._notice['target']['support']['seo_301'] = True
self._notice['target']['support']['check_seo_301'] = True
self._notice['target']['support']['cus_pass'] = True
self._notice['target']['support']['check_cus_pass'] = True
self._notice['target']['support']['language_map'] = True
self._notice['target']['languages'] = language_data
self._notice['target']['order_status'] = order_status_data
self._notice['target']['support']['order_status_map'] = True
self._notice['target']['support']['country_map'] = False
self._notice['target']['support']['add_new'] = True
self._notice['target']['support']['coupons'] = True
self._notice['target']['support']['blogs'] = True
self._notice['target']['support']['pages'] = True
self._notice['target']['support']['site_map'] = False
self._notice['target']['support']['pre_prd'] = False
self._notice['target']['support']['pre_cus'] = False
self._notice['target']['support']['img_des'] = True
self._notice['target']['support']['customer_group_map'] = False
self._notice['target']['support']['languages_select'] = True
self._notice['target']['support']['update_latest_data'] = True
self._notice['target']['config']['entity_update']['products'] = True
self._notice['target']['support']['polylang_product'] = polylang_products
self._notice['target']['support']['polylang_category'] = polylang_categories
return response_success()
def get_query_display_import_source(self, update = False):
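"""Build one COUNT query per entity against the source database. With update=True the comparison flips from '>' to '<=' so already-migrated rows are counted instead of new ones."""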
compare_condition = ' > '
if update:
compare_condition = ' <= '
prefix = self._notice['src']['config']['table_prefix']
if self._notice['src']['config'].get('site_id'):
prefix = to_str(prefix).replace(to_str(self._notice['src']['config'].get('site_id')) + '_', '')
queries = {
'taxes': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'",
},
'manufacturers': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy WHERE (taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand') AND term_id " + compare_condition + to_str(self._notice['process']['manufacturers']['id_src']),
},
'categories': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy WHERE taxonomy = 'product_cat' AND term_id " + compare_condition + to_str(self._notice['process']['categories']['id_src']),
},
'products': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'product' AND post_status NOT IN ('inherit','auto-draft') AND ID " + compare_condition + to_str(
self._notice['process']['products']['id_src']),
},
'customers': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM " + prefix + "users u LEFT JOIN " + prefix + "usermeta um ON u.ID = um.user_id WHERE (um.meta_key = '_DBPRF_capabilities' AND um.meta_value LIKE '%customer%' OR um.meta_value LIKE '%subscriber%') AND u.ID " + compare_condition + to_str(
self._notice['process']['customers']['id_src']),
},
'orders': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'shop_order' AND post_status NOT IN ('inherit','auto-draft') AND ID " + compare_condition + to_str(
self._notice['process']['orders']['id_src']),
},
'reviews': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_comments AS cm,_DBPRF_posts AS p WHERE cm.comment_post_ID = p.ID AND p.post_type = 'product' AND cm.comment_ID " + compare_condition + to_str(
self._notice['process']['reviews']['id_src']),
},
'pages': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'page' AND ID " + compare_condition + to_str(self._notice['process']['pages']['id_src']),
},
'coupons': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'shop_coupon' AND ID " + compare_condition + to_str(self._notice['process']['coupons']['id_src']),
},
'blogs': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'post' AND ID " + compare_condition + to_str(self._notice['process']['blogs']['id_src']),
},
}
if self._notice['src']['support']['wpml']:
queries['categories'] = {
'type': 'select',
# 'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_icl_translations il ON tt.term_id = il.element_id "
# "WHERE tt.term_id and il.`source_language_code` is NULL and il.`element_type` = 'tax_product_cat' and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
# self._notice['process']['categories']['id_src']),
'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
"WHERE il.`element_type` = 'tax_product_cat' and il.`source_language_code` IS NULL and tt.taxonomy = 'product_cat' and tt.term_taxonomy_id " + compare_condition + to_str(self._notice['process']['categories']['id_src']),
}
queries['products'] = {
'type': 'select',
# 'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
# "WHERE p.`ID` and il.`source_language_code` is NULL and il.`element_type` = 'post_product' and p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') AND p.ID > " + to_str(
# self._notice['process']['products']['id_src']),
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
"WHERE il.`source_language_code` is NULL and il.`element_type` = 'post_product' and p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') AND p.ID " + compare_condition + to_str(self._notice['process']['products']['id_src']),
}
return queries
def display_import_source(self):
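"""Run the per-entity count queries through the connector and store each total in self._notice['process'][entity]['total']. Taxes are counted from the lines of the 'woocommerce_tax_classes' option plus the implicit Standard class."""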
if self._notice['config']['add_new']:
self.display_recent_data()
queries = self.get_query_display_import_source()
count = self.get_connector_data(self.get_connector_url('query'), {
'serialize': True,
'query': json.dumps(queries)
})
if (not count) or (count['result'] != 'success'):
return response_error()
real_totals = dict()
for key, row in count['data'].items():
total = 0
if key == 'taxes':
if row and to_len(row) > 0:
taxes = row[0]['option_value'].splitlines()
total = (to_len(taxes) + 1) if taxes else 1
else:
total = self.list_to_count_import(row, 'count')
real_totals[key] = total
for key, total in real_totals.items():
self._notice['process'][key]['total'] = total
return response_success()
def display_update_source(self):
queries = self.get_query_display_import_source(True)
count = self.select_multiple_data_connector(queries, 'count')
if (not count) or (count['result'] != 'success'):
return response_error()
real_totals = dict()
for key, row in count['data'].items():
total = 0
if key == 'taxes':
if row and to_len(row) > 0:
taxes = row[0]['option_value'].splitlines()
total = (to_len(taxes) + 1) if taxes else 1
else:
total = self.list_to_count_import(row, 'count')
real_totals[key] = total
for key, total in real_totals.items():
self._notice['process'][key]['total_update'] = total
return response_success()
def display_import_target(self):
return response_success()
def prepare_import_target(self):
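"""Re-detect target plugins right before import and, when SEO or multi-language migration is enabled, create the lecm_rewrite table and add a 'lang' column to the map and rewrite tables."""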
parent = super().prepare_import_target()
if parent['result'] != 'success':
return parent
query_active_plugins = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
}
active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
if active_plugins and active_plugins['data']:
active_plugin = active_plugins['data'][0]
active_plugin_v = active_plugin['option_value']
if active_plugin_v:
active_plugin_v_data = php_unserialize(active_plugin_v)
if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
active_plugin_v_data = list(active_plugin_v_data.values())
if active_plugin_v_data and "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
self._notice['target']['support']['wpml'] = True
query_active_languages = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
}
options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if option_value and 'default_language' in option_value:
self._notice['target']['language_default'] = option_value['default_language']
woo_brands = [
{'name': 'woocommerce-brand/main.php'},
{'name': 'wc-brand/woocommerce-brand.php'},
{'name': 'woocommerce-brands/woocommerce-brands.php'},
{'name': 'brands-for-woocommerce/woocommerce-brand.php', 'taxonomy': 'berocket_brand'},
{'name': 'perfect-woocommerce-brands/main.php', 'taxonomy': 'pwb-brand'},
]
for brand in woo_brands:
if brand['name'] in active_plugin_v_data:
self._notice['target']['support']['plugin_manufacturers'] = False
if brand.get('taxonomy'):
self._notice['target']['config']['brand_taxonomy'] = brand['taxonomy']
break
if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data)):
self._notice['target']['support']['plugin_pre_ord'] = True
if active_plugin_v_data and "wordpress-seo/wp-seo.php" in active_plugin_v_data:
self._notice['target']['support']['yoast_seo'] = True
if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
self._notice['target']['support']['addons'] = True
if "leurlrewrite/leurlrewrite.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_seo'] = True
self._notice['target']['support']['plugin_seo_301'] = True
if "leprespass/leprespass.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_cus_pass'] = True
if "woo-product-bundle-premium/index.php" in active_plugin_v_data:
self._notice['target']['support']['product_bundle'] = True
if "woocommerce-admin/woocommerce-admin.php" in active_plugin_v_data:
self._notice['target']['support']['plugin_woo_admin'] = True
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
self.query_data_connector({'type': 'query', 'query': query['query']})
if self._notice['target']['support']['wpml'] or self._notice['target']['support'].get('polylang'):
add_column = "ALTER TABLE " + self.get_table_name(TABLE_MAP) + " ADD `lang` VARCHAR(255)"
self.query_raw(add_column)
add_column = "ALTER TABLE _DBPRF_lecm_rewrite ADD `lang` VARCHAR(255)"
self.query_data_connector({'type': 'query', 'query': add_column})
return response_success()
def display_confirm_target(self):
self._notice['target']['clear']['function'] = 'clear_target_taxes'
self._notice['target']['clear_demo']['function'] = 'clear_target_products_demo'
return response_success()
# TODO clear demo
def clear_target_manufacturers_demo(self):
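"""Remove brand terms created by a previous (demo) migration, resolving their target ids through the migration map before deleting term meta, terms and term taxonomies."""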
next_clear = {
'result': 'process',
'function': 'clear_target_categories_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['manufacturers']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_MANUFACTURER
}
manufacturers = self.select_obj(TABLE_MAP, where)
manufacturer_ids = list()
if manufacturers['result'] == 'success':
manufacturer_ids = duplicate_field_value_from_list(manufacturers['data'], 'id_desc')
if not manufacturer_ids:
return next_clear
manufacturer_id_con = self.list_to_in_condition(manufacturer_ids)
taxonomy_meta_table = 'termmeta'
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' AND term_id IN " + manufacturer_id_con
}
manufacturers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(collections_query)})
if manufacturers['data']:
all_queries = list()
taxonomy_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_taxonomy_id')
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + manufacturer_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + manufacturer_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
if all_queries:
self.import_multiple_data_connector(all_queries, 'cleardemo')
return self._notice['target']['clear_demo']
def clear_target_categories_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_products_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['categories']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_CATEGORY
}
categories = self.select_obj(TABLE_MAP, where)
category_ids = list()
if categories['result'] == 'success':
category_ids = duplicate_field_value_from_list(categories['data'], 'id_desc')
if not category_ids:
return next_clear
category_id_con = self.list_to_in_condition(category_ids)
taxonomy_meta_table = 'termmeta'
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' AND term_id IN " + category_id_con
}
categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(collections_query)})
if categories['data']:
all_queries = list()
taxonomy_ids = duplicate_field_value_from_list(categories['data'], 'term_taxonomy_id')
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + category_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + category_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` "
"WHERE element_type = 'tax_product_cat' AND element_id IN " + category_id_con
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'category' and type_id IN " + category_id_con
})
})
if all_queries:
self.import_multiple_data_connector(all_queries, 'cleardemo')
return next_clear
def clear_target_products_demo(self):
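"""Remove previously migrated products in batches of LIMIT_CLEAR_DEMO, together with their post meta, migrated attribute taxonomies, WPML translations and SEO rewrites."""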
next_clear = {
'result': 'process',
'function': 'clear_target_orders_demo',
}
if not self._notice['config']['products']:
self._notice['target']['clear_demo'] = next_clear
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_PRODUCT
}
products = self.select_page(TABLE_MAP, where, self.LIMIT_CLEAR_DEMO)
product_ids = list()
if products['result'] == 'success':
product_ids = duplicate_field_value_from_list(products['data'], 'id_desc')
if not product_ids:
self._notice['target']['clear_demo'] = next_clear
return next_clear
product_id_con = self.list_to_in_condition(product_ids)
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_posts` "
"WHERE ID IN " + product_id_con + " OR post_parent IN " + product_id_con
}
products = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(collections_query)})
all_post_id = list()
if products['data']:
all_post_id = duplicate_field_value_from_list(products['data'], 'ID')
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` "
"WHERE ID IN " + self.list_to_in_condition(all_post_id)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
all_meta_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_post_meta`"
" WHERE post_id IN " + self.list_to_in_condition(all_post_id)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
where = {
'migration_id': self._migration_id,
'type': self.TYPE_OPTION
}
attributes = self.select_obj(TABLE_MAP, where)
attribute_ids = list()
attribute_codes = list()
if attributes['result'] == 'success':
attribute_ids = duplicate_field_value_from_list(attributes['data'], 'id_desc')
attribute_codes = duplicate_field_value_from_list(attributes['data'], 'value')
if attribute_ids:
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_woocommerce_attribute_taxonomies` WHERE attribute_id IN " + self.list_to_in_condition(
attribute_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
term_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_term_taxonomy` tt LEFT JOIN `_DBPRF_terms` t ON tt.term_id = t.term_id "
"WHERE tt.taxonomy IN " + self.list_to_in_condition(attribute_codes)
}
terms = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(term_query)})
if terms['data']:
term_ids = duplicate_field_value_from_list(terms['data'], 'term_id')
taxonomy_ids = duplicate_field_value_from_list(terms['data'], 'term_taxonomy_id')
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
term_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` "
"WHERE element_type = 'post_product' AND element_id IN " + product_id_con
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'product' and type_id IN " + product_id_con
})
})
self.delete_map_demo(self.TYPE_PRODUCT, product_ids)
if product_ids and to_len(product_ids) < self.LIMIT_CLEAR_DEMO:
self._notice['target']['clear_demo'] = next_clear
return next_clear
return self._notice['target']['clear_demo']
def clear_target_customers_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_orders_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['customers']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_CUSTOMER
}
customers = self.select_obj(TABLE_MAP, where)
customer_ids = list()
if customers['result'] == 'success':
customer_ids = duplicate_field_value_from_list(customers['data'], 'id_desc')
if not customer_ids:
return next_clear
customer_id_con = self.list_to_in_condition(customer_ids)
del_user_query = "DELETE FROM _DBPRF_users WHERE ID IN " + customer_id_con
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_user_query
})
})
if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
self.log("Clear data failed. Error: Could not empty customers ", 'clear')
del_user_meta_query = "DELETE FROM _DBPRF_usermeta WHERE user_id IN " + customer_id_con
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_user_meta_query
})
})
if self._notice['target']['support'].get('plugin_woo_admin') or self.convert_version(self._notice['target']['config']['version'], 2) > 399:
del_customer_lookup_query = "DELETE FROM _DBPRF_wc_customer_lookup WHERE user_id IN " + customer_id_con
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_customer_lookup_query
})
})
return next_clear
def clear_target_orders_demo(self):
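"""Remove previously migrated orders and refunds in batches of LIMIT_CLEAR_DEMO, then hand over to clear_target_reviews_demo."""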
next_clear = {
'result': 'success',
'function': 'clear_target_reviews_demo',
}
if not self._notice['config']['orders']:
self._notice['target']['clear_demo'] = next_clear
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_ORDER
}
orders = self.select_page(TABLE_MAP, where, self.LIMIT_CLEAR_DEMO)
order_ids = list()
if orders['result'] == 'success':
order_ids = duplicate_field_value_from_list(orders['data'], 'id_desc')
if not order_ids:
self._notice['target']['clear_demo'] = next_clear
return next_clear
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund') AND ID IN " + self.list_to_in_condition(
order_ids)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
# clear meta post(orders)
all_meta_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_postmeta` WHERE post_id IN " + self.list_to_in_condition(order_ids)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
self.delete_map_demo(self.TYPE_ORDER, order_ids)
if order_ids and to_len(order_ids) < self.LIMIT_CLEAR_DEMO:
self._notice['target']['clear_demo'] = next_clear
return next_clear
return self._notice['target']['clear_demo']
def clear_target_reviews_demo(self):
next_clear = {
'result': 'success',
'function': 'clear_target_pages_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['reviews']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_REVIEW
}
reviews = self.select_obj(TABLE_MAP, where)
review_ids = list()
if reviews['result'] == 'success':
review_ids = duplicate_field_value_from_list(reviews['data'], 'id_desc')
if not review_ids:
return next_clear
tables = [
'commentmeta',
'comments'
]
for table in tables:
where = ''
if table == 'comments':
where = " WHERE comment_ID IN " + self.list_to_in_condition(review_ids)
if table == 'commentmeta':
where = " WHERE comment_id IN " + self.list_to_in_condition(review_ids)
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "`" + where
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
return next_clear
# TODO: clear
def clear_target_taxes(self):
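"""Empty the WooCommerce tax tables and blank the 'woocommerce_tax_classes' option, then queue clear_target_manufacturers as the next clear step."""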
next_clear = {
'result': 'process',
'function': 'clear_target_manufacturers',
'msg': ''
}
if not self._notice['config']['taxes']:
self._notice['target']['clear'] = next_clear
return next_clear
tables = [
'options',
'woocommerce_tax_rates',
'woocommerce_tax_rate_locations',
'wc_tax_rate_classes'
]
for table in tables:
if table == 'options':
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "UPDATE `_DBPRF_" + table + "` SET `option_value` = '' WHERE `option_name` = 'woocommerce_tax_classes'"
})
})
continue
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + table + "` WHERE 1"
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
self._notice['target']['clear'] = next_clear
return next_clear
def clear_target_manufacturers(self):
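"""Delete every term of the configured brand taxonomy (termmeta, terms, term_relationships, term_taxonomy) and reset its Yoast SEO taxonomy meta."""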
next_clear = {
'result': 'process',
'function': 'clear_target_categories',
'msg': ''
}
if not self._notice['config']['manufacturers']:
self._notice['target']['clear'] = next_clear
return next_clear
taxonomy_meta_table = 'termmeta'
taxonomy = 'berocket_brand'
if self._notice['target']['config'].get('brand_taxonomy'):
taxonomy = self._notice['target']['config']['brand_taxonomy']
# all_collections_query = {
# 'type': 'select',
# 'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' LIMIT 200"
# }
# manufacturers = self.get_connector_data(self.get_connector_url('query'),
# {'query': json.dumps(all_collections_query)})
tables = ['termmeta', 'terms', 'term_relationships', 'term_taxonomy']
for table in tables:
where = ''
if table in ['termmeta', 'terms']:
where = " term_id IN (SELECT term_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = " + self.escape(taxonomy) + " )"
if table in ['term_relationships']:
where = " term_taxonomy_id IN (SELECT term_taxonomy_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = " + self.escape(taxonomy) + " )"
if table == 'term_taxonomy':
where = " taxonomy = " + self.escape(taxonomy)
query = "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
clear_table = self.query_data_connector({'type': 'delete', 'query': query})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
# if manufacturers:
# while manufacturers['data']:
# if not manufacturers:
# return next_clear
# term_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_id')
# all_queries = list()
# taxonomy_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_taxonomy_id')
# all_queries.append({
# 'type': 'query',
# 'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + self.list_to_in_condition(
# term_ids)
# })
# all_queries.append({
# 'type': 'query',
# 'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
# term_ids)
# })
# all_queries.append({
# 'type': 'query',
# 'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
# taxonomy_ids)
# })
# if all_queries:
# self.import_multiple_data_connector(all_queries, 'cleardemo')
# all_collections_query = {
# 'type': 'select',
# 'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' LIMIT 200"
# }
# manufacturers = self.get_connector_data(self.get_connector_url('query'),
# {'query': json.dumps(all_collections_query)})
if self._notice['target']['support']['yoast_seo']:
query_wpseo = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
}
options_data = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(query_wpseo)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if taxonomy in option_value:
option_value[taxonomy] = dict()
data_set = {
'option_value': php_serialize(option_value)
}
where = {
'option_id': options_data['data'][0]['option_id'],
'option_name': 'wpseo_taxonomy_meta'
}
update_query = self.create_update_query_connector('options', data_set, where)
wpseo_taxonomy_clear = self.import_data_connector(update_query, 'manufacturer')
self._notice['target']['clear'] = next_clear
return next_clear
def clear_target_categories(self):
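"""Delete product/post categories in chunks of 200 until none remain, then clean up WPML translations, SEO rewrites and Yoast taxonomy meta."""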
next_clear = {
'result': 'process',
'function': 'clear_target_products',
'msg': ''
}
if not self._notice['config']['categories']:
self._notice['target']['clear'] = next_clear
return next_clear
taxonomy_meta_table = 'termmeta'
while self._check_categories_exists():
all_collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' LIMIT 200"
}
categories = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
if not categories:
return next_clear
term_ids = duplicate_field_value_from_list(categories['data'], 'term_id')
taxonomy_ids = duplicate_field_value_from_list(categories['data'], 'term_taxonomy_id')
taxnomy_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + self.list_to_in_condition(
term_ids)
}
self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(taxnomy_query)})
self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
term_ids)
})})
self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
})
# end for
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` where element_type = 'tax_product_cat'"
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'category'"
})
})
if self._notice['target']['support']['yoast_seo']:
query_wpseo = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
}
options_data = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(query_wpseo)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if 'product_cat' in option_value:
option_value['product_cat'] = dict()
data_set = {
'option_value': php_serialize(option_value)
}
where = {
'option_id': options_data['data'][0]['option_id'],
'option_name': 'wpseo_taxonomy_meta'
}
update_query = self.create_update_query_connector('options', data_set, where)
wpseo_taxonomy_clear = self.import_data_connector(update_query, 'category')
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
def _check_categories_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT term_taxonomy_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' LIMIT 1"
}
categories = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if categories['data'] else False
def _check_product_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT ID FROM `_DBPRF_posts` WHERE post_type IN ('product', 'product_variation') LIMIT 1"
}
# products = self.get_connector_data(self.get_connector_url('query'),
# {'query': json.dumps(all_collections_query)})
products = self.select_data_connector(all_collections_query, 'products')
return True if products['data'] else False
def _check_attributes_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_woocommerce_attribute_taxonomies` ORDER BY attribute_id LIMIT 200"
}
attributes = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if attributes['data'] else False
def clear_target_products(self):
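"""Delete all products and variations, orphaned post meta, and every product attribute taxonomy with its terms; loops until the posts table holds no products."""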
next_clear = {
'result': 'process',
'function': 'clear_target_customers',
'msg': ''
}
if not self._notice['config']['products']:
self._notice['target']['clear'] = next_clear
return next_clear
while self._check_product_exists():
# clear posts(product)
# clear meta post(product)
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN('product', 'product_variation')"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
all_meta_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_postmeta` WHERE post_id NOT IN (SELECT ID FROM _DBPRF_posts)"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
# clear attributes
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = '_transient_wc_attribute_taxonomies'"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
while self._check_attributes_exists():
product_attribute_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_woocommerce_attribute_taxonomies` ORDER BY attribute_id LIMIT 200"
}
attributes = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(product_attribute_query)})
if attributes['data']:
attribute_ids = duplicate_field_value_from_list(attributes['data'], 'attribute_id')
attribute_names = duplicate_field_value_from_list(attributes['data'], 'attribute_name')
attribute_names_condition = "('pa_" + "','pa_".join(attribute_names) + "')"
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_woocommerce_attribute_taxonomies` WHERE attribute_id IN " + self.list_to_in_condition(
attribute_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
term_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_term_taxonomy` tt LEFT JOIN `_DBPRF_terms` t ON tt.term_id = t.term_id "
"WHERE tt.taxonomy IN " + attribute_names_condition
}
terms = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(term_query)})
if terms['data']:
term_ids = duplicate_field_value_from_list(terms['data'], 'term_id')
taxonomy_ids = duplicate_field_value_from_list(terms['data'], 'term_taxonomy_id')
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
term_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` where element_type IN ('post_product','post_product_variation'"
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'product'"
})
})
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
def clear_target_customers(self):
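"""Delete users whose capabilities mark them as plain customers, their orphaned user meta, and matching rows in wc_customer_lookup."""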
next_clear = {
'result': 'process',
'function': 'clear_target_orders',
'msg': ''
}
if not self._notice['config']['customers']:
self._notice['target']['clear'] = next_clear
return next_clear
# "DELETE FROM `wp_usermeta`
# WHERE meta_key IN ('wp_capabilities', 'wp_capabilities') AND meta_value = 'a:1:{s:8:"customer";b:1;}'"
del_user_query = "DELETE _DBPRF_users FROM _DBPRF_users " \
"LEFT JOIN _DBPRF_usermeta ON _DBPRF_users.ID = _DBPRF_usermeta.user_id " \
"WHERE _DBPRF_usermeta.meta_key IN ('_DBPRF_capabilities', '_DBPRF_capabilities') " \
"AND _DBPRF_usermeta.meta_value = 'a:1:{s:8:\"customer\";b:1;}'"
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_user_query
})
})
if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
self.log("Clear data failed. Error: Could not empty customers ", 'clear')
del_user_meta_query = "DELETE _DBPRF_usermeta FROM _DBPRF_usermeta " \
"LEFT JOIN _DBPRF_users ON _DBPRF_usermeta.user_id = _DBPRF_users.ID WHERE _DBPRF_users.ID IS NULL"
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_user_meta_query
})
})
#
# if self._notice['target']['support'].get('plugin_woo_admin') or self.convert_version(self._notice['target']['config']['version'], 2) > 399:
del_customer_lookup_query = "DELETE _DBPRF_wc_customer_lookup FROM _DBPRF_wc_customer_lookup LEFT JOIN _DBPRF_users ON _DBPRF_wc_customer_lookup.user_id = _DBPRF_users.ID WHERE _DBPRF_users.ID IS NULL"
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': del_customer_lookup_query
})
})
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
def _check_order_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT ID FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund') LIMIT 1"
}
orders = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if orders['data'] else False
def clear_target_orders(self):
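"""Delete all shop_order/shop_order_refund posts and their orphaned post meta; loops until no orders remain."""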
next_clear = {
'result': 'process',
'function': 'clear_target_reviews',
'msg': ''
}
if not self._notice['config']['orders']:
self._notice['target']['clear'] = next_clear
return next_clear
while self._check_order_exists():
# clear posts(orders)
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund')"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty orders", 'clear')
continue
# clear meta post(orders)
all_meta_query = {
'type': 'query',
'query': "DELETE pm FROM `_DBPRF_postmeta` pm LEFT JOIN `_DBPRF_posts` p ON p.ID = pm.post_id"
" WHERE p.ID IS NULL"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty order meta", 'clear')
continue
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
def clear_target_reviews(self):
next_clear = {
'result': 'process',
'function': 'clear_target_blogs',
'msg': ''
}
if not self._notice['config']['reviews']:
self._notice['target']['clear'] = next_clear
return next_clear
tables = [
'commentmeta',
'comments'
]
for table in tables:
self._notice['target']['clear']['result'] = 'process'
self._notice['target']['clear']['function'] = 'clear_target_reviews'
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "`"
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
# def clear_target_blogs(self):
# next_clear = {
# 'result': 'process',
# 'function': 'clear_target_coupons',
# 'msg': ''
# }
# self._notice['target']['clear'] = next_clear
# if not self._notice['config'].get('blogs'):
# return next_clear
# all_queries = {
# 'term': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_terms WHERE term_id IN (SELECT term_id FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['category', 'post_tag']) + ')'
# },
# 'term_taxonomy': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['category', 'post_tag'])
# },
# 'term_relationship': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_term_relationships WHERE object_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "post")'
# },
# 'postmeta': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_postmeta WHERE post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "post")'
# },
# 'posts': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_posts WHERE post_type = "post"'
# },
# }
# delete = self.query_multiple_data_connector(all_queries, 'clear_blog')
# return next_clear
def clear_target_coupons(self):
next_clear = {
'result': 'process',
'function': 'clear_target_pages',
'msg': ''
}
self._notice['target']['clear'] = next_clear
if not self._notice['config']['coupons']:
return next_clear
tables = [
'postmeta',
'posts'
]
for table in tables:
where = ' post_type = "shop_coupon"'
if table == 'postmeta':
where = ' post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "shop_coupon")'
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
return next_clear
# def clear_target_pages(self):
# next_clear = {
# 'result': 'process',
# 'function': '',
# 'msg': ''
# }
# self._notice['target']['clear'] = next_clear
# if not self._notice['config']['pages']:
# return next_clear
# tables = [
# 'postmeta',
# 'posts'
# ]
# for table in tables:
# where = ' post_type = "page"'
# if table == 'postmeta':
# where = ' post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "page")'
# clear_table = self.get_connector_data(self.get_connector_url('query'), {
# 'query': json.dumps({
# 'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
# })
# })
# if (not clear_table) or (clear_table['result'] != 'success'):
# self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
# continue
# return next_clear
# TODO: TAX
def prepare_taxes_import(self):
return self
def prepare_taxes_export(self):
return self
def get_taxes_main_export(self):
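"""Read the 'woocommerce_tax_classes' option and expand it into pseudo tax rows: id 1 is the implicit Standard class, additional classes get ids from 2 upward."""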
id_src = self._notice['process']['taxes']['id_src']
limit = self._notice['setting']['taxes']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'"
}
# taxes = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
taxes = self.select_data_connector(query, 'taxes')
if not taxes or taxes['result'] != 'success':
return response_error('could not get taxes main to export')
list_taxes = response_success()
if taxes['data'] and to_len(taxes['data']) > 0:
list_taxes['data'] = list()
for tax in taxes['data']:
_taxes = tax['option_value'].splitlines()
if _taxes:
tmp_taxes = [
{
'id': 1,
'name': 'Standard'
}
]
for i, tax_name in enumerate(_taxes, start=2):
tax_data = dict()
tax_data['id'] = i
tax_data['name'] = tax_name
tmp_taxes.append(tax_data)
list_taxes['data'].extend(tmp_taxes)
return list_taxes
def get_taxes_ext_export(self, taxes):
url_query = self.get_connector_url('query')
tax_product_class_names = duplicate_field_value_from_list(taxes['data'], 'name')
tax_names = list()
for class_name in tax_product_class_names:
_class_name = to_str(class_name).lower()
_class_name = _class_name.replace(' ', '-')
tax_names.append(_class_name)
taxes_ext_queries = {
'tax_rates': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_woocommerce_tax_rates WHERE 1"
# tax_rate_class IN " + self.list_to_in_condition(tax_names),
}
}
# taxes_ext = self.get_connector_data(url_query, {'serialize': True, 'query': json.dumps(taxes_ext_queries)})
taxes_ext = self.select_multiple_data_connector(taxes_ext_queries, 'taxes')
if not taxes_ext or taxes_ext['result'] != 'success':
return response_error()
tax_zone_ids = duplicate_field_value_from_list(taxes_ext['data']['tax_rates'], 'tax_rate_id')
taxes_ext_rel_queries = {
'tax_rates_location': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_woocommerce_tax_rate_locations WHERE tax_rate_id IN " + self.list_to_in_condition(
tax_zone_ids),
}
}
# taxes_ext_rel = self.get_connector_data(url_query,
# {'serialize': True, 'query': json.dumps(taxes_ext_rel_queries)})
taxes_ext_rel = self.select_multiple_data_connector(taxes_ext_rel_queries, 'taxes')
if not taxes_ext_rel or taxes_ext_rel['result'] != 'success':
return response_error()
taxes_ext = self.sync_connector_object(taxes_ext, taxes_ext_rel)
return taxes_ext
def convert_tax_export(self, tax, taxes_ext):
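"""Map one exported tax class and its WooCommerce tax rates/locations onto the generic tax construct: one tax zone per rate with country, state, postcode, city, priority, compound and shipping flags."""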
tax_zones = list()
tax_rate_class_1 = to_str(tax['name']).lower()
tax_rate_class_1 = tax_rate_class_1.replace(' ', '-')
if tax['name'] == 'Standard':
tax_rate_class_1 = ''
src_tax_rate = get_list_from_list_by_field(taxes_ext['data']['tax_rates'], 'tax_rate_class', tax_rate_class_1)
if src_tax_rate and to_len(src_tax_rate) > 0:
for tax_rate in src_tax_rate:
tax_zone = self.construct_tax_zone()
# tax_zone = self.addConstructDefault(tax_zone)
tax_zone['id'] = tax_rate['tax_rate_id']
tax_zone['name'] = tax_rate['tax_rate_name']
tax_zone_country = self.construct_tax_zone_country()
tax_zone_country['name'] = self.get_country_name_by_code(tax_rate['tax_rate_country']) if tax_rate['tax_rate_country'] else ''
tax_zone_country['code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_country', '')
tax_zone_country['country_code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_country', '')
tax_zone['country'] = tax_zone_country
tax_zone_state = self.construct_tax_zone_state()
tax_zone_state['name'] = ''
tax_zone_state['code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_state', '')
tax_zone_state['state_code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_state', '')
tax_zone['state'] = tax_zone_state
tax_zone['rate'] = self.construct_tax_zone_rate()
tax_zone['rate']['id'] = tax_rate['tax_rate_id']
tax_zone['rate']['name'] = tax_rate['tax_rate_name']
tax_zone['rate']['code'] = tax_rate['tax_rate_class']
tax_zone['rate']['rate'] = tax_rate['tax_rate']
tax_rates_locations = get_list_from_list_by_field(taxes_ext['data']['tax_rates_location'], 'tax_rate_id', tax_rate['tax_rate_id'])
tax_zone_city = get_list_from_list_by_field(tax_rates_locations, 'location_type', 'city')
tax_zone['postcode'] = get_row_value_from_list_by_field(tax_rates_locations, 'location_type', 'postcode', 'location_code')
if tax_zone_city:
for _tax_zone_city in tax_zone_city:
tax_zone['city'] += _tax_zone_city['location_code'] + ';'
tax_zone['priority'] = tax_rate['tax_rate_priority']
tax_zone['compound'] = True if tax_rate['tax_rate_compound'] and to_int(tax_rate['tax_rate_compound']) == 1 else False
tax_zone['is_shipping'] = True if tax_rate['tax_rate_shipping'] and to_int(tax_rate['tax_rate_shipping']) == 1 else False
tax_zones.append(tax_zone)
tax_product = self.construct_tax_product()
tax_product = self.add_construct_default(tax_product)
tax_code = to_str(tax['name']).lower()
tax_code = tax_code.replace(' ', '-')
tax_product['name'] = tax['name']
tax_product['code'] = tax_code
tax_product['created_at'] = get_current_time()
tax_product['updated_at'] = get_current_time()
tax_products = [tax_product]
tax_data = self.construct_tax()
tax_data = self.add_construct_default(tax_data)
# id_src = self._notice['process']['taxes']['id_src']
tax_data['id'] = tax['id']
tax_data['code'] = tax_code # tax['name']
tax_data['name'] = tax['name']
tax_data['created_at'] = get_current_time()
tax_data['updated_at'] = get_current_time()
tax_data['tax_zones'] = tax_zones
tax_data['tax_products'] = tax_products
return response_success(tax_data)
def get_tax_id_import(self, convert, tax, taxes_ext):
# id_src = self._notice['process']['taxes']['id_src']
return tax['id']
def check_tax_import(self, convert, tax, taxes_ext):
return True if self.get_map_field_by_src(self.TYPE_TAX, convert['id'], convert['code']) else False
def router_tax_import(self, convert, tax, taxes_ext):
return response_success('tax_import')
def before_tax_import(self, convert, tax, taxes_ext):
return response_success()
def tax_import(self, convert, tax, taxes_ext):
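        """Import a tax class into the target store.

        Non-standard classes are appended to the newline-separated
        'woocommerce_tax_classes' option; on targets reporting version
        code >= 370 (assumed to correspond to WooCommerce 3.7+) a row is
        also inserted into wc_tax_rate_classes.
        """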
slug = self.sanitize_title(convert['name'])
if convert['name'] != 'Standard':
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'"
}
taxes = self.select_data_connector(query, 'taxes')
if taxes and taxes['data']:
old_tax_data = taxes['data'][0]
new_option_value = old_tax_data['option_value'] + '\n' + convert['name'] if old_tax_data['option_value'] else convert['name']
query_update = {
'type': 'query',
'query': "UPDATE `_DBPRF_options` SET `option_value` = '" + new_option_value + "' WHERE `option_name` = 'woocommerce_tax_classes'"
}
taxes = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_update)})
else:
tax_data = {
'option_name': 'woocommerce_tax_classes',
'option_value': convert['name'],
'autoload': 'yes'
}
tax_query = self.create_insert_query_connector('options', tax_data)
tax_import = self.import_tax_data_connector(tax_query, True, convert['id'])
if self.convert_version(self._notice['target']['config']['version'], 2) >= 370:
tax_rate_classes = {
'name': convert['name'],
'slug': slug
}
tax_rate_classes_query = self.create_insert_query_connector('wc_tax_rate_classes', tax_rate_classes)
tax_rate_classes_import = self.import_data_connector(tax_rate_classes_query, 'wc_tax_rate_classes')
tax_code = to_str(convert['name']).lower()
tax_code = self.sanitize_title(tax_code.replace(' ', '-'))
self.insert_map(self.TYPE_TAX, convert['id'], 0, convert['code'], tax_code)
return response_success(convert['id'])
def after_tax_import(self, tax_id, convert, tax, taxes_ext):
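        """Insert the converted tax zones as woocommerce_tax_rates rows,
        plus optional postcode/city rows in woocommerce_tax_rate_locations
        keyed by each new tax_rate_id.
        """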
if convert['tax_zones']:
tax_code = to_str(convert['name']).lower()
tax_code = tax_code.replace(' ', '-')
for tax_zone in convert['tax_zones']:
tax_rate = {
'tax_rate_country': tax_zone['country']['country_code'],
'tax_rate_state': tax_zone['state']['state_code'] if tax_zone['state']['state_code'] else '*',
                    'tax_rate': tax_zone['rate']['rate'] if tax_zone['rate']['rate'] else 0,
'tax_rate_name': tax_zone['rate']['name'] if tax_zone['rate']['name'] else 'Tax',
'tax_rate_priority': tax_zone.get('priority', 1),
'tax_rate_compound': 1 if tax_zone.get('compound') else 0,
'tax_rate_shipping': 1 if tax_zone.get('is_shipping') else 0,
'tax_rate_order': 0,
'tax_rate_class': '' if convert['name'] == 'Standard' else self.convert_attribute_code(tax_code)
}
tax_rate_query = self.create_insert_query_connector('woocommerce_tax_rates', tax_rate)
tax_rate_import = self.import_data_connector(tax_rate_query, 'tax')
if get_value_by_key_in_dict(tax_zone, 'postcode', False):
location_postcode = {
'location_code': get_value_by_key_in_dict(tax_zone, 'postcode', ''),
'tax_rate_id': tax_rate_import,
'location_type': 'postcode'
}
self.import_data_connector(
self.create_insert_query_connector('woocommerce_tax_rate_locations', location_postcode), 'tax')
if get_value_by_key_in_dict(tax_zone, 'city', False):
tax_zone_city = tax_zone['city'].split(';')
if tax_zone_city:
for _tax_zone_city in tax_zone_city:
                            if _tax_zone_city.strip() != '':
                                # one location row per city code, not the whole joined string
                                location_city = {
                                    'location_code': _tax_zone_city,
                                    'tax_rate_id': tax_rate_import,
                                    'location_type': 'city'
                                }
self.import_data_connector(self.create_insert_query_connector('woocommerce_tax_rate_locations', location_city), 'tax')
return response_success()
def addition_tax_import(self, convert, tax, taxes_ext):
return response_success()
# TODO: MANUFACTURER
def prepare_manufacturers_import(self):
return self
def prepare_manufacturers_export(self):
return self
def get_manufacturers_main_export(self):
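        """Select the next batch of brand terms to export.

        Brands may live under several taxonomies depending on the installed
        plugin ('product_brand', 'brand' or 'pwb-brand'), so all three are
        queried, paged by term_id from the saved process position.
        """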
id_src = self._notice['process']['manufacturers']['id_src']
limit = self._notice['setting']['manufacturers']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id"
" WHERE (tx.taxonomy = 'product_brand' OR tx.taxonomy = 'brand' OR tx.taxonomy = 'pwb-brand') AND tx.term_id > " + to_str(
id_src) + " ORDER BY tx.term_id ASC LIMIT " + to_str(limit)
}
# manufacturers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
manufacturers = self.select_data_connector(query, 'manufacturers')
if not manufacturers or manufacturers['result'] != 'success':
return response_error('could not get manufacturers main to export')
return manufacturers
def get_manufacturers_ext_export(self, manufacturers):
url_query = self.get_connector_url('query')
category_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_id')
cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
manufacturers_ext_queries = {
'all_category': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tx.taxonomy = 'product_cat' AND tx.term_id > 0 "
}
}
if cart_version > 223:
manufacturers_ext_queries['woocommerce_termmeta'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_termmeta WHERE term_id IN " + self.list_to_in_condition(
category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
}
else:
manufacturers_ext_queries['woocommerce_termmeta'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_woocommerce_termmeta WHERE woocommerce_term_id IN " + self.list_to_in_condition(
category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
}
manufacturers_ext_queries['brand_taxonomy_images'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_options WHERE option_name IN " + self.brand_image_in_condition(category_ids)
}
# manufacturers_ext = self.get_connector_data(url_query, {
# 'serialize': True,
# 'query': json.dumps(manufacturers_ext_queries)
# })
manufacturers_ext = self.select_multiple_data_connector(manufacturers_ext_queries, 'manufacturers')
if not manufacturers_ext or manufacturers_ext['result'] != 'success':
return response_warning()
thumb_id_list = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'meta_key',
'thumbnail_id')
thumbnail_ids = duplicate_field_value_from_list(thumb_id_list, 'meta_value')
thumb_ids_query = self.list_to_in_condition(thumbnail_ids)
manufacturers_ext_rel_queries = {
'post_meta': {
'type': 'select',
'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
"LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' WHERE p.ID IN " + thumb_ids_query
}
}
# add custom
if manufacturers_ext_rel_queries:
# manufacturers_ext_rel = self.get_connector_data(url_query, {
# 'serialize': True,
# 'query': json.dumps(manufacturers_ext_rel_queries)
# })
manufacturers_ext_rel = self.select_multiple_data_connector(manufacturers_ext_rel_queries, 'manufacturers')
if not manufacturers_ext_rel or manufacturers_ext_rel['result'] != 'success':
return response_error()
manufacturers_ext = self.sync_connector_object(manufacturers_ext, manufacturers_ext_rel)
return manufacturers_ext
def convert_manufacturer_export(self, manufacturer, manufacturers_ext):
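        """Convert a brand term into the intermediate manufacturer format.

        Resolves the thumbnail via termmeta (core termmeta on carts with
        version code > 223, woocommerce_termmeta on older ones) and falls
        back to the 'brand_taxonomy_image<term_id>' option some brand
        plugins use.
        """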
manufacturer_data = self.construct_manufacturer()
manufacturer_data = self.add_construct_default(manufacturer_data)
manufacturer_path = manufacturer_url = img_label = ''
cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
manufacturer_src = False
if cart_version > 223:
manufacturer_src = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'term_id', manufacturer['term_id'])
else:
manufacturer_src = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'woocommerce_term_id', manufacturer['term_id'])
if manufacturer_src:
manufacturer_img_id = self.get_value_metadata(manufacturer_src, 'thumbnail_id', 0)
img_meta = get_list_from_list_by_field(manufacturers_ext['data']['post_meta'], 'ID', manufacturer_img_id)
if img_meta:
img_label = img_meta[0]['post_title']
manufacturer_path = img_meta[0]['meta_value']
manufacturer_url = to_str(img_meta[0]['guid']).replace(img_meta[0]['meta_value'], '')
brand_image = get_row_value_from_list_by_field(manufacturers_ext['data']['brand_taxonomy_images'], 'option_name', "brand_taxonomy_image" + to_str(manufacturer['term_id']), 'option_value')
if brand_image:
manufacturer_url = brand_image
manufacturer_data['id'] = manufacturer['term_id']
manufacturer_data['code'] = manufacturer['slug']
manufacturer_data['name'] = manufacturer['name']
manufacturer_data['description'] = manufacturer['description']
manufacturer_data['thumb_image']['label'] = img_label
manufacturer_data['thumb_image']['url'] = manufacturer_url
manufacturer_data['thumb_image']['path'] = manufacturer_path
manufacturer_data['created_at'] = get_current_time()
manufacturer_data['updated_at'] = get_current_time()
language_id = self._notice['src']['language_default']
manufacturer_language_data = dict()
manufacturer_language_data['name'] = manufacturer['name']
manufacturer_language_data['description'] = manufacturer['description']
manufacturer_data['languages'][language_id] = manufacturer_language_data
manufacturer_data['manufacturer'] = manufacturer
manufacturer_data['manufacturers_ext'] = manufacturers_ext
return response_success(manufacturer_data)
def get_manufacturer_id_import(self, convert, manufacturer, manufacturers_ext):
return manufacturer['term_id']
def check_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return True if self.get_map_field_by_src(self.TYPE_MANUFACTURER, convert['id']) else False
def router_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return response_success('manufacturer_import')
def before_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return response_success()
def manufacturer_import(self, convert, manufacturer, manufacturers_ext):
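        """Import one manufacturer as a term under the target brand taxonomy.

        Creates the terms and term_taxonomy rows (taxonomy defaults to
        'berocket_brand' unless the target config names another one),
        uploads the brand image, writes the related termmeta rows and, when
        WPML or Yoast SEO is present, merges meta title/description into
        the serialized 'wpseo_taxonomy_meta' option.
        """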
slug = self.sanitize_title(convert['name'])
manufacturer_term = {
'name': convert['name'],
'slug': convert['code'] if convert['code'] else slug,
'term_group': 0,
}
manufacturer_term_query = self.create_insert_query_connector('terms', manufacturer_term)
term_id = self.import_data_connector(manufacturer_term_query, 'category')
if not term_id:
            return response_warning('Manufacturer ' + to_str(convert['id']) + ' import failed.')
taxonomy = 'berocket_brand'
if self._notice['target']['config'].get('brand_taxonomy'):
taxonomy = self._notice['target']['config']['brand_taxonomy']
manufacturer_taxonomy = {
'term_id': term_id,
'taxonomy': taxonomy,
'description': get_value_by_key_in_dict(convert, 'description', ''),
'parent': 0,
'count': 0
}
manufacturer_taxonomy_query = self.create_insert_query_connector('term_taxonomy', manufacturer_taxonomy)
manufacturer_taxonomy_import = self.import_manufacturer_data_connector(manufacturer_taxonomy_query, True, convert['id'])
if not manufacturer_taxonomy_import:
            return response_warning('Manufacturer ' + to_str(convert['id']) + ' import failed.')
self.insert_map(self.TYPE_MANUFACTURER, convert['id'], manufacturer_taxonomy_import, convert['code'])
thumbnail_id = False
cate_image = ''
if convert['thumb_image']['url'] or convert['thumb_image']['path']:
image_process = self.process_image_before_import(convert['thumb_image']['url'], convert['thumb_image']['path'])
image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(self.make_woocommerce_image_path(image_process['path'], self.TYPE_MANUFACTURER), self._notice['target']['config']['image_manufacturer'].rstrip('/')))
if image_import_path:
                cate_image = self.remove_prefix_path(image_import_path, self._notice['target']['config']['image_manufacturer'])
image_details = self.get_sizes(image_process['url'])
thumbnail_id = self.wp_image(cate_image, image_details)
if thumbnail_id:
            meta_insert = {
                'term_id': term_id,
                'meta_key': 'pwb_brand_image',
                'meta_value': thumbnail_id
            }
            meta_query = self.create_insert_query_connector('termmeta', meta_insert)
            self.import_data_connector(meta_query, 'manufacturer')
            meta_insert = {
                'term_id': term_id,
                'meta_key': 'thumbnail_id',
                'meta_value': thumbnail_id
            }
            meta_query = self.create_insert_query_connector('termmeta', meta_insert)
            self.import_data_connector(meta_query, 'manufacturer')
meta_insert = {
'term_id': term_id,
'meta_key': 'brand_image_url',
'meta_value': self._notice['target']['cart_url'].rstrip('/') + '/wp-content/uploads/' + cate_image.lstrip('/')
}
meta_query = self.create_insert_query_connector('termmeta', meta_insert)
self.import_data_connector(meta_query, 'manufacturer')
if self.is_wpml() or self._notice['target']['support']['yoast_seo']:
query_wpseo = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
}
options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_wpseo)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if not option_value:
option_value = dict()
if taxonomy not in option_value.keys():
option_value[taxonomy] = dict()
option_value[taxonomy][to_int(term_id)] = {
'wpseo_title': get_value_by_key_in_dict(convert, 'meta_title', ''),
'wpseo_desc': get_value_by_key_in_dict(convert, 'meta_description', ''),
'wpseo_linkdex': 0,
'wpseo_content_score': 0
}
data_set = {
'option_value': php_serialize(option_value)
}
where = {
'option_id': options_data['data'][0]['option_id'],
'option_name': 'wpseo_taxonomy_meta'
}
self.import_data_connector(self.create_update_query_connector('options', data_set, where), 'manufacturer')
else:
new_option_data = {
'option_name': 'wpseo_taxonomy_meta',
'option_value': php_serialize({
taxonomy: {
to_int(term_id): {
'wpseo_title': get_value_by_key_in_dict(convert, 'meta_title', ''),
'wpseo_desc': get_value_by_key_in_dict(convert, 'meta_description', ''),
'wpseo_linkdex': 0,
'wpseo_content_score': 0
}
}
}),
'autoload': 'yes'
}
self.import_data_connector(self.create_insert_query_connector('options', new_option_data), 'manufacturer')
return response_success(manufacturer_taxonomy_import)
def after_manufacturer_import(self, manufacturer_id, convert, manufacturer, manufacturers_ext):
return response_success()
def addition_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
return response_success()
# TODO: CATEGORY
def prepare_categories_import(self):
        super().prepare_categories_import()
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
self.query_data_connector({'type': 'query', 'query': query['query']})
return self
def prepare_categories_export(self):
return self
def get_categories_main_export(self):
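        """Select the next batch of 'product_cat' terms to export.

        With WPML enabled only default-language terms are selected
        (source_language_code IS NULL); their translations are joined in
        later by trid in get_categories_ext_export.
        """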
id_src = self._notice['process']['categories']['id_src']
limit = self._notice['setting']['categories']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id"
" WHERE tx.taxonomy = 'product_cat' AND tx.term_id > " + to_str(
id_src) + " AND t.term_id IS NOT NULL ORDER BY tx.term_id ASC LIMIT " + to_str(limit)
}
if self._notice['src']['support']['wpml']:
query = {
'type': 'select',
# 'query': "SELECT * FROM _DBPRF_term_taxonomy tt "
# "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
# "LEFT JOIN _DBPRF_icl_translations il ON tt.term_id = il.element_id "
# "WHERE tt.term_id and il.`source_language_code` is NULL and il.`element_type` = 'tax_product_cat' and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
# id_src) + " ORDER BY tt.term_id ASC LIMIT " + to_str(limit),
'query': "SELECT * FROM _DBPRF_term_taxonomy tt "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
"LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
"WHERE il.`element_type` = 'tax_product_cat' and il.`source_language_code` IS NULL and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
id_src) + " ORDER BY tt.term_id ASC LIMIT " + to_str(limit),
}
# categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
categories = self.select_data_connector(query, 'categories')
if not categories or categories['result'] != 'success':
            return response_error('could not get categories main to export')
return categories
def get_categories_ext_export(self, categories):
url_query = self.get_connector_url('query')
category_ids = duplicate_field_value_from_list(categories['data'], 'term_id')
parent_ids = duplicate_field_value_from_list(categories['data'], 'parent')
cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
taxonomy_type = 'product_cat' if not categories.get('is_blog') else 'category'
categories_ext_queries = {
'all_category': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id > 0 "
},
'seo_categories': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
"WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id IN " + self.list_to_in_condition(parent_ids)
}
}
if cart_version > 255:
categories_ext_queries['woocommerce_termmeta'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_termmeta WHERE term_id IN " + self.list_to_in_condition(
category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
}
else:
categories_ext_queries['woocommerce_termmeta'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_woocommerce_termmeta WHERE woocommerce_term_id IN " + self.list_to_in_condition(
category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
}
# add wpml
if self._notice['src']['support']['wpml']:
categories_ext_queries['icl_translations'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_icl_translations WHERE element_type = 'tax_product_cat' and element_id IN " + self.list_to_in_condition(
category_ids)
}
        categories_ext = self.select_multiple_data_connector(categories_ext_queries, 'categories')
if not categories_ext or categories_ext['result'] != 'success':
return response_warning()
thumb_id_list = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'meta_key', 'thumbnail_id')
thumbnail_ids = duplicate_field_value_from_list(thumb_id_list, 'meta_value')
thumb_ids_query = self.list_to_in_condition(thumbnail_ids)
categories_ext_rel_queries = {
'post_meta': {
'type': 'select',
'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
"LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' WHERE p.ID IN " + thumb_ids_query
}
        }
if self._notice['src']['support']['wpml']:
trids = duplicate_field_value_from_list(categories_ext['data']['icl_translations'], 'trid')
categories_ext_rel_queries['wpml_category_lang'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_icl_translations il "
"LEFT JOIN _DBPRF_term_taxonomy as tx ON il.element_id = tx.term_id "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
"WHERE il.element_type = 'tax_product_cat' and il.trid IN " + self.list_to_in_condition(trids)
}
        # TODO: add SEO ext-rel queries (getCategoriesSeoExtRelQuery in the PHP source)
# add custom
if categories_ext_rel_queries:
# categories_ext_rel = self.get_connector_data(url_query, {
# 'serialize': True,
# 'query': json.dumps(categories_ext_rel_queries)
# })
categories_ext_rel = self.select_multiple_data_connector(categories_ext_rel_queries, 'categories')
if not categories_ext_rel or categories_ext_rel['result'] != 'success':
return response_error()
categories_ext = self.sync_connector_object(categories_ext, categories_ext_rel)
return categories_ext
def convert_category_export(self, category, categories_ext):
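        """Convert a category term into the intermediate category format.

        Resolves the parent recursively via get_category_parent, reads the
        thumbnail from termmeta/postmeta, collects WPML translations by
        trid and pulls meta title/description/keyword from the Yoast
        'wpseo_taxonomy_meta' option when available.
        """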
category_data = self.construct_category() if not self.blog_running else self.construct_blog_category()
# category_data = self.add_construct_default(category_data)
parent = self.construct_category_parent() if not self.blog_running else self.construct_blog_category()
parent['id'] = 0
if category['parent'] and to_int(category['parent']) > 0:
parent_data = self.get_category_parent(category['parent'])
if parent_data['result'] == 'success' and parent_data['data']:
parent = parent_data['data']
        category_path = category_url = img_label = ''
        img_meta = None
cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
if cart_version > 255:
category_src = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'term_id', category['term_id'])
else:
category_src = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'woocommerce_term_id', category['term_id'])
if category_src:
category_img_id = self.get_value_metadata(category_src, 'thumbnail_id', 0)
img_meta = get_list_from_list_by_field(categories_ext['data']['post_meta'], 'ID', category_img_id)
if img_meta:
img_label = img_meta[0]['post_title']
category_path = to_str(img_meta[0]['meta_value'])
category_url = to_str(img_meta[0]['guid']).replace(category_path, '')
category_data['id'] = category['term_id']
category_data['code'] = category['slug']
category_data['name'] = category['name']
category_data['description'] = category['description']
category_data['parent'] = parent
category_data['active'] = True
category_data['thumb_image']['label'] = img_label
category_data['thumb_image']['url'] = category_url
category_data['thumb_image']['path'] = category_path
category_data['sort_order'] = 1
category_data['created_at'] = get_current_time()
category_data['updated_at'] = get_current_time()
category_data['category'] = category
category_data['categories_ext'] = categories_ext
# todo: woo2woo
category_data['display_type'] = self.get_value_metadata(category_src, 'display_type', '')
if self._notice['src']['support']['wpml']:
trid = get_row_value_from_list_by_field(categories_ext['data']['icl_translations'], 'element_id', category['term_taxonomy_id'], 'trid')
if trid:
languages_data = get_list_from_list_by_field(categories_ext['data']['wpml_category_lang'], 'trid', trid)
if languages_data:
for language_data in languages_data:
category_new_data = self.construct_category_lang()
category_new_data['id'] = language_data['term_id']
category_new_data['code'] = language_data['slug']
category_new_data['name'] = language_data['name']
category_new_data['description'] = language_data['description']
if to_int(language_data['term_id']) == to_int(category['term_id']):
category_data['language_default'] = language_data['language_code']
elif 'language_default' not in category_data and not language_data['source_language_code']:
category_data['language_default'] = language_data['language_code']
category_data['languages'][language_data['language_code']] = category_new_data
else:
category_language_data = self.construct_category_lang()
language_id = self._notice['src']['language_default']
category_language_data['name'] = category['name']
category_language_data['description'] = category['description']
category_data['languages'][language_id] = category_language_data
query_wpseo = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
}
options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_wpseo)})
if options_data and options_data['data']:
option_value = php_unserialize(options_data['data'][0]['option_value'])
if option_value and 'product_cat' in option_value:
if to_int(category['term_id']) in option_value['product_cat']:
category_data['meta_title'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_title', '')
category_data['meta_description'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_desc', '')
category_data['meta_keyword'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_focuskw', '')
# if self._notice['config']['seo']:
detect_seo = self.detect_seo()
category_data['seo'] = getattr(self, 'categories_' + detect_seo)(category, categories_ext)
return response_success(category_data)
def get_category_parent(self, parent_id):
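        """Return the converted parent category for parent_id.

        If the parent was already migrated (present in the map table) only
        its id is returned; otherwise it is selected and converted on the
        fly, which recurses up the category tree through
        convert_category_export.
        """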
type_map = self.TYPE_CATEGORY if not self.blog_running else self.TYPE_CATEGORY_BLOG
category_exist = self.select_map(self._migration_id, type_map, parent_id)
if category_exist:
return response_success({
'id': parent_id,
'code': ''
})
taxonomy_type = 'product_cat' if not self.blog_running else 'category'
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
"WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id = " + to_str(parent_id)
}
if self._notice['src']['support']['wpml']:
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
"LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
"WHERE il.`element_type` = 'tax_product_cat' AND il.`source_language_code` IS NULL AND tt.taxonomy = '" + taxonomy_type + "' and tt.term_id = " + to_str(parent_id),
}
categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
if not categories or categories['result'] != 'success':
return response_error('could not get category parent to export')
if categories and categories['data']:
category = categories['data'][0]
categories_ext = self.get_categories_ext_export(categories)
category_convert = self.convert_category_export(category, categories_ext)
return category_convert
return response_error('could not get category parent to export')
def get_category_id_import(self, convert, category, categories_ext):
return category['term_id']
def check_category_import(self, convert, category, categories_ext):
id_imported = self.get_map_field_by_src(self.TYPE_CATEGORY, convert['id'], convert['code'], lang = self._notice['target']['language_default'])
return True if id_imported else False
def router_category_import(self, convert, category, categories_ext):
return response_success('category_import')
def before_category_import(self, convert, category, categories_ext):
return response_success()
def category_import(self, convert, category, categories_ext):
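        """Import one category as terms + term_taxonomy rows ('product_cat').

        The new ids are stored in the map table together with the language
        code so translated categories can be linked afterwards;
        display_type is carried over via termmeta.
        """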
slug = self.sanitize_title(convert['name'])
language_code = convert.get('language_code')
        if (self.is_wpml() or self.is_polylang()) and not language_code:
language_code = self._notice['target']['language_default']
category_term = {
'name': convert['name'],
'slug': convert['code'] if convert['code'] else slug,
'term_group': 0,
}
category_term_query = self.create_insert_query_connector('terms', category_term)
term_id = self.import_data_connector(category_term_query, 'category')
if not term_id:
            return response_warning('Category ' + to_str(convert['id']) + ' import failed.')
taxonomy = 'product_cat'
category_taxonomy = {
'term_id': term_id,
'taxonomy': taxonomy,
'description': get_value_by_key_in_dict(convert, 'description', ''),
'parent': 0,
'count': 0
}
category_taxonomy_query = self.create_insert_query_connector('term_taxonomy', category_taxonomy)
category_taxonomy_import = self.import_category_data_connector(category_taxonomy_query, True, convert['id'])
if not category_taxonomy_import:
            return response_warning('Category ' + to_str(convert['id']) + ' import failed.')
        self.insert_map(self.TYPE_CATEGORY, convert['id'], category_taxonomy_import, slug, term_id, convert['name'], language_code)
all_queries = list()
meta_insert = {
'term_id': term_id,
'meta_key': 'display_type',
'meta_value': convert.get('display_type', '')
}
all_queries.append(self.create_insert_query_connector('termmeta', meta_insert))
if all_queries:
self.import_multiple_data_connector(all_queries, 'category')
return response_success()
def get_new_trid(self):
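        """Return max(trid) + 1 from icl_translations for a new WPML
        translation group.

        Note: this read-then-insert is not atomic; concurrent imports could
        compute the same trid. The migration is assumed to run
        single-threaded.
        """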
query = {
'type': 'select',
'query': "SELECT max(trid) as trid FROM _DBPRF_icl_translations"
}
trid = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
new_trid = 1
        if trid and trid['data']:
new_trid = to_int(trid['data'][0]['trid']) + 1
return new_trid
def after_category_import(self, category_id, convert, category, categories_ext):
return response_success()
def addition_category_import(self, convert, category, categories_ext):
return response_success()
# TODO: PRODUCT
def prepare_products_import(self):
        super().prepare_products_import()
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
self.query_data_connector({'type': 'query', 'query': query['query']})
if not self._notice['config']['add_new']:
file_name = get_pub_path() + '/media/' + to_str(self._migration_id) + '/variants.csv'
if os.path.isfile(file_name):
os.remove(file_name)
return self
def prepare_products_export(self):
return self
def get_products_main_export(self):
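        """Select the next batch of product posts to export.

        Skips 'inherit'/'auto-draft' statuses; with WPML only
        default-language products are selected and their translations are
        joined in later by trid.
        """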
id_src = self._notice['process']['products']['id_src']
limit = self._notice['setting']['products']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE ID > " + to_str(id_src) + " AND post_type = 'product' AND post_status NOT IN ('inherit','auto-draft') ORDER BY ID ASC LIMIT " + to_str(limit),
}
if self._notice['src']['support']['wpml']:
query = {
'type': 'select',
# 'query': "SELECT * FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
# "WHERE il.`element_type` = 'post_product' and il.`source_language_code` is NULL and p.ID and p.ID > " + to_str(
# id_src) + " AND p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') ORDER BY p.ID ASC LIMIT " + to_str(
# limit),
'query': "SELECT * FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
"WHERE il.`source_language_code` is NULL and il.`element_type` = 'post_product' AND p.ID > " + to_str(
id_src) + " AND p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') ORDER BY p.ID ASC LIMIT " + to_str(
limit),
}
# products = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success':
return response_error()
return products
def get_products_ext_export(self, products):
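        """Collect all rows related to a batch of products in bulk.

        Runs three query rounds: variations/term relations/linked products,
        then postmeta/attribute taxonomies/variation terms (plus WPML
        translations), then the image posts referenced by _thumbnail_id and
        _product_image_gallery, and merges the result sets into one dict.
        """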
url_query = self.get_connector_url('query')
product_ids = duplicate_field_value_from_list(products['data'], 'ID')
product_id_con = self.list_to_in_condition(product_ids)
# product_id_query = self.product_to_in_condition_seourl(product_ids)
linked = self.product_to_in_condition_linked(product_ids)
product_ext_queries = {
'post_variant': {
'type': "select",
'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'product_variation' AND post_parent IN " + product_id_con,
},
'term_relationship': {
'type': "select",
'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
"LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
"WHERE tr.object_id IN " + product_id_con,
},
'post_grouped': {
'type': "select",
'query': "SELECT * FROM _DBPRF_posts WHERE post_parent IN " + product_id_con + " AND post_type = 'product'",
},
'parent_link': {
'type': "select",
'query': "SELECT * FROM _DBPRF_postmeta WHERE meta_key IN ('_upsell_ids','_crosssell_ids') AND meta_value " + linked
},
}
if self._notice['src']['support']['wpml']:
product_ext_queries['icl_translations'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and element_id IN " + product_id_con
}
# products_ext = self.get_connector_data(url_query, {
# 'serialize': True, 'query': json.dumps(product_ext_queries)
# })
products_ext = self.select_multiple_data_connector(product_ext_queries, 'products')
if (not products_ext) or products_ext['result'] != 'success':
return response_error()
pro_child_ids = duplicate_field_value_from_list(products_ext['data']['post_variant'], 'ID')
all_product_ids = self.list_to_in_condition(list(set(pro_child_ids + product_ids)))
variant_id_query = self.list_to_in_condition(pro_child_ids)
taxonomy_duplicate = duplicate_field_value_from_list(products_ext['data']['term_relationship'], 'taxonomy')
attrs_taxonomy = self.get_list_from_list_by_field_as_first_key(taxonomy_duplicate, '', 'pa_')
attrs_name = list()
for attr_taxonomy in attrs_taxonomy:
attrs_name.append(self.substr_replace(attr_taxonomy, '', 0, 3))
attr_name_query = self.list_to_in_condition(attrs_name)
attr_values = duplicate_field_value_from_list(products_ext['data']['term_relationship'], 'term_id')
attr_values_query = self.list_to_in_condition(attr_values)
product_ext_rel_queries = {
'post_meta': {
'type': "select",
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + all_product_ids,
},
'woocommerce_attribute_taxonomies': {
'type': "select",
'query': "SELECT * FROM _DBPRF_woocommerce_attribute_taxonomies WHERE attribute_name IN " + attr_name_query,
},
'variation_term_relationship': {
'type': "select",
'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
"LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
"WHERE tr.object_id IN " + variant_id_query,
},
'term_attribute': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_terms WHERE term_id IN " + attr_values_query,
}
}
if self._notice['src']['support']['wpml']:
trids = duplicate_field_value_from_list(products_ext['data']['icl_translations'], 'trid')
product_ext_rel_queries['wpml_product_lang'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_icl_translations il "
"LEFT JOIN _DBPRF_posts as p ON il.element_id = p.ID "
"WHERE il.element_type = 'post_product' and il.trid IN " + self.list_to_in_condition(trids)
}
product_ext_rel_queries['wpml_product_meta'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN (SELECT element_id FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and trid IN " + self.list_to_in_condition(trids) + ")"
}
product_ext_rel_queries['wpml_term_relationship'] = {
'type': "select",
'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
"LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tr.object_id IN (SELECT element_id FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and trid IN " + self.list_to_in_condition(trids) + ")",
}
product_ext_rel_queries['attributes_icl_translations'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_icl_translations il "
"LEFT JOIN _DBPRF_term_taxonomy as tx ON il.element_id = tx.term_id "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
"WHERE il.element_type IN " + self.wpml_attributes_to_in_condition(
attrs_taxonomy)
}
# products_ext_rel = self.get_connector_data(url_query, {
# 'serialize': True, 'query': json.dumps(product_ext_rel_queries)
products_ext_rel = self.select_multiple_data_connector(product_ext_rel_queries, 'products')
if (not products_ext_rel) or products_ext_rel['result'] != 'success':
return response_error()
thumbnail_id_list = get_list_from_list_by_field(products_ext_rel['data']['post_meta'], 'meta_key', '_thumbnail_id')
thumbnail_ids = duplicate_field_value_from_list(thumbnail_id_list, 'meta_value')
        gallery_ids = list()
        gallery_ids_src = list()
gallery_list = get_list_from_list_by_field(products_ext_rel['data']['post_meta'], 'meta_key', '_product_image_gallery')
if gallery_list:
for gallery in gallery_list:
if 'meta_value' in gallery and gallery['meta_value']:
images_ids = gallery['meta_value'].split(',')
if images_ids:
gallery_ids = list(set(gallery_ids + images_ids))
            for image_id in gallery_ids:
                if image_id != '':
                    gallery_ids_src.append(image_id)
all_images_ids = list(set(thumbnail_ids + gallery_ids_src))
all_images_ids_query = self.list_to_in_condition(all_images_ids)
product_ext_rel_third_queries = {
'image': {
'type': 'select',
'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
"LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' "
"WHERE p.ID IN " + all_images_ids_query,
}
}
products_ext_third = self.get_connector_data(url_query, {
'serialize': True, 'query': json.dumps(product_ext_rel_third_queries)
})
if (not products_ext_third) or products_ext_third['result'] != 'success':
return response_error()
products_ext1 = self.sync_connector_object(products_ext_rel, products_ext_third)
products_ext = self.sync_connector_object(products_ext, products_ext1)
return products_ext
def convert_product_export(self, product, products_ext):
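        """Convert a product post plus its related rows into the
        intermediate product format: price, stock, dimensions, images,
        special price, cross-sells/upsells, tax class, categories, brand,
        tags, SEO, languages, attributes, add-ons, downloadable files,
        grouped children and variants.
        """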
product_meta = get_list_from_list_by_field(products_ext['data']['post_meta'], 'post_id', product['ID'])
product_data = self.construct_product()
product_data = self.add_construct_default(product_data)
product_data['id'] = product['ID']
product_data['code'] = product['post_name']
product_data['sku'] = self.get_value_metadata(product_meta, '_sku')
# todo: get type prd virtual
product_type = get_row_value_from_list_by_field(product_meta, 'meta_key', '_virtual', 'meta_value')
if product_type == 'yes':
product_data['type'] = 'virtual'
product_price = ''
if to_decimal(self.get_value_metadata(product_meta, '_regular_price', 0.0000)) > 0:
product_price = self.get_value_metadata(product_meta, '_regular_price', 0.0000)
else:
product_price = self.get_value_metadata(product_meta, '_price', 0.0000)
if product_price == '' or product_price == self.get_value_metadata(product_meta, '_min_variation_sale_price', 0.0000):
product_price = self.get_value_metadata(product_meta, '_min_variation_regular_price', 0.0000)
if product_price == '' or not product_price:
product_price = 0
product_data['price'] = product_price
product_data['weight'] = self.get_value_metadata(product_meta, '_weight', 0.0000)
product_data['length'] = self.get_value_metadata(product_meta, '_length', 0.0000)
product_data['width'] = self.get_value_metadata(product_meta, '_width', 0.0000)
product_data['height'] = self.get_value_metadata(product_meta, '_height', 0.0000)
product_data['status'] = True if product['post_status'] == "publish" else False
product_data['manage_stock'] = True if self.get_value_metadata(product_meta, '_manage_stock', 'no') == "yes" else False
if self.is_woo2woo():
product_data['is_in_stock'] = self.get_value_metadata(product_meta, '_stock_status', 'instock')
product_data['sold_individually'] = self.get_value_metadata(product_meta, '_sold_individually', '')
product_data['purchase_note'] = self.get_value_metadata(product_meta, '_purchase_note', '')
else:
product_data['is_in_stock'] = True if self.get_value_metadata(product_meta, '_stock_status', 'instock') == "instock" else False
product_data['qty'] = to_int(to_decimal(self.get_value_metadata(product_meta, '_stock', 0))) if to_decimal(self.get_value_metadata(product_meta, '_stock', 0)) > 0 else 0
product_data['created_at'] = convert_format_time(product['post_date'])
product_data['updated_at'] = convert_format_time(product['post_modified'])
product_data['name'] = product['post_title']
product_data['description'] = product['post_content']
product_data['short_description'] = product['post_excerpt']
product_data['menu_order'] = product['menu_order']
product_data['sort_order'] = product['menu_order']
product_data['backorders'] = self.get_value_metadata(product_meta, '_backorders', 'no')
product_data['meta_description'] = self.get_value_metadata(product_meta, '_yoast_wpseo_metadesc', '')
product_data['meta_title'] = self.get_value_metadata(product_meta, '_yoast_wpseo_title', '')
if product_data['meta_title']:
product_data['meta_title'] = product_data['meta_title'].replace('%%title%%', product_data['name']).replace('%%page%%', '').replace('%%sep%%', '-').replace('%%sitename%%', '')
# image_
thumbnail_id = self.get_value_metadata(product_meta, '_thumbnail_id', 0)
if thumbnail_id:
thumbnail_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', thumbnail_id)
if thumbnail_src:
product_data['thumb_image']['label'] = thumbnail_src[0]['post_title']
product_data['thumb_image']['url'] = self._notice['src']['cart_url'].rstrip('/') + '/wp-content/uploads/' + to_str(thumbnail_src[0]['meta_value']).lstrip('/')
product_data['thumb_image']['url'] = to_str(product_data['thumb_image']['url']).replace('uploads/uploads', 'uploads')
gallery_ids = self.get_value_metadata(product_meta, '_product_image_gallery', '')
if gallery_ids:
gallery_ids = gallery_ids.split(',')
for gallery_id in gallery_ids:
image_gallery_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', gallery_id)
product_image_data = self.construct_product_image()
if image_gallery_src:
product_image_data['label'] = image_gallery_src[0]['post_title']
product_image_data['url'] = self._notice['src']['cart_url'].rstrip('/') + '/wp-content/uploads/' + image_gallery_src[0]['meta_value'].lstrip('/')
product_image_data['url'] = to_str(product_image_data['url']).replace('uploads/uploads', 'uploads')
product_data['images'].append(product_image_data)
sale_price = self.get_value_metadata(product_meta, '_sale_price', '')
if sale_price != '':
product_data['special_price']['price'] = to_decimal(sale_price)
start_date = self.get_value_metadata(product_meta, '_sale_price_dates_from', '')
if start_date:
product_data['special_price']['start_date'] = convert_format_time(start_date)
end_date = self.get_value_metadata(product_meta, '_sale_price_dates_to', '')
if end_date:
product_data['special_price']['end_date'] = convert_format_time(self.get_value_metadata(product_meta, '_sale_price_dates_to', ''))
else:
product_data['special_price']['price'] = self.get_value_metadata(product_meta, '_min_variation_sale_price', 0.0000)
if not product_data['special_price']['price']:
product_data['special_price']['price'] = 0
crosssell_ids = self.get_value_metadata(product_meta, '_crosssell_ids', '')
if crosssell_ids:
crosssell_ids_data = php_unserialize(crosssell_ids)
if crosssell_ids_data:
for crosssell_id in crosssell_ids_data:
relation = self.construct_product_relation()
relation['id'] = crosssell_id
relation['type'] = self.PRODUCT_CROSS
product_data['relate']['children'].append(relation)
parent_crosssell_list = get_list_from_list_by_field(products_ext['data']['parent_link'], 'meta_key', '_crosssell_ids')
if parent_crosssell_list:
for parent_crosssell in parent_crosssell_list:
if parent_crosssell['meta_value'].find(':' + to_str(product['ID']) + ';') != -1:
relation = self.construct_product_relation()
relation['id'] = parent_crosssell['post_id']
relation['type'] = self.PRODUCT_CROSS
product_data['relate']['parent'].append(relation)
upsell_ids = self.get_value_metadata(product_meta, '_upsell_ids', '')
if upsell_ids:
upsell_ids_data = php_unserialize(upsell_ids)
if upsell_ids_data:
for upsell_id in upsell_ids_data:
relation = self.construct_product_relation()
relation['id'] = upsell_id
relation['type'] = self.PRODUCT_UPSELL
product_data['relate']['children'].append(relation)
parent_upsell_list = get_list_from_list_by_field(products_ext['data']['parent_link'], 'meta_key', '_upsell_ids')
if parent_upsell_list:
for parent_upsell in parent_upsell_list:
if parent_upsell['meta_value'].find(':' + to_str(product['ID']) + ';') != -1:
relation = self.construct_product_relation()
relation['id'] = parent_upsell['post_id']
relation['type'] = self.PRODUCT_UPSELL
product_data['relate']['parent'].append(relation)
product_data['tax']['code'] = self.get_value_metadata(product_meta, '_tax_class', 'standard') if self.get_value_metadata(product_meta, '_tax_status', 'taxable') != 'none' else None
product_data['tax']['status'] = self.get_value_metadata(product_meta, '_tax_status', 'taxable')
# category product
term_relationship = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
category_src = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_cat')
if category_src:
for product_category in category_src:
product_category_data = self.construct_product_category()
product_category_data['id'] = product_category['term_id']
product_category_data['code'] = product_category['slug']
product_data['categories'].append(product_category_data)
if self._notice['src']['support']['manufacturers']:
manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'product_brand')
            if not manu_src:
                manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'brand')
            if not manu_src:
                manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'pwb-brand')
if manu_src:
product_manufacturer_data = dict()
product_manufacturer_data['id'] = manu_src['term_id']
product_manufacturer_data['name'] = manu_src['name']
product_manufacturer_data['code'] = manu_src['slug']
product_data['manufacturer'] = product_manufacturer_data
# tags
product_tags = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_tag')
if product_tags:
tags = list()
for product_tag in product_tags:
tags.append(product_tag['name'])
if tags:
product_data['tags'] = ','.join(tags)
# if self._notice['config']['seo']:
detect_seo = self.detect_seo()
product_data['seo'] = getattr(self, 'products_' + detect_seo)(product, products_ext)
# TODO: convert product languages
if self._notice['src']['support']['wpml']:
trid = get_row_value_from_list_by_field(products_ext['data']['icl_translations'], 'element_id', product['ID'], 'trid')
if trid:
language_datas = get_list_from_list_by_field(products_ext['data']['wpml_product_lang'], 'trid', trid)
if language_datas:
for language_data in language_datas:
if not language_data['post_title']:
continue
meta_language_datas = get_list_from_list_by_field(products_ext['data']['wpml_product_meta'], 'post_id', language_data['ID'])
term_relationship_language = get_list_from_list_by_field(products_ext['data']['wpml_term_relationship'], 'object_id', language_data['ID'])
product_new_data = self.construct_product_lang()
product_new_data['name'] = language_data['post_title']
product_new_data['code'] = language_data['post_name']
product_new_data['description'] = language_data['post_content']
product_new_data['short_description'] = language_data['post_excerpt']
product_new_data['meta_description'] = self.get_value_metadata(meta_language_datas, '_yoast_wpseo_metadesc', '')
product_new_data['meta_title'] = self.get_value_metadata(meta_language_datas, '_yoast_wpseo_title', '')
if product_new_data['meta_title']:
product_new_data['meta_title'] = product_new_data['meta_title'].replace('%%title%%', product_new_data['name']).replace('%%page%%', '').replace('%%sep%%', '-').replace('%%sitename%%', '')
wpml_product_tags = get_list_from_list_by_field(term_relationship_language, 'taxonomy', 'product_tag')
if wpml_product_tags:
wpml_tags = list()
for wpml_product_tag in wpml_product_tags:
wpml_tags.append(wpml_product_tag['name'])
if wpml_tags:
product_new_data['tags'] = ','.join(wpml_tags)
if not language_data['source_language_code']:
product_data['language_default'] = language_data['language_code']
product_data['languages'][language_data['language_code']] = product_new_data
else:
product_language_data = self.construct_product_lang()
product_language_data['name'] = product['post_title']
product_language_data['description'] = product['post_content']
product_language_data['short_description'] = product['post_excerpt']
language_id = self._notice['src']['language_default']
product_data['languages'][language_id] = product_language_data
# attribute product
product_child_src = get_list_from_list_by_field(products_ext['data']['post_variant'], 'post_parent', product['ID'])
# todo: attribute
product_attribute = get_row_value_from_list_by_field(product_meta, 'meta_key', '_product_attributes', 'meta_value')
product_attribute = php_unserialize(product_attribute)
if isinstance(product_attribute, str):
product_attribute = php_unserialize(product_attribute)
src_option_values = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
attribute_variants = list()
if product_attribute:
for attribute_key, attribute in product_attribute.items():
if to_int(attribute.get('is_taxonomy')) > 0:
woo_attribute = get_row_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', to_str(attribute_key).replace('pa_', ''))
if not woo_attribute:
woo_attribute = get_row_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', to_str(attribute['name']).replace('pa_', ''))
else:
woo_attribute = None
if woo_attribute:
# attributes
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = woo_attribute['attribute_id']
option_code = to_str(woo_attribute['attribute_name']).lower()
attribute_data['option_code'] = option_code.strip()
attribute_data['option_type'] = woo_attribute['attribute_type']
attribute_data['option_name'] = woo_attribute['attribute_label']
attribute_data['option_group'] = woo_attribute['attribute_orderby']
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
attribute_data['is_taxonomy'] = True if to_int(attribute.get('is_taxonomy')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = woo_attribute['attribute_label']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
tmp_values = list()
desc = list()
for option_value in src_option_values:
attribute_name = 'pa_' + to_str(woo_attribute['attribute_name']).lower()
if 'taxonomy' in option_value:
if option_value['taxonomy'] == attribute_name:
woo_term_values = get_list_from_list_by_field(
products_ext['data']['term_attribute'], 'term_id', option_value['term_id'])
if woo_term_values:
for woo_term in woo_term_values:
attribute_value = woo_term['name']
if woo_attribute['attribute_type'] in ['select', 'alg_wc_civs_image']:
option_values = to_str(woo_term['name']).split('|')
if option_values and to_len(option_values) > 1:
attribute_value = ';'.join(option_values)
tmp_values.append(attribute_value)
desc.append(option_value['description'])
values = list(map(lambda x: x.strip(), tmp_values))
if values and to_len(values) > 1:
attribute_data['option_type'] = self.OPTION_MULTISELECT
attribute_data['option_value_name'] = ';'.join(values)
attribute_data['option_value_description'] = ';'.join(desc)
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': ';'.join(values)
}
if (to_int(attribute.get('is_variation')) == 1 or to_str(attribute.get('variation')) == 'yes') and not self.is_woo2woo():
attribute_variants.append(attribute_data)
else:
product_data['attributes'].append(attribute_data)
else:
if ('is_visible' in attribute and to_int(attribute['is_visible']) == 1) or ('visible' in attribute and attribute['visible'] == 'yes'):
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = None
option_code = to_str(attribute['name']).lower()
attribute_data['option_code'] = option_code.lower().strip()
attribute_data['option_type'] = 'text'
attribute_data['option_name'] = attribute['name']
attribute_data['option_group'] = 'menu_order'
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = attribute['name']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
attribute_value = attribute['value']
if attribute_value and attribute_value != '':
option_values = list()
if isinstance(attribute_value, dict):
for key, attr_value in attribute_value.items():
option_values.append(attr_value)
else:
option_values = attribute_value.split('|')
if option_values and to_len(option_values) > 1:
                                attribute_data['option_type'] = self.OPTION_MULTISELECT
option_values = list(map(lambda x: x.strip(), option_values))
attribute_value = ';'.join(option_values)
attribute_data['option_value_name'] = attribute_value
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': attribute_value
}
# product_data['attributes'].append(attribute_data)
else:
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = None
option_code = to_str(attribute['name']).lower()
attribute_data['option_code'] = option_code.lower().strip()
attribute_data['option_type'] = 'text'
attribute_data['option_name'] = attribute['name']
attribute_data['option_group'] = 'menu_order'
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = attribute['name']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
option_values = attribute['value']
if option_values != '':
option_values = option_values.split('|')
if option_values and to_len(option_values) > 1:
attribute_data['option_type'] = self.OPTION_MULTISELECT
option_values = list(map(lambda x: x.strip(), option_values))
option_values = ';'.join(option_values)
attribute_data['option_value_name'] = option_values
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': option_values
}
if (to_int(attribute.get('is_variation')) == 1 or to_str(attribute.get('variation')) == 'yes') and not self.is_woo2woo():
attribute_variants.append(attribute_data)
else:
product_data['attributes'].append(attribute_data)
# end
# todo: plugin add-ons
if self._notice['src']['support'].get('addons') and not self.is_woo2woo():
product_addons = get_row_value_from_list_by_field(product_meta, 'meta_key', '_product_addons', 'meta_value')
product_addons = php_unserialize(product_addons)
if product_addons and to_len(product_addons) > 0:
for product_addon in product_addons:
if not product_addon.get('options') or to_len(product_addon['options']) == 0:
continue
if product_addon.get('type') == 'radiobutton':
option_type = self.OPTION_RADIO
else:
option_type = self.OPTION_SELECT
product_option = self.construct_product_option()
product_option['code'] = self.convert_attribute_code(product_addon.get('name'))
product_option['option_code'] = self.convert_attribute_code(product_addon.get('name'))
product_option['option_name'] = product_addon.get('name')
product_option['type'] = option_type
product_option['position'] = product_addon.get('position')
product_option['required'] = True if product_addon.get('required') and to_int(product_addon.get('required')) == 1 else False
product_addon_options = list()
if isinstance(product_addon.get('options'), dict):
for key, product_addon_value in product_addon['options'].items():
product_addon_options.append(product_addon_value)
else:
product_addon_options = product_addon.get('options')
for product_addon_value in product_addon_options:
product_option_value = self.construct_product_option_value()
product_option_value['code'] = self.convert_attribute_code(product_addon_value.get('label'))
product_option_value['option_value_code'] = self.convert_attribute_code(product_addon_value.get('label'))
product_option_value['option_value_name'] = product_addon_value.get('label')
product_option_value['option_value_price'] = product_addon_value.get('price')
if 'Color' in product_addon.get('name', '') or 'Colour' in product_addon.get('name', ''):
if 'RNBP' in product_addon_value.get('label', ''):
product_option_value['thumb_image']['path'] = self.convert_attribute_code(to_str(product_addon_value.get('label')).replace(' (RNBP)', '')) + '.jpg'
product_option_value['thumb_image']['url'] = self._notice['src']['cart_url'].rstrip('/') + '/assets/blind-images/rnbp/'
product_option['values'].append(product_option_value)
product_data['options'].append(product_option)
# todo: downloadable
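# `_downloadable_files` unserializes to a dict keyed by a file hash; an illustrative shape
# (values are examples only):
#   {'9d7a...': {'id': '9d7a...', 'name': 'User manual', 'file': 'https://example.com/wp-content/uploads/manual.pdf'}}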
product_downloadables = get_row_value_from_list_by_field(product_meta, 'meta_key', '_downloadable_files', 'meta_value')
product_downloadables = php_unserialize(product_downloadables)
if product_downloadables:
product_data['type'] = self.PRODUCT_DOWNLOAD
for key, product_downloadable in product_downloadables.items():
download_data = self.construct_product_downloadable()
download_data['limit'] = get_row_value_from_list_by_field(product_meta, 'meta_key', '_download_limit', 'meta_value')
download_data['max_day'] = get_row_value_from_list_by_field(product_meta, 'meta_key', '_download_expiry', 'meta_value')
name_file = to_str(product_downloadable['file']).split('/') if product_downloadable.get('file') else None
if self._notice['src']['cart_url'] in product_downloadable['file'] and name_file:
# use the file base name (the original assigned the full split list here)
download_data['name'] = name_file[to_len(name_file) - 1]
download_data['path'] = 'woocommerce/' + to_str(name_file[to_len(name_file) - 1]).lower()
else:
download_data['name'] = product_downloadable['name']
download_data['path'] = product_downloadable['file']
# note (translated from Vietnamese): max_day and limit may still be missing here
product_data['downloadable'].append(download_data)
# todo: group product
child_group_product = self.get_value_metadata(product_meta, '_children', '')
if child_group_product:
child_group_product = php_unserialize(child_group_product)
if child_group_product and to_len(child_group_product) > 0:
for child_group_product_id in child_group_product:
product_data['group_child_ids'].append({
'id': child_group_product_id
})
product_data['type'] = self.PRODUCT_GROUP
# todo: child product
product_child_src = get_list_from_list_by_field(products_ext['data']['post_variant'], 'post_parent', product['ID'])
all_child = dict()
child_attributes = dict()
if product_child_src:
product_data['type'] = self.PRODUCT_CONFIG
for product_child in product_child_src:
child_attributes[product_child['ID']] = dict()
child_data = self.construct_product_child()
child_data = self.add_construct_default(child_data)
child_meta = get_list_from_list_by_field(products_ext['data']['post_meta'], 'post_id', product_child['ID'])
child_data['id'] = product_child['ID']
child_data['sku'] = self.get_value_metadata(child_meta, '_sku', '') if self.get_value_metadata(child_meta, '_sku', '') else self.get_value_metadata(product_meta, '_sku', '')
child_data['code'] = product_child['post_name']
child_product_price = self.get_value_metadata(child_meta, '_regular_price', '')
if not child_product_price:
child_product_price = self.get_value_metadata(child_meta, '_price', '')
if not child_product_price:
child_product_price = 0
child_data['price'] = child_product_price
child_data['weight'] = self.get_value_metadata(child_meta, '_weight') if self.get_value_metadata(child_meta, '_weight') else product_data['weight']
child_data['length'] = self.get_value_metadata(child_meta, '_length') if self.get_value_metadata(child_meta, '_length') else product_data['length']
child_data['width'] = self.get_value_metadata(child_meta, '_width') if self.get_value_metadata(child_meta, '_width') else product_data['width']
child_data['height'] = self.get_value_metadata(child_meta, '_height') if self.get_value_metadata(child_meta, '_height') else product_data['height']
child_data['status'] = True if product_child['post_status'] == "publish" else False
child_data['manage_stock'] = True if self.get_value_metadata(child_meta, '_manage_stock') == 'yes' else False
if self.is_woo2woo():
child_data['is_in_stock'] = self.get_value_metadata(child_meta, '_stock_status', 'instock')
child_data['sold_individually'] = self.get_value_metadata(child_meta, '_sold_individually', '')
child_data['purchase_note'] = self.get_value_metadata(child_meta, '_purchase_note', '')
else:
child_data['is_in_stock'] = True if self.get_value_metadata(child_meta, '_stock_status', 'instock') == "instock" else False
child_data['qty'] = to_int(to_decimal(self.get_value_metadata(child_meta, '_stock'))) if self.get_value_metadata(child_meta, '_stock') else 0
child_data['created_at'] = convert_format_time(product_child['post_date'])
child_data['updated_at'] = convert_format_time(product_child['post_modified'])
child_data['name'] = product_child['post_title']
child_data['description'] = self.get_value_metadata(child_meta, '_variation_description')
child_data['tax']['code'] = self.get_value_metadata(child_meta, '_tax_class', 'standard')
child_data['short_description'] = ''
# image_
thumbnail_id = self.get_value_metadata(child_meta, '_thumbnail_id')
if thumbnail_id:
thumbnail_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', thumbnail_id)
if thumbnail_src:
child_data['thumb_image']['label'] = thumbnail_src[0]['post_title']
child_data['thumb_image']['path'] = thumbnail_src[0]['meta_value']
child_data['thumb_image']['url'] = to_str(thumbnail_src[0]['guid']).replace(thumbnail_src[0]['meta_value'], '')
sale_price = self.get_value_metadata(child_meta, '_sale_price')
if sale_price != '':
child_data['special_price']['price'] = sale_price
child_data['special_price']['start_date'] = convert_format_time(self.get_value_metadata(child_meta, '_sale_price_dates_from'))
child_data['special_price']['end_date'] = convert_format_time(self.get_value_metadata(child_meta, '_sale_price_dates_to'))
child_product_language_data = self.construct_product_lang()
child_product_language_data['name'] = product_child['post_title']
child_product_language_data['description'] = self.get_value_metadata(child_meta, '_variation_description')
child_product_language_data['short_description'] = product_child['post_excerpt']
language_id = self._notice['src']['language_default']
child_data['languages'][language_id] = child_product_language_data
attr_child = self.get_list_from_list_by_field_as_first_key(child_meta, 'meta_key', 'attribute_')
child_data['options'] = list()
child_data['attributes'] = list()
for attribute in attr_child:
# attribute
attribute_child_data = self.construct_product_attribute()
attr_name = to_str(attribute['meta_key']).replace('attribute_', '')
element_type = 'tax_' + attr_name
attr_name = attr_name.replace('pa_', '')
attr_name = attr_name.strip()
option_id = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_id')
attribute_child_data['option_id'] = option_id if option_id else ''
option_name = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label')
attribute_child_data['option_name'] = option_name if option_name else attr_name
option_code = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name')
attribute_child_data['option_code'] = option_code if option_code else attr_name.lower()
option_type = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type')
# attribute_child_data['option_type'] = option_type if option_type else 'select'
attribute_child_data['option_type'] = self.OPTION_SELECT
option_group = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby')
attribute_child_data['option_group'] = option_group if option_group else 'menu_order'
# attribute language
child_attribute_language_data = self.construct_product_option_lang()
child_attribute_language_data['option_name'] = attribute_child_data['option_name']
language_id = self._notice['src']['language_default']
attribute_child_data['option_languages'][language_id] = child_attribute_language_data
# values
attribute_child_data['option_value_id'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'term_id')
option_value_name = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name')
attribute_child_data['option_value_name'] = option_value_name if option_value_name else attribute['meta_value']
attribute_child_data['option_value_code'] = to_str(attribute['meta_value']).lower()
attribute_child_data['option_value_description'] = get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') if get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') else ''
language_id = self._notice['src']['language_default']
child_attribute_value_language_data = self.construct_product_option_value_lang()
child_attribute_value_language_data['option_value_name'] = option_value_name if option_value_name else attribute['meta_value']
attribute_child_data['option_value_languages'][language_id] = child_attribute_value_language_data
child_data['attributes'].append(attribute_child_data)
# options
child_option_data = self.construct_product_option()
child_option_data['id'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_id')
child_option_data['code'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name') else attr_name.lower()
child_option_data['option_name'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label') else attr_name
child_option_data['option_code'] = child_option_data['code']
child_option_data['option_group'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby') else 'menu_order'
# child_option_data['option_type'] = self.OPTION_SELECT
child_option_data['option_type'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type') else 'select'
child_option_data['required'] = 1
# option language
child_option_language_data = self.construct_product_option_lang()
child_option_language_data['option_name'] = attr_name
language_id = self._notice['src']['language_default']
child_option_data['option_languages'][language_id] = child_option_language_data
# value option
child_option_value_data = self.construct_product_option_value()
child_option_value_data['id'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'term_id')
child_option_value_data['code'] = attribute['meta_value']
child_option_value_data['option_value_code'] = attribute['meta_value']
child_option_value_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else child_option_value_data['code']
child_option_value_data['option_value_description'] = get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') if get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') else ''
# value language
child_option_value_language_data = self.construct_product_option_value_lang()
child_option_value_language_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name')
language_id = self._notice['src']['language_default']
child_option_value_data['option_value_languages'][language_id] = child_option_value_language_data
child_option_data['values'].append(child_option_value_data)
child_attributes[product_child['ID']][child_option_data['option_name']] = child_option_value_data['option_value_name']
all_child[to_str(product_child['ID'])] = child_data
# todo: bundle product - product bundle plugin: WPC Product Bundles for WooCommerce (Premium)
if self._notice['src']['support']['product_bundle']:
product_data['bundle_selection'] = list()
product_bundles = get_row_value_from_list_by_field(product_meta, 'meta_key', 'woosb_ids', 'meta_value')
if product_bundles:
product_data['type'] = self.PRODUCT_BUNDLE
product_bundle_list = to_str(product_bundles).split(',')
if product_bundle_list and to_len(product_bundle_list) > 0:
for product_bundle_child in product_bundle_list:
product_bundle_ids = to_str(product_bundle_child).split('/')
if product_bundle_ids and to_len(product_bundle_ids) > 0:
product_bundle_data = {
'product_id': product_bundle_ids[0],
'selection_qty': product_bundle_ids[1] if to_len(product_bundle_ids) > 1 else 1
}
product_data['bundle_selection'].append(product_bundle_data)
if self.is_woo2woo():
product_data['children'] = list(all_child.values())
else:
len_child = 1
for attribute_variant in attribute_variants:
len_child *= to_len(attribute_variant['option_value_name'].split(';'))
options_src = dict()
for attribute_variant in attribute_variants:
values = to_str(attribute_variant['option_value_name']).split(';')
option_data = self.construct_product_option()
option_data['id'] = attribute_variant['option_id']
option_data['option_name'] = attribute_variant['option_name']
option_data['option_code'] = attribute_variant['option_code']
option_data['option_type'] = 'select'
for value in values:
if len_child > self.VARIANT_LIMIT:
option_data_value = self.construct_product_option_value()
option_data_value['option_value_name'] = value
option_data['values'].append(option_data_value)
opt_val = {
'option_name': attribute_variant['option_name'],
'option_code': attribute_variant['option_code'],
'option_languages': attribute_variant['option_languages'],
'option_id': attribute_variant['option_id'],
'option_value_name': value,
}
if attribute_variant['option_name'] not in options_src:
options_src[attribute_variant['option_name']] = list()
options_src[attribute_variant['option_name']].append(opt_val)
if len_child > self.VARIANT_LIMIT:
product_data['options'].append(option_data)
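# `combination_from_multi_dict` is expected to build the cartesian product of the per-option
# value lists in options_src; a minimal sketch of the idea (hypothetical data):
#   {'Color': [red, blue], 'Size': [s]} -> [[red, s], [blue, s]]
# each combination is then matched back to a concrete variant row via check_sync_child.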
if len_child <= self.VARIANT_LIMIT and child_attributes:
combinations = self.combination_from_multi_dict(options_src)
list_child = list()
if combinations:
for combination in combinations:
if not combination:
continue
children_id = None
check_any = False
for child_id, child in child_attributes.items():
if self.check_sync_child(child, combination) and child_id not in list_child:
children_id = child_id
list_child.append(child_id)
break
if not children_id:
for child_id, child in child_attributes.items():
if self.check_sync_child(child, combination, True) and child_id not in list_child:
children_id = child_id
check_any = True
break
if not children_id:
continue
child = copy.deepcopy(all_child[children_id])
child['attributes'] = list()
for attribute in combination:
attribute_data = self.construct_product_attribute()
attribute_data['option_name'] = attribute['option_name']
attribute_data['option_code'] = attribute['option_code']
attribute_data['option_languages'] = attribute['option_languages']
attribute_data['option_id'] = attribute['option_id']
attribute_data['option_value_name'] = attribute['option_value_name']
child['attributes'].append(attribute_data)
product_data['children'].append(child)
else:
if attribute_variants:
product_data['attributes'] = attribute_variants
return response_success(product_data)
def get_product_id_import(self, convert, product, products_ext):
return product['ID']
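# check_product_import back-fills global attributes for an already-imported product. The
# WooCommerce chain it reproduces is: woocommerce_attribute_taxonomies (the attribute) ->
# terms (the value) -> term_taxonomy (taxonomy 'pa_<code>') -> term_relationships (link to
# the product), plus the serialized `_product_attributes` post meta at the end.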
def check_product_import(self, convert, product, products_ext):
language_code = convert.get('language')
if self.is_wpml() and not language_code:
language_code = self._notice['target']['language_default']
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['id'], convert['code'], language_code)
all_queries = list()
data_attr = dict()
if convert['attributes']:
position = 0
for attribute in convert['attributes']:
check_attribute_exist = bool(self.select_map(self._migration_id, self.TYPE_ATTR, None, None, attribute['option_name'], None, None, language_code))
if not check_attribute_exist:
data_attribute = {
'attribute_name': attribute['option_code'],
'attribute_label': attribute['option_code'],
'attribute_type': attribute['option_type'],
'attribute_orderby': 'menu_order',
'attribute_public': attribute['option_name'],
}
attribute_query = self.create_insert_query_connector('woocommerce_attribute_taxonomies', data_attribute)
attribute_map_id = self.import_product_data_connector(attribute_query, True, convert['id'])
if not attribute_map_id:
return response_error('Attribute ' + to_str(attribute['option_id']) + ' import failed.')
self.insert_map(self.TYPE_ATTR, attribute['option_id'], attribute_map_id, attribute['option_code'],)
else:
attribute_map_id = self.get_map_field_by_src(self.TYPE_ATTR, attribute['option_id'], attribute['option_code'])
check_attribute_value_exist = bool(self.select_map(self._migration_id, self.TYPE_ATTR_VALUE, None, None, attribute['option_value_name'], None, None, language_code))
if not check_attribute_value_exist:
data_attribute_value = {
'name': self.sanitize_title(attribute['option_value_name']),
'slug': self.sanitize_title(attribute['option_value_name']),
'term_group': 0,
}
attribute_value_query = self.create_insert_query_connector('terms', data_attribute_value)
attribute_value_id = self.import_product_data_connector(attribute_value_query, True, convert['id'])
if not attribute_value_id:
return response_error('Attribute value ' + to_str(attribute['option_id']) + ' import failed.')
self.insert_map(self.TYPE_ATTR_VALUE, attribute_map_id, attribute_value_id, self.sanitize_title(attribute['option_value_name']), )
else:
attribute_value_id = self.get_map_field_by_src(self.TYPE_ATTR_VALUE, attribute_map_id, self.sanitize_title(attribute['option_value_name']))
attr_taxonomy = 'pa_' + attribute['option_code']
check_attribute_term_exist = bool(self.select_map(self._migration_id, self.TYPE_ATTR_OPTION, attribute_value_id, None, None, None, None, language_code))
if not check_attribute_term_exist:
data_attribute_term = {
'term_id': attribute_value_id,
'taxonomy': attr_taxonomy,
'description': '',
'parent': 0,
'count': 0,
}
data_attribute_term_query = self.create_insert_query_connector('term_taxonomy', data_attribute_term)
term_taxonomy_id = self.import_product_data_connector(data_attribute_term_query, True, convert['id'])
self.insert_map(self.TYPE_ATTR_OPTION, attribute_value_id, term_taxonomy_id, attr_taxonomy)
else:
term_taxonomy_id = self.get_map_field_by_src(self.TYPE_ATTR_OPTION, attribute_value_id, attr_taxonomy)
data_terms_relationship = {
'object_id': product_id,
'term_taxonomy_id': term_taxonomy_id,
'term_order': 0
}
all_queries.append(self.create_insert_query_connector('term_relationships', data_terms_relationship))
data_attr[attr_taxonomy] = {
'name': attr_taxonomy,
'value': attribute['option_value_name'],
'position': position,
'is_visible': 1,
'is_variation': 0,
'is_taxonomy': 1
}
position += 1
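# When serialized below, data_attr matches WooCommerce's `_product_attributes` format,
# e.g. (illustrative): {'pa_color': {'name': 'pa_color', 'value': 'Red', 'position': 0,
# 'is_visible': 1, 'is_variation': 0, 'is_taxonomy': 1}}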
if data_attr:
data_update = {
'post_id': product_id,
'meta_key': '_product_attributes',
'meta_value': php_serialize(data_attr)
}
where = {
'post_id': product_id,
'meta_key': '_product_attributes'
}
all_queries.append(self.create_update_query_connector('postmeta', data_update, where))
if all_queries:
self.import_multiple_data_connector(all_queries, 'products')
return self.get_map_field_by_src(self.TYPE_PRODUCT, convert['id'], convert['code'], lang = self._notice['target']['language_default'])
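# Note on the price meta written below: in WooCommerce `_price` is the effective selling
# price (used for sorting and filtering); while a sale is active it should equal
# `_sale_price`, otherwise `_regular_price`, which is what the special-price checks implement.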
def update_latest_data_product(self, product_id, convert, product, products_ext):
all_query = list()
language_code = convert.get('language_code')
if self.is_wpml() and not language_code:
language_code = self._notice['target']['language_default']
# todo: update product name
# begin
product_query = self.create_update_query_connector("posts", {'ID': product_id, 'post_title': convert['name']}, {'ID': product_id})
all_query.append(product_query)
# end
old_url_key = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['id'], convert['code'], 'code_desc')
# todo: update product category
# begin
category_desc = self.select_all_category_map()
all_categories = list()
for category in convert['categories']:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], lang = language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], lang = language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, lang = language_code)
if category_id:
all_categories.append(category_id)
all_categories = list(set(all_categories))
# todo: delete old category product
query_cate = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_relationships` WHERE `object_id` = " + to_str(product_id) + " AND `term_taxonomy_id` IN " + self.list_to_in_condition(category_desc) + ""
}
self.query_data_connector(query_cate, 'update_product')
for cate_id in all_categories:
query_cate_prod = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_relationships` WHERE `object_id` = " + to_str(product_id) + " AND `term_taxonomy_id` = " + to_str(cate_id) + ""
}
check_product_category = self.select_data_connector(query_cate_prod, 'category_product')
if (not check_product_category) or check_product_category['result'] != 'success' or (to_len(check_product_category['data']) == 0):
category_data = {
'object_id': product_id,
'term_taxonomy_id': cate_id,
'term_order': 0
}
category_query = self.create_insert_query_connector("term_relationships", category_data)
all_query.append(category_query)
# End
stock_status = 'instock'
if 'is_in_stock' in convert:
stock_status = 'instock' if convert['is_in_stock'] else 'outofstock'
else:
stock_status = 'outofstock' if convert['manage_stock'] else 'instock'
tax_class = ''
if convert['tax']['id'] or convert['tax']['code']:
tax_class = self.get_map_field_by_src(self.TYPE_TAX, convert['tax']['id'], convert['tax']['code'], field = 'code_desc')
# a special price counts as active when it is set and its end date is empty, zeroed out or still in the future
special_end_date = convert['special_price']['end_date']
special_price_active = convert['special_price']['price'] and (self.to_timestamp(special_end_date) > time.time() or special_end_date in ('0000-00-00', '0000-00-00 00:00:00', '', None))
product_meta = {
'_stock_status': stock_status,
'_downloadable': "yes" if convert['type'] == self.PRODUCT_DOWNLOAD else "no",
'_virtual': "yes" if convert['type'] == self.PRODUCT_VIRTUAL else "no",
'_regular_price': convert['price'],
'_sale_price': convert['special_price']['price'] if special_price_active else "",
'_tax_status': convert['tax'].get('status', ("taxable" if to_int(convert['tax']['id']) or convert['tax']['code'] else "none")),
'_tax_class': tax_class if tax_class else '',
'_weight': convert['weight'] if convert['weight'] else '',
'_length': convert['length'] if convert['length'] else '',
'_width': convert['width'] if convert['width'] else '',
'_height': convert['height'] if convert['height'] else '',
'_sku': convert['sku'],
'_price': convert['special_price']['price'] if special_price_active else convert['price'],
'_manage_stock': "yes" if convert['manage_stock'] else "no",
'_stock': convert['qty'] if convert['qty'] else 0,
# 'show_on_pos': '1' if convert['pos'] else 0,
}
if convert['special_price']['start_date'] and special_price_active:
product_meta['_sale_price_dates_from'] = self.to_timestamp(convert['special_price']['start_date'])
if convert['special_price']['end_date'] and special_price_active:
product_meta['_sale_price_dates_to'] = self.to_timestamp(convert['special_price']['end_date'])
if 'group_prices' in convert and to_len(convert['group_prices']) > 0:
product_meta['wholesale_customer_wholesale_price'] = convert['group_prices'][0]['price']
all_meta_queries = list()
for meta_key, meta_value in product_meta.items():
meta_insert = {
'post_id': product_id,
'meta_key': meta_key,
'meta_value': meta_value
}
if meta_key == '_sale_price_dates_from' or meta_key == '_sale_price_dates_to':
query_meta_key = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_postmeta` WHERE `post_id` = " + to_str(product_id) + " AND `meta_key` = '" + to_str(meta_key) + "'"
}
check_meta_key = self.select_data_connector(query_meta_key, 'postmeta')
if (not check_meta_key) or check_meta_key['result'] != 'success' or (not check_meta_key['data']) or (to_len(check_meta_key['data']) == 0):
sale_price_data = {
'post_id': product_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_price_query = self.create_insert_query_connector("postmeta", sale_price_data)
all_query.append(meta_price_query)
meta_query = self.create_update_query_connector("postmeta", meta_insert, {'post_id': product_id, 'meta_key': meta_key})
all_query.append(meta_query)
# todo: update children
children_list = list()
option_list = list()
if convert['children']:
children_list = convert['children']
else:
if convert['options']:
option_list = convert['options']
if self.count_child_from_option(convert['options']) <= self.VARIANT_LIMIT:
children_list = self.convert_option_to_child(option_list, convert)
if children_list and to_len(children_list) <= self.VARIANT_LIMIT:
for key_child, product_child in enumerate(children_list):
children_id = self.get_map_field_by_src(self.TYPE_CHILD, product_child['id'], product_child['code'], lang = language_code)
if not children_id:
continue
if product_child.get('is_in_stock'):
child_stock_status = 'instock' if product_child['is_in_stock'] else 'outofstock'
else:
child_stock_status = 'outofstock' if product_child['manage_stock'] else 'instock'
# the original condition mixed convert[...] and product_child[...] dates; use the child's own special-price dates throughout
child_end_date = product_child['special_price']['end_date']
child_special_active = product_child['special_price']['price'] and (self.to_timestamp(child_end_date) > time.time() or child_end_date in ('0000-00-00', '0000-00-00 00:00:00', '', None))
children_meta = {
'_stock_status': child_stock_status,
'_sku': product_child['sku'] if product_child['sku'] else '',
'_weight': product_child['weight'] if product_child['weight'] else '',
'_length': product_child['length'] if product_child['length'] else '',
'_width': product_child['width'] if product_child['width'] else '',
'_height': product_child['height'] if product_child['height'] else '',
'_manage_stock': "yes" if product_child['manage_stock'] else "no",
'_stock': product_child['qty'] if product_child['qty'] else 0,
'_regular_price': product_child['price'],
'_sale_price': product_child['special_price']['price'] if child_special_active else product_child['price'],
'_price': product_child['special_price']['price'] if child_special_active else product_child['price'],
}
if child_special_active:
if product_child['special_price']['start_date']:
children_meta['_sale_price_dates_from'] = self.to_timestamp(product_child['special_price']['start_date'])
if product_child['special_price']['end_date']:
children_meta['_sale_price_dates_to'] = self.to_timestamp(product_child['special_price']['end_date'])
for meta_key, meta_value in children_meta.items():
meta_insert_child = {
'post_id': children_id,
'meta_key': meta_key,
'meta_value': meta_value
}
if meta_key == '_sale_price_dates_from' or meta_key == '_sale_price_dates_to':
query_meta_key = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_postmeta` WHERE `post_id` = " + to_str(children_id) + " AND `meta_key` = '" + to_str(meta_key) + "'"
}
check_meta_key = self.select_data_connector(query_meta_key, 'postmeta')
if (not check_meta_key) or check_meta_key['result'] != 'success' or (not check_meta_key['data']) or (to_len(check_meta_key['data']) == 0):
sale_price_data = {
'post_id': children_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_price_query = self.create_insert_query_connector("postmeta", sale_price_data)
all_query.append(meta_price_query)
meta_query_child = self.create_update_query_connector('postmeta', meta_insert_child, {'post_id': children_id, 'meta_key': meta_key})
all_query.append(meta_query_child)
# todo: seo
# begin
if self.is_exist_lecm_rewrite():
if (self._notice['config']['seo'] or self._notice['config']['seo_301']) and convert['seo']:
delete_query = list()
delete_query.append(self.create_delete_query_connector('lecm_rewrite', {'type': 'product', 'type_id': product_id}))
self.query_multiple_data_connector(delete_query)
for seo_url in convert['seo']:
if not seo_url['request_path']:
continue
if old_url_key != seo_url['request_path'].replace(' ', ''):
query_check = {
'link': seo_url['request_path']
}
if self.is_wpml() and convert.get('language_code'):
query_check['lang'] = convert['language_code']
seo_query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_lecm_rewrite WHERE " + self.dict_to_where_condition(query_check)
}
check_seo_exit = self.select_data_connector(seo_query, 'lecm_rewrite')
if check_seo_exit and check_seo_exit['result'] == 'success' and to_len(check_seo_exit['data']) > 0:
continue
else:
le_url_rewrite = {
'link': to_str(seo_url['request_path']).rstrip('/'),
'type': 'product',
'type_id': product_id
}
if self.is_wpml():
le_url_rewrite['lang'] = convert.get('language_code')
if self._notice['config']['seo_301']:
le_url_rewrite['redirect_type'] = 301
self.import_data_connector(self.create_insert_query_connector("lecm_rewrite", le_url_rewrite), 'seo_product')
self.import_multiple_data_connector(all_query, 'update_product')
if self.is_wpml() and not convert.get('language_code'):
where_product_wpml = {
'migration_id': self._migration_id,
'type': 'product',
}
if convert['id']:
where_product_wpml['id_src'] = convert['id']
else:
where_product_wpml['code'] = convert['code']
product_wpml = self.select_obj(TABLE_MAP, where_product_wpml)
if product_wpml['result'] == 'success' and product_wpml['data']:
for product_wpml_row in product_wpml['data']:
if product_wpml_row['id_desc'] == product_id or not product_wpml_row.get('lang'):
continue
convert_wpml = self.get_convert_data_language(convert, target_language_id = language_code)
convert_wpml['language_code'] = product_wpml_row['lang']
self.update_latest_data_product(product_wpml_row['id_desc'], convert_wpml, product, products_ext)
return response_success()
# note (translated from Vietnamese): read this method and the whole flow becomes clear
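# update_product_after_demo appears to re-link categories, brands and the tax class after a
# demo/trial migration: it clears the product's existing product_cat/product_brand term
# relationships and re-inserts them from the converted data (a reading of the code below,
# not documented behavior).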
def update_product_after_demo(self, product_id, convert, product, products_ext):
language_code = convert.get('language_code')
if self.is_wpml() and not language_code:
language_code = self._notice['target']['language_default']
all_queries = list()
query_delete = {
'type': 'delete',
'query': 'DELETE FROM _DBPRF_term_relationships WHERE object_id = ' + to_str(product_id) + ' AND term_taxonomy_id IN (SELECT term_taxonomy_id FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['product_brand', 'product_cat']) + ')'
}
all_queries.append(query_delete)
# category
all_categories = list()
if convert['categories']:
for category in convert['categories']:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, language_code)
if category_id:
all_categories.append(category_id)
all_categories = list(set(all_categories))
for cate_id in all_categories:
category_data = {
'object_id': product_id,
'term_taxonomy_id': cate_id,
'term_order': 0
}
category_query = self.create_insert_query_connector("term_relationships", category_data)
all_queries.append(category_query)
if self._notice['target']['support']['manufacturers']:
if convert['manufacturer']['id'] or convert['manufacturer']['name']:
manufacturer_id = self.get_map_field_by_src(self.TYPE_MANUFACTURER, convert['manufacturer']['id'])
if not manufacturer_id:
manufacturer_id = self.get_map_field_by_src(self.TYPE_MANUFACTURER, None, convert['manufacturer']['id'])
if manufacturer_id:
relationship_data = {
'object_id': product_id,
'term_taxonomy_id': manufacturer_id,
'term_order': 0
}
category_query = self.create_insert_query_connector("term_relationships", relationship_data)
all_queries.append(category_query)
elif convert['manufacturer']['name']:
slug = self.sanitize_title(convert['manufacturer']['name'])
manufacturer_term = {
'name': convert['manufacturer']['name'],
'slug': slug,
'term_group': 0,
}
manufacturer_term_query = self.create_insert_query_connector('terms', manufacturer_term)
term_id = self.import_data_connector(manufacturer_term_query, 'manufacturer')
if not term_id:
return response_warning('Manufacturer ' + to_str(convert['id']) + ' import failed.')
manufacturer_taxonomy = {
'term_id': term_id,
'taxonomy': 'product_brand',
'description': '',
'parent': 0,
'count': 0
}
manufacturer_taxonomy_query = self.create_insert_query_connector('term_taxonomy', manufacturer_taxonomy)
manufacturer_taxonomy_import = self.import_manufacturer_data_connector(manufacturer_taxonomy_query, True, convert['id'])
if manufacturer_taxonomy_import:
relationship_data = {
'object_id': product_id,
# link to the newly created taxonomy row (the original mistakenly reused the empty manufacturer_id here)
'term_taxonomy_id': manufacturer_taxonomy_import,
'term_order': 0
}
relationship_query = self.create_insert_query_connector("term_relationships", relationship_data)
all_queries.append(relationship_query)
self.insert_map(self.TYPE_MANUFACTURER, convert['manufacturer']['id'], manufacturer_taxonomy_import, convert['manufacturer']['name'])
if convert['tax']['id'] or convert['tax']['code']:
tax_class = self.get_map_field_by_src(self.TYPE_TAX, convert['tax']['id'], convert['tax']['code'], 'code_desc')
if tax_class:
meta_insert = {
'post_id': product_id,
'meta_key': '_tax_class',
'meta_value': tax_class
}
where_meta = {
'post_id': product_id,
'meta_key': '_tax_class',
}
all_queries.append(self.create_update_query_connector('postmeta', meta_insert, where_meta))
self.import_multiple_data_connector(all_queries, 'update_product')
return response_success()
def router_product_import(self, convert, product, products_ext):
return response_success('product_import')
def before_product_import(self, convert, product, products_ext):
return response_success()
def product_import(self, convert, product, products_ext):
self.log(product, 'queries')
all_query = list()
language_code = convert.get('language_code')
if self.is_wpml() and not language_code:
language_code = self._notice['target']['language_default']
code_name = convert['name']
code_name = self.sanitize_title(code_name).strip('-')
if self.is_wpml() and language_code:
code_name = code_name + '-' + language_code
check_slug_exist = True
while check_slug_exist:
check_slug_exist = bool(self.select_map(self._migration_id, self.TYPE_PRODUCT, None, None, None, code_name, None, language_code))
if check_slug_exist:
code_name += to_str(get_value_by_key_in_dict(convert, 'id', ''))
product_data = {
'post_author': 1,
'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_title': convert['name'],
'post_status': 'publish',
'ping_status': 'open',
'post_name': code_name[:200],
'post_modified': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_modified_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_parent': '',
'post_type': 'product',
'comment_count': 0,
'guid': '',
'post_content_filtered': '',
'post_excerpt': convert['short_description'],
'to_ping': '',
'pinged': '',
# 'post_parent': 0,
'post_content': convert['description'],
'menu_order': get_value_by_key_in_dict(convert, 'sort_order', 0)
}
product_data_query = self.create_insert_query_connector('posts', product_data)
product_id = self.import_product_data_connector(product_data_query, True, convert['id'])
if not product_id:
return response_error('Product ' + to_str(convert['id']) + ' import failed.')
self.insert_map(self.TYPE_PRODUCT, convert['id'], product_id, convert['name'], code_name, None, language_code)
return response_success(product_id)
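# product_import only creates the wp_posts row and its id mapping; meta, images, categories
# and the rest are attached afterwards in after_product_import / addition_product_import.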
def after_product_import(self, product_id, convert, product, products_ext):
self.log(product_id, 'product_id')
self.log(convert, 'convert_after_pro_import')
language_code = convert.get('language_code')
if (self.is_wpml() or self.is_polylang()) and not language_code:
language_code = self._notice['target']['language_default']
all_queries = list()
thumbnail_id = False
if convert['thumb_image']['path']:
image_process = self.process_image_before_import(convert['thumb_image']['url'], convert['thumb_image']['path'])
image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(self.make_woocommerce_image_path(image_process['path']), self._notice['target']['config']['image_product'].rstrip('/')))
if image_import_path:
product_image = self.remove_prefix_path(image_import_path, self._notice['target']['config']['image_product'])
image_details = self.get_sizes(image_process['url'])
thumbnail_id = self.wp_image(product_image, image_details, convert['thumb_image'].get('label', ''))
if thumbnail_id and self.is_wpml():
all_queries.append(self.get_query_img_wpml(thumbnail_id, language_code))
gallery_ids = list()
if convert['images']:
for image in convert['images']:
image_process = self.process_image_before_import(image['url'], image['path'])
image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(self.make_woocommerce_image_path(image_process['path']), self._notice['target']['config']['image_product'].rstrip('/')))
if image_import_path:
product_image = self.remove_prefix_path(image_import_path, self._notice['target']['config']['image_product'])
image_details = self.get_sizes(image_process['url'])
img_id = self.wp_image(product_image, image_details, image['label'])
if img_id:
gallery_ids.append(img_id)
if self.is_wpml():
all_queries.append(self.get_query_img_wpml(img_id, language_code))
stock_status = 'instock'
product_meta = {
'_product_attributes': php_serialize(list()),
'_sku': convert['sku'],
'_stock_status': stock_status,
'_weight': convert['weight'] if convert['weight'] else '',
'_length': convert['length'] if convert['length'] else '',
'_width': convert['width'] if convert['width'] else '',
'_height': convert['height'] if convert['height'] else '',
'_price': convert['special_price']['price'],
'_stock': convert['qty'] if convert['qty'] else 0,
'_thumbnail_id': thumbnail_id if thumbnail_id else '',
'_manage_stock': 'yes' if convert['manage_stock'] or convert['manage_stock'] == True else 'no',
}
all_meta_queries = list()
for meta_key, meta_value in product_meta.items():
meta_insert = {
'post_id': product_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("postmeta", meta_insert)
all_meta_queries.append(meta_query)
if all_meta_queries:
self.import_multiple_data_connector(all_meta_queries, 'products')
all_categories = list()
if convert['categories']:
for category in convert['categories']:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, language_code)
if category_id:
all_categories.append(category_id)
all_categories = list(set(all_categories))
for cate_id in all_categories:
category_data = {
'object_id': product_id,
'term_taxonomy_id': cate_id,
'term_order': 0
}
category_query = self.create_insert_query_connector("term_relationships", category_data)
all_queries.append(category_query)
if all_queries:
self.import_multiple_data_connector(all_queries, 'product')
return response_success()
def addition_product_import(self, convert, product, products_ext):
return response_success()
def finish_product_import(self):
if self.is_variant_limit:
self._notice['config']['variant_limit'] = True
return response_success()
# TODO: CUSTOMER
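# prepare_customers_import stores the source cart type and URL as LEPP_TYPE / LEPP_URL
# wp_options rows; presumably these are read by a companion legacy-password plugin so that
# migrated password hashes keep working (an assumption, not confirmed by this file).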
def prepare_customers_import(self):
if self._notice['config'].get('cus_pass'):
delete_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'LEPP_TYPE' OR option_name = 'LEPP_URL'"
}
config_delete = self.import_data_connector(delete_query)
all_queries = list()
type_data = {
'option_name': 'LEPP_TYPE',
'option_value': self._notice['src']['cart_type'],
'autoload': 'yes'
}
type_query = self.create_insert_query_connector('options', type_data)
all_queries.append(type_query)
url_data = {
'option_name': 'LEPP_URL',
'option_value': self._notice['src']['cart_url'],
'autoload': 'yes'
}
url_query = self.create_insert_query_connector('options', url_data)
all_queries.append(url_query)
if all_queries:
self.import_multiple_data_connector(all_queries, 'customer')
return self
def get_customers_main_export(self):
id_src = self._notice['process']['customers']['id_src']
limit = self._notice['setting']['customers']
prefix = self._notice['src']['config']['table_prefix']
if self._notice['src']['config'].get('site_id'):
prefix = to_str(prefix).replace(to_str(self._notice['src']['config'].get('site_id')) + '_', '')
query = {
'type': 'select',
'query': "SELECT * FROM " + prefix + "users u LEFT JOIN " + prefix + "usermeta um ON u.ID = um.user_id WHERE (um.meta_key = '_DBPRF_capabilities' AND um.meta_value LIKE '%customer%' OR um.meta_value LIKE '%subscriber%') AND ID > " + to_str(id_src) + " ORDER BY ID ASC LIMIT " + to_str(limit)
}
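# The LIKE filter above matches WordPress's serialized capabilities meta; for reference, a
# customer row typically looks like: a:1:{s:8:"customer";b:1;}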
# customers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
customers = self.select_data_connector(query, 'customers')
if not customers or customers['result'] != 'success':
return response_error()
return customers
def get_customers_ext_export(self, customers):
url_query = self.get_connector_url('query')
customers_ids = duplicate_field_value_from_list(customers['data'], 'ID')
customer_ext_queries = {
'user_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_usermeta WHERE user_id IN " + self.list_to_in_condition(
customers_ids),
}
}
if self._notice['src']['support'].get('customer_point_rewards'):
customer_ext_queries['wc_points_rewards_user_points'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points WHERE (order_id IS NULL OR order_id = '') AND user_id IN " + self.list_to_in_condition(customers_ids),
}
customer_ext_queries['wc_points_rewards_user_points_log'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points_log WHERE (order_id IS NULL OR order_id = '') AND user_id IN " + self.list_to_in_condition(customers_ids),
}
# customers_ext = self.get_connector_data(url_query,
# {'serialize': True, 'query': json.dumps(customer_ext_queries)})
customers_ext = self.select_multiple_data_connector(customer_ext_queries, 'customers')
if not customers_ext or customers_ext['result'] != 'success':
return response_error()
return customers_ext
def convert_customer_export(self, customer, customers_ext):
customer_data = self.construct_customer()
customer_data = self.add_construct_default(customer_data)
customer_data['id'] = customer['ID']
customer_data['code'] = customer['user_login']
customer_data['username'] = customer['user_nicename']
customer_data['email'] = customer['user_email']
customer_data['password'] = customer['user_pass']
customer_data['website'] = customer['user_url']
customer_data['user_url'] = customer['user_url']
customer_data['active'] = True
customer_data['created_at'] = convert_format_time(customer['user_registered'])
customer_meta = get_list_from_list_by_field(customers_ext['data']['user_meta'], 'user_id', customer['ID'])
customer_data['first_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'first_name', 'meta_value')
customer_data['last_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'last_name', 'meta_value')
prefix = self._notice['src']['config']['table_prefix']
capabilities = to_str(prefix) + '_capabilities'
customer_data['capabilities'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', capabilities, 'meta_value')
# billing
address_data = self.construct_customer_address()
address_data['code'] = to_str(customer['ID']) + "_1"
address_data['first_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_first_name', 'meta_value')
address_data['last_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_last_name', 'meta_value')
address_data['address_1'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_address_1', 'meta_value')
address_data['address_2'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_address_2', 'meta_value')
address_data['city'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_city', 'meta_value')
address_data['postcode'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_postcode', 'meta_value')
address_data['telephone'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_phone', 'meta_value')
address_data['company'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_company', 'meta_value')
address_data['fax'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_fax', 'meta_value')
address_data['country']['country_code'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_country', 'meta_value')
address_data['country']['code'] = address_data['country']['country_code']
address_data['country']['name'] = self.get_country_name_by_code(address_data['country']['country_code'])
address_data['state']['state_code'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_state', 'meta_value')
address_data['state']['code'] = address_data['state']['state_code']
address_data['default']['billing'] = True
if address_data['address_1'] or address_data['address_2']:
customer_data['address'].append(address_data)
# shipping
shipping_address = self.get_list_from_list_by_field_as_first_key(customer_meta, 'meta_key', 'shipping_')
if shipping_address:
shipping_data = self.construct_customer_address()
shipping_data['code'] = to_str(customer['ID']) + "_2"
shipping_data['first_name'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_first_name', 'meta_value')
shipping_data['last_name'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_last_name', 'meta_value')
shipping_data['address_1'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_address_1', 'meta_value')
shipping_data['address_2'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_address_2', 'meta_value')
shipping_data['city'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_city', 'meta_value')
shipping_data['postcode'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_postcode', 'meta_value')
shipping_data['telephone'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_phone', 'meta_value')
shipping_data['company'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_company', 'meta_value')
shipping_data['fax'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_fax', 'meta_value')
shipping_data['country']['country_code'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_country', 'meta_value')
shipping_data['country']['code'] = shipping_data['country']['country_code']
shipping_data['country']['name'] = self.get_country_name_by_code(shipping_data['country']['code'])
shipping_data['state']['state_code'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_state', 'meta_value')
shipping_data['state']['code'] = shipping_data['state']['state_code']
shipping_data['default']['shipping'] = True
if shipping_data['address_1'] or shipping_data['address_2']:
customer_data['address'].append(shipping_data)
# customer_data['first_name'] = customer_data['first_name'] if customer_data['first_name'] else address_data['first_name']
# customer_data['last_name'] = customer_data['last_name'] if customer_data['last_name'] else address_data['last_name']
# TODO: Plugin WooCommerce Points and Rewards
if self._notice['src']['support'].get('customer_point_rewards'):
customer_point_rewards = dict()
customer_point_rewards['reward_point'] = list()
customer_point_rewards['reward_point_log'] = list()
customer_point_rewards['points_balance'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'wc_points_balance', 'meta_value')
wc_points_rewards_user_points = get_list_from_list_by_field(customers_ext['data']['wc_points_rewards_user_points'], 'user_id', customer['ID'])
if wc_points_rewards_user_points:
for points_rewards_user_points in wc_points_rewards_user_points:
reward_point = dict()
reward_point['points'] = points_rewards_user_points['points']
reward_point['points_balance'] = points_rewards_user_points['points_balance']
reward_point['order_id'] = points_rewards_user_points['order_id']
reward_point['created_at'] = points_rewards_user_points['date']
customer_point_rewards['reward_point'].append(reward_point)
wc_points_rewards_user_points_log = get_list_from_list_by_field(customers_ext['data']['wc_points_rewards_user_points_log'], 'user_id', customer['ID'])
if wc_points_rewards_user_points_log:
for points_rewards_user_points_log in wc_points_rewards_user_points_log:
reward_point_log = dict()
reward_point_log['points'] = points_rewards_user_points_log['points']
reward_point_log['type'] = points_rewards_user_points_log['type']
reward_point_log['user_points_id'] = points_rewards_user_points_log['user_points_id']
reward_point_log['order_id'] = points_rewards_user_points_log['order_id']
reward_point_log['admin_user_id'] = points_rewards_user_points_log['admin_user_id']
reward_point_log['data'] = points_rewards_user_points_log['data']
reward_point_log['created_at'] = points_rewards_user_points_log['date']
customer_point_rewards['reward_point_log'].append(reward_point_log)
customer_data['point_rewards'] = customer_point_rewards
return response_success(customer_data)
def get_customer_id_import(self, convert, customer, customers_ext):
return customer['ID']
def check_customer_import(self, convert, customer, customers_ext):
return bool(self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['id'], convert['code']))
def router_customer_import(self, convert, customer, customers_ext):
return response_success('customer_import')
def before_customer_import(self, convert, customer, customers_ext):
return response_success()
def customer_import(self, convert, customer, customers_ext):
customer_users = {
'user_login': convert['username'],
'user_pass': convert['password'],
'user_nicename': convert['first_name'],
'user_email': convert['email'],
'user_url': '',
# WP expects a datetime string here; fall back to the current time when the source has none (the original 0 was invalid)
'user_registered': convert['created_at'] if convert.get('created_at') else get_current_time(),
'user_activation_key': "0",
'user_status': 0,
'display_name': convert['last_name'] + ' ' + convert['first_name'],
}
customer_users_query = self.create_insert_query_connector('users', customer_users)
customer_id = self.import_data_connector(customer_users_query, 'customer')
if not customer_id:
return response_warning('Customer ' + to_str(convert['id']) + ' import failed.')
return response_success(customer_id)
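# WPML groups the translations of one object via icl_translations.trid; get_new_trid below
# allocates the next group id as max(trid) + 1 (a minimal approach, not concurrency-safe).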
def get_new_trid(self):
query = {
'type': 'select',
'query': "SELECT max(trid) as trid FROM _DBPRF_icl_translations"
}
trid = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
new_trid = 1
if trid['data']:
new_trid = to_int(trid['data'][0]['trid']) + 1
return new_trid
def after_customer_import(self, customer_id, convert, customer, customers_ext):
customer_meta = {
'billing_first_name': convert['first_name'],
'billing_last_name': convert['last_name'],
'billing_company': convert['address'][0]['company'],
'billing_address_1': convert['address'][0]['address_1'],
'billing_address_2': convert['address'][0]['address_2'],
'billing_city': convert['address'][0]['city'],
'billing_postcode': '',
'billing_country': '',
'billing_state': convert['address'][0]['state'],
'billing_phone': convert['address'][0]['telephone'],
'shipping_first_name': convert['first_name'],
'shipping_last_name': convert['last_name'],
'shipping_company': convert['address'][0]['company'],
'shipping_address_1': convert['address'][0]['address_1'],
'shipping_address_2': convert['address'][0]['address_2'],
'shipping_city': convert['address'][0]['city'],
'shipping_postcode': '',
'shipping_country': '',
'shipping_state': convert['address'][0]['state'],
'shipping_phone': convert['address'][0]['telephone'],
}
all_meta_queries = list()
for meta_key, meta_value in customer_meta.items():
meta_insert = {
# customer meta belongs in wp_usermeta keyed by user_id, not in postmeta (fixes the original table/key pair)
'user_id': customer_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("usermeta", meta_insert)
all_meta_queries.append(meta_query)
if all_meta_queries:
self.import_multiple_data_connector(all_meta_queries, 'customers')
return response_success()
def addition_customer_import(self, convert, customer, customers_ext):
return response_success()
# TODO: ORDER
def prepare_orders_import(self):
return self
def prepare_orders_export(self):
return self
def get_orders_main_export(self):
id_src = self._notice['process']['orders']['id_src']
limit = self._notice['setting']['orders']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'shop_order' AND post_status NOT IN ('inherit','auto-draft') AND ID > " + to_str(
id_src) + " ORDER BY ID ASC LIMIT " + to_str(limit)
}
# orders = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
orders = self.select_data_connector(query, 'orders')
if not orders or orders['result'] != 'success':
return response_error()
return orders
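# Load all order-related records (items, notes, refunds, meta) for the current batch with grouped IN (...) queries instead of per-order lookups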
def get_orders_ext_export(self, orders):
url_query = self.get_connector_url('query')
order_ids = duplicate_field_value_from_list(orders['data'], 'ID')
customer_ext_queries = {
'woocommerce_order_items': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_woocommerce_order_items WHERE order_id IN " + self.list_to_in_condition(
order_ids),
},
'order_note': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_comments WHERE comment_post_ID IN " + self.list_to_in_condition(
order_ids),
},
'order_refund': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'shop_order_refund' AND post_parent IN " + self.list_to_in_condition(
order_ids),
},
'order_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(order_ids),
},
}
orders_ext = self.select_multiple_data_connector(customer_ext_queries, 'orders')
if not orders_ext or orders_ext['result'] != 'success':
return response_error()
order_item_ids = duplicate_field_value_from_list(orders_ext['data']['woocommerce_order_items'], 'order_item_id')
comment_ids = duplicate_field_value_from_list(orders_ext['data']['order_note'], 'comment_ID')
refund_ids = duplicate_field_value_from_list(orders_ext['data']['order_refund'], 'ID')
post_meta_ids = list(set(refund_ids + order_ids))
cus_list = get_list_from_list_by_field(orders_ext['data']['order_meta'], 'meta_key', '_customer_user')
cus_ids = list()
if cus_list:
cus_ids = duplicate_field_value_from_list(cus_list, 'meta_value')
orders_ext_rel_queries = {
'woocommerce_order_itemmeta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_woocommerce_order_itemmeta WHERE order_item_id IN " + self.list_to_in_condition(order_item_ids),
},
'order_note_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_commentmeta WHERE comment_id IN " + self.list_to_in_condition(comment_ids),
},
'postmeta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(post_meta_ids),
},
'user': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_users WHERE ID IN " + self.list_to_in_condition(cus_ids),
},
'user_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_usermeta WHERE meta_key IN ('first_name','last_name') AND user_id IN " + self.list_to_in_condition(cus_ids),
}
}
if self._notice['src']['support'].get('customer_point_rewards'):
orders_ext_rel_queries['wc_points_rewards_user_points'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points WHERE order_id IN " + self.list_to_in_condition(order_ids),
}
orders_ext_rel_queries['wc_points_rewards_user_points_log'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points_log WHERE order_id IN " + self.list_to_in_condition(order_ids),
}
orders_ext_rel = self.select_multiple_data_connector(orders_ext_rel_queries, 'orders')
if not orders_ext_rel or orders_ext_rel['result'] != 'success':
return response_error()
orders_ext = self.sync_connector_object(orders_ext, orders_ext_rel)
pro_list = get_list_from_list_by_field(orders_ext_rel['data']['woocommerce_order_itemmeta'], 'meta_key', '_product_id')
pro_ids = duplicate_field_value_from_list(pro_list, 'meta_value')
orders_ext_third_rel_queries = {
'products_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(pro_ids),
},
}
orders_ext_third_rel = self.get_connector_data(url_query, {'serialize': True, 'query': json.dumps(orders_ext_third_rel_queries)})
if not orders_ext_third_rel or orders_ext_third_rel['result'] != 'success':
return response_error()
orders_ext = self.sync_connector_object(orders_ext, orders_ext_third_rel)
return orders_ext
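# Flatten a WP shop_order post and its meta rows into the cart-neutral order structure consumed by the import side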
def convert_order_export(self, order, orders_ext):
order_data = self.construct_order()
order_data = self.add_construct_default(order_data)
order_data['id'] = order['ID']
order_data['status'] = order['post_status']
# order data
order_items = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_items'], 'order_id', order['ID'])
shipping = get_row_from_list_by_field(order_items, 'order_item_type', 'shipping')
taxes = get_list_from_list_by_field(order_items, 'order_item_type', 'tax')
tax_names = list()
total_tax = 0.0
if taxes:
tax_names = duplicate_field_value_from_list(taxes, 'order_item_name')
for tax in taxes:
order_tax_metas = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_itemmeta'], 'order_item_id', tax['order_item_id'])
total_tax += to_decimal(self.get_value_metadata(order_tax_metas, 'tax_amount', 0.0))
total_tax += to_decimal(self.get_value_metadata(order_tax_metas, 'shipping_tax_amount', 0.0))
if 'postmeta' in orders_ext['data']:
order_meta = get_list_from_list_by_field(orders_ext['data']['postmeta'], 'post_id', order['ID'])
else:
order_meta = get_list_from_list_by_field(orders_ext['data']['order_meta'], 'post_id', order['ID'])
ord_number = get_row_value_from_list_by_field(order_meta, 'meta_key', '_order_number', 'meta_value')
if ord_number and self._notice['src']['support'].get('plugin_pre_ord'):
order_data['order_number'] = ord_number
order_data['tax']['title'] = '|'.join(tax_names) if tax_names else 'Tax'
order_data['tax']['amount'] = total_tax if total_tax else self.get_value_metadata(order_meta, '_order_tax', 0.0000)
order_data['shipping']['title'] = shipping['order_item_name'] if shipping else 'Shipping'
order_data['shipping']['amount'] = self.get_value_metadata(order_meta, '_order_shipping', 0.0000) # _order_shipping_tax
discount_title = get_row_value_from_list_by_field(order_items, 'order_item_type', 'coupon', 'order_item_name')
order_data['discount']['title'] = discount_title if discount_title else 'Discount'
order_data['discount']['amount'] = self.get_value_metadata(order_meta, '_cart_discount', 0.0000)
order_data['total']['title'] = 'Total'
order_data['total']['amount'] = self.get_value_metadata(order_meta, '_order_total', 0.0000)
    order_data['subtotal']['title'] = 'Subtotal'
    order_data['subtotal']['amount'] = to_decimal(self.get_value_metadata(order_meta, '_order_total', 0.0000)) \
        - to_decimal(self.get_value_metadata(order_meta, '_cart_discount', 0.0000)) \
        - to_decimal(order_data['tax']['amount']) \
        - to_decimal(order_data['shipping']['amount'])
    order_data['currency'] = self.get_value_metadata(order_meta, '_order_currency', '')
order_data['created_at'] = convert_format_time(order['post_date'])
order_data['updated_at'] = convert_format_time(order['post_modified'])
# order customer
order_customer = self.construct_order_customer()
order_customer = self.add_construct_default(order_customer)
    order_customer_src = self.get_value_metadata(order_meta, '_customer_user', 0)
if order_customer_src and to_int(order_customer_src) > 0:
customer_src = get_row_from_list_by_field(orders_ext['data']['user'], 'ID', order_customer_src)
customer_meta_src = get_list_from_list_by_field(orders_ext['data']['user_meta'], 'user_id', order_customer_src)
if customer_src:
order_customer['id'] = order_customer_src
order_customer['code'] = get_value_by_key_in_dict(customer_src, 'user_login', '')
            order_customer['email'] = get_value_by_key_in_dict(customer_src, 'user_email', self.get_value_metadata(order_meta, '_billing_email', ''))
order_customer['username'] = get_value_by_key_in_dict(customer_src, 'display_name', '')
order_customer['first_name'] = self.get_value_metadata(customer_meta_src, 'first_name', self.get_value_metadata(order_meta, '_billing_first_name', ''))
order_customer['last_name'] = self.get_value_metadata(customer_meta_src, 'last_name', self.get_value_metadata(order_meta, '_billing_last_name', ''))
else:
        order_customer['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
order_customer['username'] = order_customer['email']
order_customer['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
order_customer['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
order_data['customer'] = order_customer
# TODO: Plugin WooCommerce Points and Rewards
if self._notice['src']['support'].get('customer_point_rewards'):
customer_point_rewards = dict()
customer_point_rewards['reward_point'] = list()
customer_point_rewards['reward_point_log'] = list()
wc_points_rewards_user_points = get_list_from_list_by_field(orders_ext['data']['wc_points_rewards_user_points'], 'order_id', order['ID'])
if wc_points_rewards_user_points:
for points_rewards_user_points in wc_points_rewards_user_points:
reward_point = dict()
reward_point['points'] = points_rewards_user_points['points']
reward_point['points_balance'] = points_rewards_user_points['points_balance']
reward_point['user_id'] = points_rewards_user_points['user_id']
reward_point['created_at'] = points_rewards_user_points['date']
customer_point_rewards['reward_point'].append(reward_point)
wc_points_rewards_user_points_log = get_list_from_list_by_field(orders_ext['data']['wc_points_rewards_user_points_log'], 'order_id', order['ID'])
if wc_points_rewards_user_points_log:
for points_rewards_user_points_log in wc_points_rewards_user_points_log:
reward_point_log = dict()
reward_point_log['points'] = points_rewards_user_points_log['points']
reward_point_log['type'] = points_rewards_user_points_log['type']
reward_point_log['user_points_id'] = points_rewards_user_points_log['user_points_id']
reward_point_log['user_id'] = points_rewards_user_points_log['user_id']
reward_point_log['admin_user_id'] = points_rewards_user_points_log['admin_user_id']
reward_point_log['data'] = points_rewards_user_points_log['data']
reward_point_log['created_at'] = points_rewards_user_points_log['date']
customer_point_rewards['reward_point_log'].append(reward_point_log)
order_data['point_rewards'] = customer_point_rewards
# customer address
customer_address = self.construct_order_address()
customer_address = self.add_construct_default(customer_address)
customer_address['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
customer_address['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
customer_address['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
customer_address['address_1'] = self.get_value_metadata(order_meta, '_billing_address_1', '')
customer_address['address_2'] = self.get_value_metadata(order_meta, '_billing_address_2', '')
customer_address['city'] = self.get_value_metadata(order_meta, '_billing_city', '')
customer_address['postcode'] = self.get_value_metadata(order_meta, '_billing_postcode', '')
customer_address['telephone'] = self.get_value_metadata(order_meta, '_billing_phone', '')
customer_address['company'] = self.get_value_metadata(order_meta, '_billing_company', '')
customer_address['country']['code'] = self.get_value_metadata(order_meta, '_billing_country', '')
customer_address['country']['country_code'] = self.get_value_metadata(order_meta, '_billing_country', '')
customer_address['country']['name'] = self.get_country_name_by_code(customer_address['country']['country_code'])
customer_address['state']['state_code'] = self.get_value_metadata(order_meta, '_billing_state', '')
customer_address['state']['code'] = customer_address['state']['state_code']
order_data['customer_address'] = customer_address
# billing address
order_billing = self.construct_order_address()
order_billing = self.add_construct_default(order_billing)
order_billing['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
order_billing['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
order_billing['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
order_billing['address_1'] = self.get_value_metadata(order_meta, '_billing_address_1', '')
order_billing['address_2'] = self.get_value_metadata(order_meta, '_billing_address_2', '')
order_billing['city'] = self.get_value_metadata(order_meta, '_billing_city', '')
order_billing['postcode'] = self.get_value_metadata(order_meta, '_billing_postcode', '')
order_billing['telephone'] = self.get_value_metadata(order_meta, '_billing_phone', '')
order_billing['company'] = self.get_value_metadata(order_meta, '_billing_company', '')
order_billing['country']['code'] = self.get_value_metadata(order_meta, '_billing_country', '')
order_billing['country']['country_code'] = self.get_value_metadata(order_meta, '_billing_country', '')
order_billing['country']['name'] = self.get_country_name_by_code(order_billing['country']['country_code'])
order_billing['state']['state_code'] = self.get_value_metadata(order_meta, '_billing_state', '')
order_billing['state']['code'] = order_billing['state']['state_code']
order_billing['code'] = self.convert_attribute_code(to_str(order_billing['first_name']) + '-' + to_str(order_billing['last_name']) + '-' + to_str(order_billing['address_1']) + '-' + to_str(order_billing['address_2']))
order_data['billing_address'] = order_billing
# shipping address
order_delivery = self.construct_order_address()
order_delivery = self.add_construct_default(order_delivery)
order_delivery['first_name'] = self.get_value_metadata(order_meta, '_shipping_first_name', '')
order_delivery['last_name'] = self.get_value_metadata(order_meta, '_shipping_last_name', '')
order_delivery['email'] = self.get_value_metadata(order_meta, '_shipping_email', '')
order_delivery['address_1'] = self.get_value_metadata(order_meta, '_shipping_address_1', '')
order_delivery['address_2'] = self.get_value_metadata(order_meta, '_shipping_address_2', '')
order_delivery['city'] = self.get_value_metadata(order_meta, '_shipping_city', '')
order_delivery['postcode'] = self.get_value_metadata(order_meta, '_shipping_postcode', '')
    shipping_phone = self.get_value_metadata(order_meta, '_shipping_phone', '')
    order_delivery['telephone'] = shipping_phone if shipping_phone else self.get_value_metadata(order_meta, '_shipping_Phone_No', '')
order_delivery['company'] = self.get_value_metadata(order_meta, '_shipping_company', '')
order_delivery['country']['code'] = self.get_value_metadata(order_meta, '_shipping_country', '')
order_delivery['country']['country_code'] = self.get_value_metadata(order_meta, '_shipping_country', '')
order_delivery['country']['name'] = self.get_country_name_by_code(order_delivery['country']['country_code'])
order_delivery['state']['state_code'] = self.get_value_metadata(order_meta, '_shipping_state', '')
order_delivery['state']['code'] = order_delivery['state']['state_code']
order_delivery['code'] = self.convert_attribute_code(to_str(order_delivery['first_name']) + '-' + to_str(order_delivery['last_name']) + '-' + to_str(order_delivery['address_1']) + '-' + to_str(order_delivery['address_2']))
order_data['shipping_address'] = order_delivery
# order_data['user_history'] = self.get_value_metadata(order_meta, '_user_history', '')
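    # Convert each WooCommerce line_item row (plus its itemmeta) into a cart-neutral order item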
order_products = get_list_from_list_by_field(order_items, 'order_item_type', 'line_item')
order_items = list()
for order_product in order_products:
order_product_metas = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_itemmeta'], 'order_item_id', order_product['order_item_id'])
qty = self.get_value_metadata(order_product_metas, '_qty', 1)
if to_int(qty) == 0:
qty = 1
order_item_subtotal = self.get_value_metadata(order_product_metas, '_line_subtotal', 0.0000)
order_item = self.construct_order_item()
order_item = self.add_construct_default(order_item)
order_item['id'] = order_product['order_item_id']
        variation_id = self.get_value_metadata(order_product_metas, '_variation_id', 0)
        order_item['product']['id'] = variation_id if to_int(variation_id) > 0 else self.get_value_metadata(order_product_metas, '_product_id', 0)
order_item['product']['code'] = self.get_value_metadata(order_product_metas, '_product_code', 0)
product_meta = get_list_from_list_by_field(orders_ext['data']['products_meta'], 'post_id', order_item['product']['id'])
order_item['product']['sku'] = self.get_value_metadata(product_meta, '_sku', '')
order_item['product']['name'] = order_product['order_item_name']
order_item['qty'] = to_decimal(qty) if qty != '' else 1
order_item['price'] = to_decimal(order_item_subtotal) / to_decimal(qty) if (qty != 0 and qty != '') else 0
order_item['original_price'] = to_decimal(order_item_subtotal) / to_decimal(qty) if (qty != 0 and qty != '') else 0
order_item['tax_amount'] = self.get_value_metadata(order_product_metas, '_line_tax', 0.0000)
order_item['subtotal'] = order_item_subtotal
order_item['total'] = self.get_value_metadata(order_product_metas, '_line_total', 0.0000)
order_item['options'] = list()
if order_product['order_item_type'] == 'line_item':
order_item_options = list()
keys = {'_qty', '_tax_class', '_product_id', '_variation_id', '_line_subtotal', '_line_subtotal_tax',
'_line_total', '_line_tax', '_line_tax_data', '_original_order_item_id'}
for order_product_meta in order_product_metas:
if order_product_meta['meta_key'] not in keys:
order_item_option = self.construct_order_item_option()
order_item_option['option_name'] = unquote(order_product_meta['meta_key'])
if order_item_option['option_name'] and 'pa_' in order_item_option['option_name']:
continue
order_item_option['option_value_name'] = order_product_meta['meta_value']
order_item_options.append(order_item_option)
order_item['options'] = order_item_options
order_items.append(order_item)
order_data['items'] = order_items
order_notes = get_list_from_list_by_field(orders_ext['data']['order_note'], 'comment_post_ID', order['ID'])
for order_note in order_notes:
order_note_meta = get_list_from_list_by_field(orders_ext['data']['order_note_meta'], 'comment_id', order_note['comment_ID'])
order_history = self.construct_order_history()
order_history = self.add_construct_default(order_history)
order_history['id'] = order_note['comment_ID']
order_history['status'] = order_note['comment_approved']
order_history['comment'] = order_note['comment_content']
order_history['notified'] = self.get_value_metadata(order_note_meta, 'is_customer_note', False)
order_history['created_at'] = convert_format_time(order_note['comment_date'])
order_data['history'].append(order_history)
order_payment = self.construct_order_payment()
order_payment = self.add_construct_default(order_payment)
order_payment['id'] = order['ID']
order_payment['method'] = self.get_value_metadata(order_meta, '_payment_method')
order_payment['title'] = self.get_value_metadata(order_meta, '_payment_method_title')
# custom order_number plugin WooCommerce Sequential Order Numbers
# order_data['order_number'] = self.get_value_metadata(order_meta, '_order_number', '')
# order_data['order_number_formatted'] = self.get_value_metadata(order_meta, '_order_number_formatted', '')
# order_data['order_number_meta'] = self.get_value_metadata(order_meta, '_order_number_meta', '')
order_data['payment'] = order_payment
return response_success(order_data)
def get_order_id_import(self, convert, order, orders_ext):
return order['ID']
def check_order_import(self, convert, order, orders_ext):
    return True if self.get_map_field_by_src(self.TYPE_ORDER, convert['id'], convert['code']) else False
def update_order_after_demo(self, order_id, convert, order, orders_ext):
all_queries = list()
delete_query = list()
# order item
delete_query_child = {
'type': 'delete',
        'query': 'DELETE FROM _DBPRF_woocommerce_order_itemmeta WHERE order_item_id IN (SELECT order_item_id FROM _DBPRF_woocommerce_order_items WHERE order_id = ' + to_str(order_id) + ')'
}
delete_query.append(delete_query_child)
delete_query.append(self.create_delete_query_connector('woocommerce_order_items', {'order_id': order_id}))
self.import_multiple_data_connector(delete_query, 'delete_ord_update')
order_items = convert['items']
for item in order_items:
order_item_data = {
'order_item_name': item['product']['name'],
'order_item_type': 'line_item',
'order_id': order_id
}
order_item_query = self.create_insert_query_connector("woocommerce_order_items", order_item_data)
order_item_id = self.import_data_connector(order_item_query, 'order')
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'])
if not product_id:
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, None, item['product']['id'])
if not product_id:
product_id = 0
order_item_meta = {
'_qty': item['qty'],
'_tax_class': '',
'_product_id': product_id,
'_variation_id': '',
'_line_subtotal': item['subtotal'],
'_line_total': item['total'],
'_line_subtotal_tax': 0,
'_line_tax': 0,
'_line_tax_data': php_serialize({
'total': 0,
'subtotal': 0
}),
}
for meta_key, meta_value in order_item_meta.items():
meta_insert = {
'order_item_id': order_item_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
for option in item['options']:
meta_insert = {
'order_item_id': order_item_id,
'meta_key': option['option_name'],
'meta_value': option['option_value_name']
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
    if all_queries:
        self.import_multiple_data_connector(all_queries, 'order')
    return response_success()
def router_order_import(self, convert, order, orders_ext):
return response_success('order_import')
def before_order_import(self, convert, order, orders_ext):
return response_success()
def order_import(self, convert, order, orders_ext):
    order_post = {
        'post_author': 1,
        'post_date': convert.get('created_at') if convert.get('created_at') else get_current_time(),
        'post_date_gmt': convert.get('created_at') if convert.get('created_at') else get_current_time(),
        'post_content': '',
        'post_title': 'Order',
        'post_excerpt': '',
        'post_status': 'wc-completed',
        'comment_status': 'open',
        'ping_status': 'closed',
        'post_password': '',
        'post_name': '',
        'to_ping': '',
        'pinged': '',
        'post_modified': convert.get('updated_at') if convert.get('updated_at') else get_current_time(),
        'post_modified_gmt': convert.get('updated_at') if convert.get('updated_at') else get_current_time(),
        'post_content_filtered': '',
        'post_parent': 0,
        'guid': '',
        'menu_order': 0,
        'post_type': 'shop_order',
        'post_mime_type': '',
        'comment_count': 0,
    }
    order_post_query = self.create_insert_query_connector('posts', order_post)
    order_id = self.import_data_connector(order_post_query, 'order')
    if not order_id:
        return response_warning('Order ' + to_str(convert['id']) + ' import failed.')
    self.insert_map(self.TYPE_ORDER, convert['id'], order_id, convert['code'])
    return response_success(order_id)
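# Write billing/shipping meta and recreate the line items for the order created in order_import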
def after_order_import(self, order_id, convert, order, orders_ext):
all_queries = list()
billing_address = convert['billing_address']
shipping_address = convert['shipping_address']
order_meta = {
'_billing_first_name': billing_address['first_name'],
'_billing_last_name': billing_address['last_name'],
'_billing_company': billing_address['company'],
'_billing_address_1': billing_address['address_1'],
'_billing_address_2': billing_address['address_2'],
'_billing_city': billing_address['city'],
'_billing_state': get_value_by_key_in_dict(billing_address['state'], 'state_code', billing_address['state']['name']) if billing_address and billing_address['state'] else '',
'_billing_country': get_value_by_key_in_dict(billing_address['country'], 'country_code', '') if billing_address and billing_address['country'] else '',
'_billing_postcode': billing_address['postcode'],
'_billing_phone': billing_address['telephone'],
'_shipping_first_name': shipping_address['first_name'],
'_shipping_last_name': shipping_address['last_name'],
'_shipping_company': shipping_address['company'],
'_shipping_address_1': shipping_address['address_1'],
'_shipping_address_2': shipping_address['address_2'],
'_shipping_city': shipping_address['city'],
        '_shipping_state': get_value_by_key_in_dict(shipping_address['state'], 'state_code', shipping_address['state']['name']) if shipping_address and shipping_address['state'] else '',
        '_shipping_country': get_value_by_key_in_dict(shipping_address['country'], 'country_code', '') if shipping_address and shipping_address['country'] else '',
'_shipping_postcode': shipping_address['postcode'],
'_shipping_phone': shipping_address['telephone'],
'_order_total': convert['total']['amount'],
}
for meta_key, meta_value in order_meta.items():
meta_insert = {
'post_id': order_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("postmeta", meta_insert)
all_queries.append(meta_query)
order_items = convert['items']
for item in order_items:
order_item_data = {
'order_item_name': item['product']['name'],
'order_item_type': 'line_item',
'order_id': order_id
}
order_item_query = self.create_insert_query_connector("woocommerce_order_items", order_item_data)
order_item_id = self.import_data_connector(order_item_query, 'order')
        product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'], item['product']['code'])
        if self.is_wpml():
            product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'], item['product']['code'], self._notice['target']['language_default'])
        if not product_id:
            product_id = 0
subtotal = item.get('subtotal', to_decimal(item['price']) * to_int(item['qty']))
        if to_decimal(subtotal) == 0:
subtotal = item['price']
order_item_meta = {
'_qty': item['qty'],
'_tax_class': '',
'_product_id': product_id,
'_variation_id': '',
'_line_subtotal': subtotal,
'_line_total': subtotal,
'_line_subtotal_tax': 0,
'_line_tax': 0,
'_line_tax_data': php_serialize({
'total': 0,
'subtotal': 0
})
}
if product_id == 0 and item['product']['sku']:
order_item_meta['SKU'] = item['product']['sku']
for meta_key, meta_value in order_item_meta.items():
meta_insert = {
'order_item_id': order_item_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
for option in item['options']:
meta_insert = {
'order_item_id': order_item_id,
'meta_key': option['option_name'],
'meta_value': option['option_value_name']
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
if all_queries:
self.import_multiple_data_connector(all_queries, 'order')
return response_success()
def addition_order_import(self, convert, order, orders_ext):
return response_success()
# TODO: REVIEW
def prepare_reviews_import(self):
return self
def prepare_reviews_export(self):
return self
def get_reviews_main_export(self):
id_src = self._notice['process']['reviews']['id_src']
limit = self._notice['setting']['reviews']
query = {
'type': 'select',
'query': "SELECT cm.*, p.post_type FROM _DBPRF_comments AS cm "
"LEFT JOIN _DBPRF_posts AS p ON p.ID = cm.comment_post_ID "
"WHERE p.post_type = 'product' AND cm.comment_ID > " + to_str(
id_src) + " ORDER BY cm.comment_ID ASC LIMIT " + to_str(limit)
}
reviews = self.select_data_connector(query, 'reviews')
if not reviews or reviews['result'] != 'success':
return response_error()
return reviews
def get_product_download_data(self, product_id):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE meta_key = '_downloadable_files' AND post_id = " + to_str(product_id)
}
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success' or len(products['data']) == 0:
return None
return php_unserialize(products['data'][0]['meta_value'])
def get_download_data(self, product_id):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id = " + to_str(product_id)
}
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success' or len(products['data']) == 0:
return None
download_data = dict()
for data in products['data']:
if data['meta_key'] in ['_download_expiry', '_download_limit']:
download_data[data['meta_key']] = data['meta_value'] if to_int(data['meta_value']) > 0 else None
return download_data
def get_reviews_ext_export(self, reviews):
url_query = self.get_connector_url('query')
reviews_ids = duplicate_field_value_from_list(reviews['data'], 'comment_ID')
product_ids = duplicate_field_value_from_list(reviews['data'], 'comment_post_ID')
review_ext_queries = {
'comment_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_commentmeta WHERE comment_id IN " + self.list_to_in_condition(
reviews_ids),
},
'product_info': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE ID IN " + self.list_to_in_condition(product_ids),
}
}
reviews_ext = self.select_multiple_data_connector(review_ext_queries, 'reviews')
if not reviews_ext or reviews_ext['result'] != 'success':
return response_error()
return reviews_ext
def convert_review_export(self, review, reviews_ext):
review_data = self.construct_review()
review_data['id'] = review['comment_ID']
product_info = get_row_from_list_by_field(reviews_ext['data']['product_info'], 'ID', review['comment_post_ID'])
review_data['product']['id'] = review['comment_post_ID']
if product_info:
review_data['product']['code'] = product_info['post_name']
review_data['product']['name'] = product_info['post_title']
review_data['customer']['id'] = review['user_id']
review_data['customer']['code'] = review['comment_author_email']
review_data['customer']['name'] = review['comment_author']
review_data['title'] = ''
review_data['content'] = review['comment_content']
rv_status = {
'0': 2, # pending
'1': 1, # approved
'spam': 3 # not approved
}
    review_data['status'] = rv_status.get(to_str(review['comment_approved']), 3)
review_data['created_at'] = convert_format_time(review['comment_date'])
review_data['updated_at'] = convert_format_time(review['comment_date'])
rating = self.construct_review_rating()
review_meta = get_list_from_list_by_field(reviews_ext['data']['comment_meta'], 'comment_id', review['comment_ID'])
rating['id'] = get_row_value_from_list_by_field(review_meta, 'comment_id', review['comment_ID'], 'meta_id')
rating['rate_code'] = 'default'
rating['rate'] = self.get_value_metadata(review_meta, 'rating', 5)
review_data['rating'].append(rating)
return response_success(review_data)
def get_review_id_import(self, convert, review, reviews_ext):
return review['comment_ID']
def check_review_import(self, convert, review, reviews_ext):
return True if self.get_map_field_by_src(self.TYPE_REVIEW, convert['id'], convert['code']) else False
def router_review_import(self, convert, review, reviews_ext):
return response_success('review_import')
def before_review_import(self, convert, review, reviews_ext):
return response_success()
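# Insert the review as a comment on the mapped product; source statuses are mapped to WP comment_approved values below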
def review_import(self, convert, review, reviews_ext):
lang_code = self._notice['target']['language_default']
if convert.get('store_id'):
lang_code = self._notice['map']['languages'].get(to_str(convert['store_id']))
product_id = False
if convert['product']['id'] or convert['product']['code']:
if self.is_wpml():
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['product']['id'], convert['product']['code'], lang = lang_code)
else:
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['product']['id'], convert['product']['code'])
if not product_id:
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, None, convert['product']['code'], lang = lang_code)
if not product_id:
        msg = self.warning_import_entity('Review', convert['id'], convert['code'], 'product of review does not exist.')
return response_error(msg)
customer_id = 0
if convert['customer']['id'] or convert['customer']['code']:
customer_id = self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['customer']['id'])
if not customer_id:
customer_id = 0
rv_status = {
        '2': 0,  # pending
'1': 1, # approved
'3': 'spam', # not approved
'0': 0
}
review_data = {
'comment_post_ID': product_id,
'comment_author': convert['customer']['name'],
'comment_author_email': '',
'comment_date': convert.get('created_at') if convert.get('created_at') else get_current_time(),
        'comment_date_gmt': convert.get('created_at') if convert.get('created_at') else get_current_time(),
'comment_content': convert['content'] if convert['content'] else '',
'comment_karma': 0,
'comment_approved': rv_status.get(str(convert['status']), 'spam'),
'comment_parent': 0,
'comment_type': "review",
'user_id': customer_id
}
review_query = self.create_insert_query_connector("comments", review_data)
review_id = self.import_review_data_connector(review_query, True, convert['id'])
if not review_id:
msg = self.warning_import_entity('Review', convert['id'], convert['code'])
return response_error(msg)
self.insert_map(self.TYPE_REVIEW, convert['id'], review_id, convert['code'])
return response_success(review_id)
def after_review_import(self, review_id, convert, review, reviews_ext):
ratings = convert['rating']
for rating in ratings:
comment_meta = {
'rating': to_int(rating['rate'])
}
for meta_key, meta_value in comment_meta.items():
meta_insert = {
'comment_id': review_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("commentmeta", meta_insert)
self.import_data_connector(meta_query, 'review')
return response_success()
def addition_review_import(self, convert, review, reviews_ext):
return response_success()
# TODO: Page
def check_page_import(self, convert, page, pages_ext):
return True if self.get_map_field_by_src(self.TYPE_PAGE, convert['id'], convert['code'], lang = self._notice['target']['language_default']) else False
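# Import a CMS page as a WP post, suffixing the source id onto the slug until it no longer collides with an already-mapped page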
def page_import(self, convert, page, pages_ext):
language_code = convert.get('language_code')
if self.is_wpml() and not language_code:
language_code = self._notice['target']['language_default']
code_name = convert['title']
code_name = self.sanitize_title(code_name).strip('-')
if self.is_wpml() and language_code:
code_name = code_name + '-' + language_code
check_slug_exist = True
while check_slug_exist:
check_slug_exist = True if self.select_map(self._migration_id, self.TYPE_PAGE, None, None, None, code_name, None, language_code) else False
if check_slug_exist:
code_name += to_str(get_value_by_key_in_dict(convert, 'id', ''))
parent_id = self.get_map_field_by_src(self.TYPE_PAGE, to_int(convert['parent_id']), None, language_code)
if not parent_id:
parent_id = 0
data = {
'post_author': 1,
'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_content': convert['content'] if convert['content'] else "",
'post_title': convert['title'],
'post_status': 'publish' if convert['status'] else 'trash',
'comment_status': convert.get('comment_status', 'open'),
'ping_status': 'open',
'post_name': code_name[:200],
'post_modified': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_modified_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_parent': parent_id,
'post_type': 'page',
'comment_count': 0,
'guid': '',
'post_excerpt': '',
'to_ping': '',
'pinged': '',
'post_content_filtered': '',
'menu_order': get_value_by_key_in_dict(convert, 'sort_order', 0)
}
page_query = self.create_insert_query_connector('posts', data)
page_id = self.import_page_data_connector(page_query, True, convert['id'])
if not page_id:
        return response_error('Page ' + to_str(convert['id']) + ' import failed.')
self.insert_map(self.TYPE_PAGE, convert['id'], page_id, convert['title'], code_name, None, language_code)
return response_success(page_id)
def after_page_import(self, page_id, convert, page, pages_ext):
super().after_page_import(page_id, convert, page, pages_ext)
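    # With WPML active, register the page in icl_translations, then import each additional mapped target language as its own page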
if self.is_wpml():
source_language_code = self._notice['target']['language_default']
language_code = convert.get('language_code')
if not language_code:
language_code = source_language_code
source_language_code = None
trid = convert.get('trid')
if not trid:
trid = self.get_new_trid()
wpml_default = {
'element_type': 'post_page',
'element_id': page_id,
'trid': trid,
'language_code': language_code,
'source_language_code': source_language_code
}
self.import_data_connector(self.create_insert_query_connector("icl_translations", wpml_default), 'page')
if not convert.get('language_code'):
list_target_id = list()
for src_language_id, target_language_id in self._notice['map']['languages'].items():
if target_language_id in list_target_id or to_str(target_language_id) == to_str(self._notice['target']['language_default']):
continue
list_target_id.append(target_language_id)
page_lang = self.get_convert_data_language(convert, src_language_id)
page_lang['trid'] = trid
page_lang['language_code'] = target_language_id
page_import = self.page_import(page_lang, page, pages_ext)
if page_import['result'] == 'success':
self.after_page_import(page_import['data'], page_lang, page, pages_ext)
return response_success()
# TODO: Coupon
def prepare_coupons_import(self):
return response_success()
def prepare_coupons_export(self):
return self
def get_coupons_main_export(self):
id_src = self._notice['process']['coupons']['id_src']
limit = self._notice['setting']['coupons']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE ID > " + to_str(id_src) + " AND post_type = 'shop_coupon' ORDER BY ID ASC LIMIT " + to_str(limit),
}
coupons = self.select_data_connector(query, 'coupons')
if not coupons or coupons['result'] != 'success':
return response_error()
return coupons
def get_coupons_ext_export(self, coupons):
coupon_ids = duplicate_field_value_from_list(coupons['data'], 'ID')
coupon_id_con = self.list_to_in_condition(coupon_ids)
coupon_ext_queries = {
'postmeta': {
'type': "select",
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + coupon_id_con
},
}
coupons_ext = self.select_multiple_data_connector(coupon_ext_queries, 'products')
if (not coupons_ext) or coupons_ext['result'] != 'success':
return response_error()
return coupons_ext
def convert_coupon_export(self, coupon, coupons_ext):
coupon_data = self.construct_coupon()
coupon_data['id'] = coupon['ID']
postmeta = get_list_from_list_by_field(coupons_ext['data']['postmeta'], 'post_id', coupon['ID'])
coupon_data['code'] = coupon['post_title']
coupon_data['title'] = coupon['post_name']
coupon_data['description'] = coupon['post_excerpt']
coupon_data['status'] = True if coupon['post_status'] == 'publish' else False
coupon_data['created_at'] = convert_format_time(coupon['post_date'])
coupon_data['updated_at'] = convert_format_time(coupon['post_modified'])
coupon_data['to_date'] = convert_format_time(self.get_value_metadata(postmeta, 'date_expires'))
if not coupon_data['to_date']:
coupon_data['to_date'] = convert_format_time(self.get_value_metadata(postmeta, 'expiry_date'))
coupon_data['min_spend'] = self.get_value_metadata(postmeta, 'minimum_amount') if to_str(self.get_value_metadata(postmeta, 'minimum_amount')) != 'None' else None
coupon_data['max_spend'] = self.get_value_metadata(postmeta, 'maximum_amount') if to_str(self.get_value_metadata(postmeta, 'maximum_amount')) != 'None' else None
coupon_data['times_used'] = self.get_value_metadata(postmeta, 'usage_count')
coupon_data['usage_limit'] = self.get_value_metadata(postmeta, 'usage_limit', 0)
coupon_data['discount_amount'] = self.get_value_metadata(postmeta, 'coupon_amount')
coupon_data['usage_per_customer'] = self.get_value_metadata(postmeta, 'usage_limit_per_user')
coupon_data['type'] = self.PERCENT if self.get_value_metadata(postmeta, 'discount_type') == 'percent' else self.FIXED
coupon_data['simple_free_shipping'] = 1 if self.get_value_metadata(postmeta, 'free_shipping') == 'yes' else 0
coupon_data['limit_usage_to_x_items'] = self.get_value_metadata(postmeta, 'limit_usage_to_x_items')
product_ids = self.get_value_metadata(postmeta, 'product_ids')
if product_ids:
coupon_data['products'] = to_str(product_ids).split(',')
category_ids = self.get_value_metadata(postmeta, 'product_categories')
if category_ids:
category_ids = php_unserialize(category_ids)
if category_ids:
coupon_data['categories'] = category_ids
return response_success(coupon_data)
def get_coupon_id_import(self, convert, coupon, coupons_ext):
return coupon['ID']
def check_coupon_import(self, convert, coupon, coupons_ext):
return True if self.get_map_field_by_src(self.TYPE_COUPON, convert['id'], convert['code']) else False
def router_coupon_import(self, convert, coupon, coupons_ext):
return response_success('coupon_import')
def before_coupon_import(self, convert, coupon, coupons_ext):
return response_success()
def coupon_import(self, convert, coupon, coupons_ext):
coupon_data = {
'post_author': 1,
'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
'post_content': '',
'post_title': convert['code'] if convert['code'] else convert['title'],
'post_excerpt': self.change_img_src_in_text(get_value_by_key_in_dict(convert, 'description', '')),
'post_status': "publish" if convert['status'] else "draft",
'comment_status': 'open',
'ping_status': 'closed',
'post_password': '',
'post_name': self.strip_html_tag(convert['title']),
'to_ping': '',
'pinged': '',
'post_modified': convert['updated_at'] if convert and convert['updated_at'] and '0000-00-00' not in convert['updated_at'] else get_current_time(),
'post_modified_gmt': convert['updated_at'] if convert and convert['updated_at'] and '0000-00-00' not in convert['updated_at'] else get_current_time(),
'post_content_filtered': '',
'post_parent': 0,
'guid': self._notice['target']['cart_url'] + "/?post_type=shop_coupon&p=",
'menu_order': convert.get('menu_order', 0),
'post_type': "shop_coupon",
'post_mime_type': '',
'comment_count': 0
}
coupon_query = self.create_insert_query_connector('posts', coupon_data)
coupon_import = self.import_data_connector(coupon_query, 'coupons', convert['id'])
if not coupon_import:
return response_error()
self.insert_map(self.TYPE_COUPON, convert['id'], coupon_import, convert['code'])
return response_success(coupon_import)
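# Remap product/category ids onto the target store before writing the coupon restriction meta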
def after_coupon_import(self, coupon_id, convert, coupon, coupons_ext):
all_queries = list()
product_ids = convert.get('products')
if product_ids:
product_id_map_arr = list()
for product_id in product_ids:
map_product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, product_id)
if map_product_id and map_product_id not in product_id_map_arr:
product_id_map_arr.append(to_str(map_product_id))
if product_id_map_arr:
product_ids = ','.join(product_id_map_arr)
else:
product_ids = None
category_ids = convert.get('categories')
cate_id_map_arr = list()
if category_ids:
for category_id in category_ids:
map_cate_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category_id)
if map_cate_id and map_cate_id not in cate_id_map_arr:
cate_id_map_arr.append(to_str(map_cate_id))
coupon_meta = {
'discount_type': 'percent' if convert['type'] == self.PERCENT else 'fixed_cart' if convert['type'] == self.FIXED else 'fixed_product',
'coupon_amount': convert['discount_amount'],
'usage_limit': convert['usage_limit'],
'usage_limit_per_user': convert['usage_per_customer'],
'free_shipping': 'yes' if 'simple_free_shipping' in convert and to_str(to_int(convert['simple_free_shipping'])) == '1' else 'no',
'usage_count': convert['times_used'],
'date_expires': convert['to_date'] if (convert['to_date'] and convert['to_date'] != '0000-00-00 00:00:00') else '',
'minimum_amount': convert['min_spend'],
'maximum_amount': convert['max_spend'],
'product_ids': product_ids if product_ids else None,
'product_categories': php_serialize(cate_id_map_arr) if cate_id_map_arr else '',
'customer_email': php_serialize(convert.get('customer')),
'limit_usage_to_x_items': convert.get('limit_usage_to_x_items', 0),
}
for meta_key, meta_value in coupon_meta.items():
        # Keep the raw value: stripping commas/quotes would corrupt the CSV product id list and PHP-serialized values
        meta_insert = {
            'post_id': coupon_id,
            'meta_key': meta_key,
            'meta_value': meta_value if meta_value is not None else ''
        }
meta_query = self.create_insert_query_connector("postmeta", meta_insert)
all_queries.append(meta_query)
all_queries.append(self.create_update_query_connector('posts', {'guid': self._notice['target']['cart_url'] + "/?post_type=shop_coupon&p=" + to_str(coupon_id)}, {'ID': coupon_id}))
self.import_multiple_data_connector(all_queries, 'coupons')
return response_success()
def addition_coupon_import(self, convert, coupon, coupons_ext):
return response_success()
def display_finish_target(self):
migration_id = self._migration_id
recent_exist = self.select_row(TABLE_RECENT, {'migration_id': migration_id})
notice = json.dumps(self._notice)
if recent_exist:
self.update_obj(TABLE_RECENT, {'notice': notice}, {'migration_id': migration_id})
else:
self.insert_obj(TABLE_RECENT, {'notice': notice, 'migration_id': migration_id})
target_cart_type = self._notice['target']['cart_type']
target_setup_type = self.target_cart_setup(target_cart_type)
# if target_setup_type == 'connector':
token = self._notice['target']['config']['token']
url = self.get_connector_url('clearcache', token)
self.get_connector_data(url)
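    # Post-migration cleanup: drop cached options, then recompute comment counts, review counts, average ratings and term counts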
all_queries = list()
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'product_cat_children'"
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = '_transient_wc_attribute_taxonomies'"
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE `option_name` LIKE '%_transient_timeout_wc_report_customers%'"
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE `option_name` LIKE '%_transient_wc_report_customers%'"
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'urlrewrite_type'"
})
all_queries.append({
'type': 'query',
'query': "UPDATE `_DBPRF_posts` SET `comment_count`= (SELECT COUNT(comment_ID) FROM `_DBPRF_comments` WHERE `_DBPRF_comments`.comment_post_ID = `_DBPRF_posts`.ID AND `_DBPRF_comments`.comment_approved = 1) WHERE `post_type` IN ('product', 'post')"
})
all_queries.append({
'type': 'query',
'query': "UPDATE `_DBPRF_postmeta` SET `meta_value`= (SELECT COUNT(comment_ID) FROM `_DBPRF_comments` WHERE `_DBPRF_comments`.comment_post_ID = `_DBPRF_postmeta`.post_id AND `_DBPRF_comments`.comment_approved = 1) WHERE `meta_key` = '_wc_review_count'"
})
all_queries.append({
'type': 'query',
'query': "UPDATE `_DBPRF_postmeta` SET `meta_value`= (SELECT AVG(cmta.`meta_value`) FROM `_DBPRF_comments` AS cmt LEFT JOIN `_DBPRF_commentmeta` AS cmta ON cmt.`comment_ID` = cmta.`comment_ID` WHERE cmt.`comment_post_ID` = `_DBPRF_postmeta`.`post_id` AND cmt.comment_approved = 1 AND cmta.`meta_key` = 'rating') WHERE `meta_key` = '_wc_average_rating'"
})
all_queries.append({
'type': 'query',
'query': "UPDATE `_DBPRF_term_taxonomy` AS tt SET tt.count = (SELECT COUNT(1) AS total FROM _DBPRF_term_relationships AS tr WHERE tt.term_taxonomy_id = tr.term_taxonomy_id AND tr.object_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = 'product'))"
})
clear_cache = self.import_multiple_data_connector(all_queries)
option_data = {
'option_name': 'urlrewrite_type',
'option_value': 'urlrewrite',
'autoload': 'yes'
}
if self._notice['support'].get('seo_301'):
option_data = {
'option_name': 'urlrewrite_type',
'option_value': 'url301',
'autoload': 'yes'
}
option_query = self.create_insert_query_connector('options', option_data)
option_import = self.import_data_connector(option_query, 'options')
return response_success()
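# Python port of PHP's substr_replace(); a negative length counts back from the end of the string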
def substr_replace(self, subject, replace, start, length):
    if length is None:
return subject[:start] + replace
elif length < 0:
return subject[:start] + replace + subject[length:]
else:
return subject[:start] + replace + subject[start + length:]
def add_construct_default(self, construct):
construct['site_id'] = 1
construct['language_id'] = self._notice['src']['language_default']
return construct
def get_term_by_name(self, data):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy AS tt "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
"WHERE tt.taxonomy = 'product_visibility' AND t.name = '" + data + "'"
}
product_taxonomy = self.select_data_connector(query)
if product_taxonomy['result'] == 'success' and product_taxonomy['data']:
return product_taxonomy['data'][0]['term_taxonomy_id']
return None
def get_product_type(self, product_type):
if not self.product_types:
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy AS tt "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
"WHERE tt.taxonomy = 'product_type'"
}
product_types = self.select_data_connector(query)
if product_types['result'] == 'success' and product_types['data']:
for product_type_row in product_types['data']:
self.product_types[product_type_row['slug']] = product_type_row['term_taxonomy_id']
return self.product_types.get(product_type, 2)
def import_category_parent(self, convert_parent, lang_code = None):
category_type = self.TYPE_CATEGORY
if convert_parent.get('is_blog'):
category_type = self.TYPE_CATEGORY_BLOG
parent_exists = self.get_map_field_by_src(category_type, convert_parent['id'], convert_parent['code'], lang_code)
if parent_exists:
return response_success(parent_exists)
if self.is_wpml() and lang_code:
convert_parent['language_code'] = lang_code
for src_language_id, target_language_id in self._notice['map']['languages'].items():
if to_str(lang_code) == to_str(target_language_id):
lang_data = convert_parent
if to_str(src_language_id) in convert_parent['languages'] and convert_parent['languages'][to_str(src_language_id)]:
lang_data = convert_parent['languages'][to_str(src_language_id)]
convert_parent['name'] = lang_data['name']
convert_parent['description'] = lang_data['description']
convert_parent['short_description'] = lang_data['short_description']
convert_parent['meta_title'] = lang_data['meta_title']
convert_parent['meta_keyword'] = lang_data['meta_keyword']
convert_parent['meta_description'] = lang_data['meta_description']
convert_parent['url_key'] = lang_data.get('url_key', '')
category = get_value_by_key_in_dict(convert_parent, 'category', dict())
categories_ext = get_value_by_key_in_dict(convert_parent, 'categories_ext', dict())
category_parent_import = self.category_import(convert_parent, category, categories_ext)
self.after_category_import(category_parent_import['data'], convert_parent, category, categories_ext)
return category_parent_import
def get_list_from_list_by_field_as_first_key(self, list_data, field = '', first_key = ''):
result = list()
if isinstance(list_data, dict):
for key, row in list_data.items():
if field in row:
if row[field].find(first_key) == 0:
result.append(row)
else:
if field and to_str(field) != '':
for row in list_data:
if field in row:
if row[field].find(first_key) == 0:
result.append(row)
else:
for row in list_data:
if row:
v_index = row.find(first_key)
if v_index == 0:
result.append(row)
return result
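# Normalize an image reference into a full URL plus a relative path; anything under /wp-content/uploads/ is re-rooted to the uploads folder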
def process_image_before_import(self, url, path):
if not path:
full_url = url
path = strip_domain_from_url(url)
else:
full_url = join_url_path(url, path)
if path and path.find('/wp-content/uploads/') != -1:
newpath = path.split('/wp-content/uploads/')
if newpath and to_len(newpath) > 1:
path = newpath[1]
    # '-' is placed last so it is literal; the old '.-_' formed an unintended character range
    path = re.sub(r"[^a-zA-Z0-9./_()-]", '', path)
full_url = self.parse_url(full_url)
return {
'url': full_url,
'path': path
}
def wpml_attributes_to_in_condition(self, list_keys):
if not list_keys:
return "('null')"
result = "('tax_" + "','tax_".join([str(k) for k in list_keys]) + "')"
return result
def brand_image_in_condition(self, term_ids):
if not term_ids:
return "('null')"
result = "('brand_taxonomy_image" + "','brand_taxonomy_image".join([str(k) for k in term_ids]) + "')"
return result
def detect_seo(self):
return 'default_seo'
def categories_default_seo(self, category, categories_ext):
result = list()
seo_cate = self.construct_seo_category()
seo_cate['request_path'] = self._notice['src']['config']['product_category_base'].strip('/') + '/' + to_str(category['slug'])
seo_cate['default'] = True
result.append(seo_cate)
return result
def products_default_seo(self, product, products_ext):
result = list()
if self._notice['src']['config']['product_base'].find('%product_cat%') != -1:
term_relationship = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
category_src = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_cat')
if category_src:
for product_category in category_src:
seo_product = self.construct_seo_product()
seo_product['request_path'] = self._notice['src']['config']['product_base'].strip('/') + '/' + to_str(product_category['slug']) + '/' + to_str(product['post_name'])
seo_product['category_id'] = product_category['term_id']
result.append(seo_product)
else:
seo_product = self.construct_seo_product()
seo_product['request_path'] = self._notice['src']['config']['product_base'].strip('/') + '/' + to_str(product['post_name'])
seo_product['default'] = True
result.append(seo_product)
if product['post_name']:
seo_product = self.construct_seo_product()
seo_product['request_path'] = to_str(product['post_name'])
seo_product['default'] = True
result.append(seo_product)
return result
def get_order_status_label(self, order_status):
if not order_status:
return ''
order_status = order_status.replace('wc-', '')
order_status = order_status.replace('-', ' ')
order_status = order_status.capitalize()
return order_status
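# Returns the WooCommerce attribute taxonomy id for the given attribute code,
# creating the woocommerce_attribute_taxonomies row if needed; with WPML
# active, the translated attribute label is also registered via icl_strings /
# icl_string_translations.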
def get_woo_attribute_id(self, pro_attr_code, attribute_name, language_code = None, language_attribute_data = None, attribute_type = 'select'):
# if to_str(pro_attr_code)[0:3] != 'pa_':
# pro_attr_code = "pa_" + pro_attr_code
# if self.is_wpml() and language_code != self._notice['target']['language_default']:
# attribute_data_default = self.get_convert_data_language(language_attribute_data, None, self._notice['target']['language_default'], 'option_languages')
# option_lang_name = attribute_data_default.get('option_name')
# if not option_lang_name:
# option_lang_name = attribute_data_default.get('attribute_name')
# if option_lang_name:
pro_attr_code = urllib.parse.unquote(pro_attr_code)
woo_attribute_id = self.get_map_field_by_src(self.TYPE_ATTR, None, 'pa_' + pro_attr_code)
# if woo_attribute_id:
# return woo_attribute_id
if not woo_attribute_id:
attribute_data = {
'attribute_name': pro_attr_code,
'attribute_type': attribute_type
}
attribute_result = self.select_data_connector(self.create_select_query_connector('woocommerce_attribute_taxonomies', attribute_data))
woo_attribute_id = None
if attribute_result and attribute_result['data']:
woo_attribute_id = attribute_result['data'][0]['attribute_id']
if not woo_attribute_id:
pro_attr_data = {
'attribute_name': pro_attr_code,
'attribute_label': attribute_name,
'attribute_type': attribute_type,
'attribute_orderby': "menu_order",
'attribute_public': 0,
}
woo_attribute_id = self.import_data_connector(self.create_insert_query_connector('woocommerce_attribute_taxonomies', pro_attr_data), 'products')
if woo_attribute_id:
self.insert_map(self.TYPE_ATTR, None, woo_attribute_id, 'pa_' + pro_attr_code)
if woo_attribute_id:
if self.is_wpml():
attribute_data_lang = self.get_convert_data_language(language_attribute_data, None, language_code, 'option_languages')
option_lang_name = attribute_data_lang.get('option_name')
if not option_lang_name:
option_lang_name = attribute_data_lang.get('attribute_name')
if option_lang_name != attribute_name:
translate_id = self.get_map_field_by_src('translate', woo_attribute_id, None, language_code)
if not translate_id:
translate_query = {
'icl_strings': self.create_select_query_connector('icl_strings', {'value': attribute_name, 'name': 'taxonomy singular name: ' + attribute_name}),
'icl_string_translations': {
'type': 'select',
'query': "select * from _DBPRF_icl_string_translations where string_id in (" + self.create_select_query_connector('icl_strings', {'value': attribute_name, 'name': 'taxonomy singular name: ' + attribute_name}, 'id')['query'] + ")"
}
}
select = self.select_multiple_data_connector(translate_query)
if select['result'] == 'success':
icl_string_id = None
is_translate = False
if not select['data']['icl_strings']:
icl_strings_data = {
'language': self._notice['target']['language_default'],
'context': 'WordPress',
'name': 'taxonomy singular name: ' + attribute_name,
'value': attribute_name,
'string_package_id': None,
'wrap_tag': '',
'type': 'LINE',
'title': None,
'status': 2,
'gettext_context': '',
'domain_name_context_md5': hashlib.md5(to_str('WordPresstaxonomy singular name: ' + attribute_name).encode()).hexdigest(),
'translation_priority': 'optional',
'word_count': None
}
icl_string_id = self.import_product_data_connector(self.create_insert_query_connector('icl_strings', icl_strings_data))
else:
icl_string = select['data']['icl_strings'][0]
if icl_string['language'] != language_code:
icl_string_id = icl_string['id']
check = get_row_from_list_by_field(select['data']['icl_string_translations'], 'language', language_code)
is_translate = True if check else False
else:
is_translate = True
if icl_string_id and not is_translate:
icl_string_translations_data = {
'string_id': icl_string_id,
'language': language_code,
'status': 10,
'value': option_lang_name,
'translator_id': None,
'translation_service': '',
'batch_id': 0,
'translation_date': get_current_time()
}
icl_string_translation_id = self.import_product_data_connector(self.create_insert_query_connector('icl_string_translations', icl_string_translations_data))
if icl_string_translation_id:
self.insert_map('translate', woo_attribute_id, icl_string_translation_id, None, None, None, language_code)
return woo_attribute_id
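# Returns the term_taxonomy id for an attribute value, creating the terms /
# term_taxonomy rows when missing; with WPML active, a translated term is
# inserted and linked to the default-language one through icl_translations.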
def get_woo_attribute_value(self, attribute_value, pro_attr_code, language_code = None, attribute_data = None, desc = ''):
pro_attr_code = urllib.parse.unquote(pro_attr_code)
if self.is_wpml():
value_data = self.get_convert_data_language(attribute_data, None, language_code, 'option_value_languages')
if value_data:
attribute_value = value_data['option_value_name']
attribute_value = to_str(attribute_value)[:200]
slug_default = self.get_slug_attr(attribute_data)
slug = self.get_slug_attr(attribute_data, language_code)
opt_value_id = None
# if opt_value_exist:
# return opt_value_exist['id_desc']
# opt_value_exist = self.select_map(self._migration_id, self.TYPE_ATTR_VALUE, None, None, 'pa_' + pro_attr_code, None, slug)
opt_value_exist = self.select_map(self._migration_id, self.TYPE_ATTR_VALUE, None, None, 'pa_' + pro_attr_code, None, slug, language_code)
if opt_value_exist:
if not self.is_wpml() or not language_code or language_code == self._notice['target']['language_default']:
return opt_value_exist['id_desc']
else:
opt_value_id = opt_value_exist['id_desc']
if not opt_value_id:
query = {
'type': 'select',
'query': 'SELECT * FROM _DBPRF_terms AS term LEFT JOIN _DBPRF_term_taxonomy AS taxonomy ON term.term_id = taxonomy.term_id WHERE term.name = ' + self.escape(attribute_value) + " AND taxonomy.taxonomy = " + self.escape('pa_' + pro_attr_code)
}
attribute_result = self.select_data_connector(query)
if attribute_result and attribute_result['data']:
opt_value_id = attribute_result['data'][0]['term_taxonomy_id']
if not opt_value_id:
if self.is_wpml() and language_code != self._notice['target']['language_default']:
new_slug = slug_default + '-' + to_str(language_code) if slug == slug_default else slug
else:
new_slug = slug_default
value_term = {
'name': attribute_value,
'slug': new_slug,
'term_group': 0,
}
term_id = self.import_product_data_connector(self.create_insert_query_connector('terms', value_term), 'products')
value_term_taxonomy = {
'term_id': term_id,
'taxonomy': 'pa_' + pro_attr_code,
'description': desc,
'parent': 0,
'count': 0
}
opt_value_id = self.import_product_data_connector(self.create_insert_query_connector('term_taxonomy', value_term_taxonomy), 'products')
if opt_value_id:
self.insert_map(self.TYPE_ATTR_VALUE, None, opt_value_id, 'pa_' + pro_attr_code, None, slug, language_code)
if opt_value_id:
if self.is_wpml():
attribute_data_lang = self.get_convert_data_language(attribute_data, None, language_code, 'option_value_languages')
if attribute_data_lang['option_value_name'] != attribute_value:
translate_query = {
'icl_translations': {
'type': 'select',
'query': 'select * from _DBPRF_icl_translations where trid in (select trid from _DBPRF_icl_translations where ' + self.dict_to_where_condition({'element_id': opt_value_id, 'element_type': 'tax_pa_' + pro_attr_code}) + ')'
},
'term': {
'type': 'select',
'query': 'SELECT * FROM _DBPRF_terms AS term LEFT JOIN _DBPRF_term_taxonomy AS taxonomy ON term.term_id = taxonomy.term_id WHERE term.name = ' + self.escape(attribute_data_lang['option_value_name']) + " AND taxonomy.taxonomy = " + self.escape('pa_' + pro_attr_code)
}
}
select = self.select_multiple_data_connector(translate_query)
if select['result'] == 'success':
trid = None
is_translate = False
if not select['data']['icl_translations']:
trid = self.get_new_trid()
icl_translations_data = {
'language_code': self._notice['target']['language_default'],
'element_type': 'tax_pa_' + pro_attr_code,
'element_id': opt_value_id,
'trid': trid,
'source_language_code': None,
}
icl_translation_id = self.import_product_data_connector(self.create_insert_query_connector('icl_translations', icl_translations_data))
else:
icl_translations = select['data']['icl_translations'][0]
trid = icl_translations['trid']
check = get_row_from_list_by_field(select['data']['icl_translations'], 'language_code', language_code)
is_translate = True if check else False
if trid and not is_translate:
new_slug = slug_default + '-' + to_str(language_code) if slug != slug_default else slug_default
value_term = {
'name': attribute_data_lang['option_value_name'],
'slug': new_slug,
'term_group': 0,
}
term_id = self.import_product_data_connector(self.create_insert_query_connector('terms', value_term), 'products')
value_term_taxonomy = {
'term_id': term_id,
'taxonomy': 'pa_' + pro_attr_code,
'description': desc,
'parent': 0,
'count': 0
}
opt_value_id = self.import_product_data_connector(self.create_insert_query_connector('term_taxonomy', value_term_taxonomy), 'products')
if opt_value_id:
icl_translations_data = {
'language_code': language_code,
'element_type': 'tax_pa_' + pro_attr_code,
'element_id': opt_value_id,
'trid': trid,
'source_language_code': self._notice['target']['language_default'],
}
self.import_product_data_connector(self.create_insert_query_connector('icl_translations', icl_translations_data))
self.insert_map(self.TYPE_ATTR_VALUE, None, opt_value_id, 'pa_' + pro_attr_code, None, slug, language_code)
return opt_value_id
def to_timestamp(self, value, str_format = '%Y-%m-%d %H:%M:%S'):
try:
timestamp = to_int(time.mktime(time.strptime(value, str_format)))
if timestamp:
return timestamp
return to_int(time.time())
except:
return to_int(time.time())
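# Language-aware map lookup: without WPML/Polylang (or for image mappings) it
# defers to the parent implementation; otherwise the language code becomes
# part of the lookup key.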
def get_map_field_by_src(self, map_type = None, id_src = None, code_src = None, lang = None, field = 'id_desc'):
if not self.is_wpml() and not self.is_polylang() or map_type in [self.TYPE_PATH_IMAGE, self.TYPE_IMAGE]:
return super().get_map_field_by_src(map_type, id_src, code_src, field)
if not id_src and not code_src:
return False
_migration_id = self._migration_id
# if id_src:
# code_src = None
# else:
# code_src = None
map_data = self.select_map(_migration_id, map_type, id_src, None, code_src, None, None, lang)
if not map_data:
return False
return map_data.get(field, False)
def select_map(self, _migration_id = None, map_type = None, id_src = None, id_desc = None, code_src = None, code_desc = None, value = None, lang = None):
if not self.is_wpml() and not self.is_polylang() or map_type in [self.TYPE_PATH_IMAGE, self.TYPE_IMAGE]:
return super().select_map(_migration_id, map_type, id_src, id_desc, code_src, code_desc, value)
where = dict()
if _migration_id:
where['migration_id'] = _migration_id
if map_type:
where['type'] = map_type
if id_src:
where['id_src'] = id_src
if id_desc:
where['id_desc'] = id_desc
if code_src:
where['code_src'] = code_src
if code_desc:
where['code_desc'] = code_desc
if value:
where['value'] = value
if (self.is_wpml() or self.is_polylang()) and map_type in [self.TYPE_CATEGORY, self.TYPE_PRODUCT, self.TYPE_ATTR, self.TYPE_ATTR_VALUE]:
where['lang'] = lang
if not where:
return None
result = self.select_obj(TABLE_MAP, where)
try:
data = result['data'][0]
except Exception as e:
data = None
return data
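# Stores a source-to-target id mapping in the migration map table; the
# language code is recorded as well when WPML or Polylang is active.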
def insert_map(self, map_type = None, id_src = None, id_desc = None, code_src = None, code_desc = None, value = None, lang = None):
if to_int(id_src) == 0 and to_str(id_src) != '0':
id_src = None
data_insert = {
'migration_id': self._migration_id,
'type': map_type,
'id_src': id_src,
'code_src': code_src,
'id_desc': id_desc,
'code_desc': code_desc,
'value': value,
}
if self.is_wpml() or self.is_polylang():
data_insert['lang'] = lang
insert = self.insert_obj(TABLE_MAP, data_insert)
if (not insert) or (insert['result'] != 'success'):
return False
return insert['data']
def is_wpml(self):
return self._notice[self.get_type()]['support'].get('wpml')
def is_polylang(self):
return self._notice[self.get_type()]['support'].get('polylang')
def get_convert_data_language(self, convert, src_language_id = None, target_language_id = None, key_language = 'languages'):
if not self.is_wpml() and not self.is_polylang():
return convert
list_language_data = convert.get(key_language)
if not list_language_data:
return convert
language_data = None
if src_language_id:
if list_language_data.get(to_str(src_language_id)):
language_data = list_language_data[to_str(src_language_id)]
elif target_language_id:
for src_id, data in list_language_data.items():
if self._notice['map']['languages'].get(to_str(src_id)) == target_language_id:
language_data = data
break
if not language_data:
return convert
for key_lang, value in language_data.items():
if not value:
continue
if key_lang == 'option_value_name' and convert.get('option_type') == self.OPTION_MULTISELECT and 'position_option' in convert:
value_lang = to_str(value).split(';')
if len(value_lang) > to_int(convert.get('position_option')):
value = value_lang[to_int(convert.get('position_option'))]
convert[key_lang] = value
return convert
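# Derives the default attribute slug from the option code (falling back to
# the option name); ascii slugs are capped at 28 characters, non-ascii ones
# are pre-cut to 14 characters with a 200-character sanitize limit.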
def get_pro_attr_code_default(self, option):
if self.is_wpml():
option = self.get_convert_data_language(option, None, self._notice['target']['language_default'], 'option_languages')
pro_attr_code = to_str(option['option_name']).lower()
# attribute_name = option['option_name']
pro_attr_code = pro_attr_code.replace(' ', '_')
if option['option_code']:
pro_attr_code = to_str(option['option_code']).lower()
pro_attr_code = pro_attr_code.replace(' ', '_')
pro_attr_code_len = 28
check_encode = chardet.detect(pro_attr_code.encode())
if check_encode['encoding'] != 'ascii':
pro_attr_code = pro_attr_code[0:14]
pro_attr_code_len = 200
pro_attr_code = self.sanitize_title(pro_attr_code, pro_attr_code_len)
return pro_attr_code
def get_slug_attr(self, option_value, language_code = None):
if option_value['option_value_code']:
return self.sanitize_title(to_str(option_value['option_value_code'])).lower()
attribute_value = option_value['option_value_name']
if self.is_wpml():
if not language_code:
language_code = self._notice['target']['language_default']
value_data = self.get_convert_data_language(option_value, None, language_code, 'option_value_languages')
if value_data:
attribute_value = value_data['option_value_name']
return self.sanitize_title(to_str(attribute_value).lower())
def get_key_check_default(self, attributes):
key_check = ''
for children_attribute in attributes:
if self.is_wpml():
children_attribute = self.get_convert_data_language(children_attribute, None, self._notice['target']['language_default'], 'option_value_languages')
if key_check:
key_check += '|'
key_check += to_str(children_attribute['option_name']) + ':' + to_str(children_attribute['option_value_name'])
return key_check
def lecm_rewrite_table_construct(self):
return {
'table': '_DBPRF_lecm_rewrite',
'rows': {
'id': 'INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY',
'link': 'VARCHAR(255)',
'type': 'VARCHAR(255)',
'type_id': 'INT(11)',
'redirect_type': 'SMALLINT(5)',
},
}
def is_woo2woo(self):
return self._notice['src']['cart_type'] == self._notice['target']['cart_type']
def check_sync_child(self, child, combination, check_any = False):
for attribute in combination:
if not check_any:
if to_str(child.get(attribute['option_name'])) != to_str(attribute['option_value_name']):
if to_str(child.get(to_str(attribute['option_code']).replace(' ', '-'))) != to_str(attribute['option_value_name']):
return False
elif to_str(child.get(attribute['option_name'])) and to_str(child.get(to_str(attribute['option_code']).replace(' ', '-'))) != to_str(attribute['option_value_name']):
return False
return True
def select_all_category_map(self):
where = dict()
where['migration_id'] = self._migration_id
where['type'] = self.TYPE_CATEGORY if not self.blog_running else self.TYPE_CATEGORY_BLOG
result = self.select_obj(TABLE_MAP, where)
data = list()
if result['result'] == 'success' and result['data']:
data = result['data']
result_data = list()
if data:
for row in data:
value = row['id_desc']
result_data.append(value)
return result_data
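# The three methods below track products whose variant count exceeds the
# platform limit: a csv header row is created once, a warning notice is
# emitted per product, and each offender is appended to
# media/<migration_id>/variants.csv.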
def create_file_variant_limit(self):
file_path = get_pub_path() + '/media/' + to_str(self._migration_id)
if not os.path.exists(file_path):
os.makedirs(file_path, mode = 0o777)
file_name = file_path + '/variants.csv'
column = ['src_id', 'target_id', 'name', 'sku', 'variants']
with open(file_name, mode = 'a', newline = '') as employee_file:  # newline='' avoids blank rows from the csv module on Windows
employee_writer = csv.writer(employee_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
employee_writer.writerow(column)
return
def warning_variant_limit(self, convert):
if convert['id']:
product = "#" + to_str(convert['id'])
else:
product = ': ' + to_str(convert['code'])
self.sleep_time(0, 'variant', True, msg = product)
def log_variant_limit(self, product_id, convert, variants):
self.is_variant_limit = True
file_name = get_pub_path() + '/media/' + to_str(self._migration_id) + '/variants.csv'
if not os.path.isfile(file_name):
self.create_file_variant_limit()
column = [convert['id'] if convert['id'] else convert['code'], product_id, convert['name'], convert['sku'], variants]
with open(file_name, mode = 'a', newline = '') as employee_file:
employee_writer = csv.writer(employee_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
employee_writer.writerow(column)
return
def check_slug_exist(self, slug = None):
select = {
'slug': slug,
}
category_data = self.select_data_connector(self.create_select_query_connector('terms', select))
try:
term_id = category_data['data'][0]['term_id']
except Exception:
term_id = False
return term_id
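# Builds the icl_translations insert query for an imported image attachment;
# when the image language is the target default, the row is written without a
# source language code.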
def get_query_img_wpml(self, img_id, language_code):
source_language_code = self._notice['target']['language_default']
default_language_code = language_code
if source_language_code == default_language_code:
default_language_code = source_language_code
source_language_code = None
trid = self.get_new_trid()
wpml_img_data = {
'element_type': 'post_attachment',
'element_id': img_id,
'trid': trid,
'language_code': default_language_code,
'source_language_code': source_language_code
}
wpml_img_query = self.create_insert_query_connector("icl_translations", wpml_img_data)
return wpml_img_query
def check_exist_code_product(self, code_product):
# Assumed lookup by post_name: the original passed a bare set and returned nothing.
check = self.select_data_connector(self.create_select_query_connector('posts', {'post_name': code_product}))
return bool(check and check.get('data'))
def _get_customer_lookup_id(self, user_id):
if not user_id:
return 0
select = {
'user_id': user_id,
}
customer_lookup_data = self.select_data_connector(self.create_select_query_connector('wc_customer_lookup', select))
try:
customer_lookup_id = customer_lookup_data['data'][0]['customer_id']
except Exception:
customer_lookup_id = 0
return customer_lookup_id
|
[
"noreply@github.com"
] |
phamjmanh.noreply@github.com
|
c20964abbec15db7585fd3751381d8fa15369663
|
615e9f144757adb7bf5c7f339416207d61937f72
|
/Input/eingabe_gehalt.py
|
fa72b411532a187e3687427ce28b1246bdc8717e
|
[] |
no_license
|
sewei9/Python-Fundamentals
|
8b6e608606bad598ac0ead6b913a11d380a7186c
|
b1dc4fcfcfc70ff6c43cdfbedfe78f006f05d8db
|
refs/heads/master
| 2020-06-05T03:21:40.618410
| 2020-02-07T10:16:39
| 2020-02-07T10:16:39
| 192,296,257
| 0
| 0
| null | 2020-02-07T10:16:40
| 2019-06-17T07:27:17
|
Python
|
UTF-8
|
Python
| false
| false
| 336
|
py
|
# Read the gross salary
print("Please enter your gross salary in euros:")
xgh = float(input())
# Compute the tax (18 %) and the net salary
steuern = xgh * 0.18
bg = xgh - steuern
# Output tax and net salary
print("Your tax payment amounts to:", steuern)
print("Your net salary is:", bg)
|
[
"sebastian.weiss2@ikea.com"
] |
sebastian.weiss2@ikea.com
|
0f265b3a2d2b97a028449761d2a6938118f65810
|
969cbaccd694c60b92eb14a3a3c51908bfb8217a
|
/kalkulator.py
|
eb221766207c846589c27df5ad2928b162cd434d
|
[] |
no_license
|
Asia1506/kalkulator_if_2016
|
9ff2a1b82d2d1693ae69a379a96e6f1394fee801
|
891f38aac62a913be18bcf28a1ace40a5af49358
|
refs/heads/master
| 2021-01-01T04:55:52.968136
| 2016-05-10T18:15:21
| 2016-05-10T18:15:21
| 58,479,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
def get_info():
print("This is a calculator program. Author: UEP")
def dodawanie(a, b):  # "dodawanie" = addition
return a + b
get_info()
a = int(input())
b = int(input())
print(dodawanie(a, b))
|
[
"student@student.ue.poznan.pl"
] |
student@student.ue.poznan.pl
|
66d3fe033d3d270d2c2da8ee4f9ac89370418fb2
|
0db05f7b843e8450bafd5ae23f8f70f9a9a8c151
|
/Src/StdLib/Lib/test/test_importhooks.py
|
1245cb9b7966882ffda9795583482614056031a7
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
IronLanguages/ironpython2
|
9c7f85bd8e6bca300e16f8c92f6384cecb979a6a
|
d00111890ce41b9791cb5bc55aedd071240252c4
|
refs/heads/master
| 2023-01-21T21:17:59.439654
| 2023-01-13T01:52:15
| 2023-01-13T01:52:15
| 91,620,472
| 1,171
| 288
|
Apache-2.0
| 2023-01-13T01:52:16
| 2017-05-17T21:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 8,432
|
py
|
import sys
import imp
import os
import unittest
from test import test_support
test_src = """\
def get_name():
return __name__
def get_file():
return __file__
"""
absimp = "import sub\n"
relimp = "from . import sub\n"
deeprelimp = "from .... import sub\n"
futimp = "from __future__ import absolute_import\n"
reload_src = test_src+"""\
reloaded = True
"""
test_co = compile(test_src, "<???>", "exec")
reload_co = compile(reload_src, "<???>", "exec")
test2_oldabs_co = compile(absimp + test_src, "<???>", "exec")
test2_newabs_co = compile(futimp + absimp + test_src, "<???>", "exec")
test2_newrel_co = compile(relimp + test_src, "<???>", "exec")
test2_deeprel_co = compile(deeprelimp + test_src, "<???>", "exec")
test2_futrel_co = compile(futimp + relimp + test_src, "<???>", "exec")
test_path = "!!!_test_!!!"
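# TestImporter serves a fixed set of module names from precompiled in-memory
# code objects; the Meta/Path subclasses below differ only in what they report
# as a package __path__.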
class TestImporter:
modules = {
"hooktestmodule": (False, test_co),
"hooktestpackage": (True, test_co),
"hooktestpackage.sub": (True, test_co),
"hooktestpackage.sub.subber": (True, test_co),
"hooktestpackage.oldabs": (False, test2_oldabs_co),
"hooktestpackage.newabs": (False, test2_newabs_co),
"hooktestpackage.newrel": (False, test2_newrel_co),
"hooktestpackage.sub.subber.subest": (True, test2_deeprel_co),
"hooktestpackage.futrel": (False, test2_futrel_co),
"sub": (False, test_co),
"reloadmodule": (False, test_co),
}
def __init__(self, path=test_path):
if path != test_path:
# if our class is on sys.path_hooks, we must raise
# ImportError for any path item that we can't handle.
raise ImportError
self.path = path
def _get__path__(self):
raise NotImplementedError
def find_module(self, fullname, path=None):
if fullname in self.modules:
return self
else:
return None
def load_module(self, fullname):
ispkg, code = self.modules[fullname]
mod = sys.modules.setdefault(fullname,imp.new_module(fullname))
mod.__file__ = "<%s>" % self.__class__.__name__
mod.__loader__ = self
if ispkg:
mod.__path__ = self._get__path__()
exec code in mod.__dict__
return mod
class MetaImporter(TestImporter):
def _get__path__(self):
return []
class PathImporter(TestImporter):
def _get__path__(self):
return [self.path]
class ImportBlocker:
"""Place an ImportBlocker instance on sys.meta_path and you
can be sure the modules you specified can't be imported, even
if it's a builtin."""
def __init__(self, *namestoblock):
self.namestoblock = dict.fromkeys(namestoblock)
def find_module(self, fullname, path=None):
if fullname in self.namestoblock:
return self
return None
def load_module(self, fullname):
raise ImportError, "I dare you"
class ImpWrapper:
def __init__(self, path=None):
if path is not None and not os.path.isdir(path):
raise ImportError
self.path = path
def find_module(self, fullname, path=None):
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [self.path]
try:
file, filename, stuff = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(file, filename, stuff)
class ImpLoader:
def __init__(self, file, filename, stuff):
self.file = file
self.filename = filename
self.stuff = stuff
def load_module(self, fullname):
mod = imp.load_module(fullname, self.file, self.filename, self.stuff)
if self.file:
self.file.close()
mod.__loader__ = self # for introspection
return mod
class ImportHooksBaseTestCase(unittest.TestCase):
def setUp(self):
self.path = sys.path[:]
self.meta_path = sys.meta_path[:]
self.path_hooks = sys.path_hooks[:]
sys.path_importer_cache.clear()
self.modules_before = sys.modules.copy()
def tearDown(self):
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path
sys.path_hooks[:] = self.path_hooks
sys.path_importer_cache.clear()
sys.modules.clear()
sys.modules.update(self.modules_before)
class ImportHooksTestCase(ImportHooksBaseTestCase):
def doTestImports(self, importer=None):
import hooktestmodule
import hooktestpackage
import hooktestpackage.sub
import hooktestpackage.sub.subber
self.assertEqual(hooktestmodule.get_name(),
"hooktestmodule")
self.assertEqual(hooktestpackage.get_name(),
"hooktestpackage")
self.assertEqual(hooktestpackage.sub.get_name(),
"hooktestpackage.sub")
self.assertEqual(hooktestpackage.sub.subber.get_name(),
"hooktestpackage.sub.subber")
if importer:
self.assertEqual(hooktestmodule.__loader__, importer)
self.assertEqual(hooktestpackage.__loader__, importer)
self.assertEqual(hooktestpackage.sub.__loader__, importer)
self.assertEqual(hooktestpackage.sub.subber.__loader__, importer)
TestImporter.modules['reloadmodule'] = (False, test_co)
import reloadmodule
self.assertFalse(hasattr(reloadmodule,'reloaded'))
TestImporter.modules['reloadmodule'] = (False, reload_co)
imp.reload(reloadmodule)
self.assertTrue(hasattr(reloadmodule,'reloaded'))
import hooktestpackage.oldabs
self.assertEqual(hooktestpackage.oldabs.get_name(),
"hooktestpackage.oldabs")
self.assertEqual(hooktestpackage.oldabs.sub,
hooktestpackage.sub)
import hooktestpackage.newrel
self.assertEqual(hooktestpackage.newrel.get_name(),
"hooktestpackage.newrel")
self.assertEqual(hooktestpackage.newrel.sub,
hooktestpackage.sub)
import hooktestpackage.sub.subber.subest as subest
self.assertEqual(subest.get_name(),
"hooktestpackage.sub.subber.subest")
self.assertEqual(subest.sub,
hooktestpackage.sub)
import hooktestpackage.futrel
self.assertEqual(hooktestpackage.futrel.get_name(),
"hooktestpackage.futrel")
self.assertEqual(hooktestpackage.futrel.sub,
hooktestpackage.sub)
import sub
self.assertEqual(sub.get_name(), "sub")
import hooktestpackage.newabs
self.assertEqual(hooktestpackage.newabs.get_name(),
"hooktestpackage.newabs")
self.assertEqual(hooktestpackage.newabs.sub, sub)
def testMetaPath(self):
i = MetaImporter()
sys.meta_path.append(i)
self.doTestImports(i)
def testPathHook(self):
sys.path_hooks.append(PathImporter)
sys.path.append(test_path)
self.doTestImports()
def testBlocker(self):
mname = "exceptions" # an arbitrary harmless builtin module
test_support.unload(mname)
sys.meta_path.append(ImportBlocker(mname))
self.assertRaises(ImportError, __import__, mname)
@unittest.skipIf(sys.platform == 'cli', 'No module named parser.')
def testImpWrapper(self):
i = ImpWrapper()
sys.meta_path.append(i)
sys.path_hooks.append(ImpWrapper)
mnames = ("colorsys", "urlparse", "distutils.core", "compiler.misc")
for mname in mnames:
parent = mname.split(".")[0]
for n in sys.modules.keys():
if n.startswith(parent):
del sys.modules[n]
with test_support.check_warnings(("The compiler package is deprecated "
"and removed", DeprecationWarning)):
for mname in mnames:
m = __import__(mname, globals(), locals(), ["__dummy__"])
m.__loader__ # to make sure we actually handled the import
def test_main():
test_support.run_unittest(ImportHooksTestCase)
if __name__ == "__main__":
test_main()
|
[
"pawel.jasinski@gmail.com"
] |
pawel.jasinski@gmail.com
|
04b1094f65b4a4fc7502ed6377fbc11d675ebac1
|
0d3bcb7078b5985f5ce2dd00583045d24dffebb0
|
/Exercise-1/RANSAC.py
|
8ae1470b38978b61d0c5aa87728d6a68c4e6ba6c
|
[] |
no_license
|
umerjamil16/RoboND-Perception-Exercises
|
544687dafbd91971cf07fab42d1eeeae76f90422
|
c71e70cdd15c12804e78461417f6a1772c31a89a
|
refs/heads/master
| 2020-05-24T12:38:16.450120
| 2019-05-17T19:46:09
| 2019-05-17T19:46:09
| 187,272,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
# Import PCL module
import pcl
# Load Point Cloud file
cloud = pcl.load_XYZRGB('tabletop.pcd')
# Voxel Grid filter
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
# Note: this (1) is a poor choice of leaf size
# Experiment and find the appropriate size!
LEAF_SIZE = 1
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
filename = 'voxel_downsampled.pcd'
pcl.save(cloud_filtered, filename)
# PassThrough filter
# Create a PassThrough filter object.
passthrough = cloud_filtered.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0
axis_max = 2
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
filename = 'pass_through_filtered.pcd'
pcl.save(cloud_filtered, filename)
# RANSAC plane segmentation
# Create the segmentation object
seg = cloud_filtered.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 1
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
# Extract inliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
# Save pcd for table
# pcl.save(cloud, filename)
filename = 'extracted_inliers.pcd'
pcl.save(extracted_inliers, filename)
# Extract outliers
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
# Save pcd for tabletop objects
filename = 'extracted_outliers.pcd'
pcl.save(extracted_outliers, filename)
|
[
"umerjamil16@gmail.com"
] |
umerjamil16@gmail.com
|
4d0c4be14b6b09ec3251c2838bcf588741ca742d
|
803cf1530759df60c247e7e6594bba0dae5ac72e
|
/notes_graphomaniac/urls.py
|
ee88eafd13ecb24d5866a5725931b81dc5104e8b
|
[] |
no_license
|
annalitvin/GraphomaniacNotes
|
f016873d4ec4acfb9397f4c84005ea92f1c04858
|
1d699c2b0111e7c1340e9a0a5c530abbf4da6ea0
|
refs/heads/master
| 2020-03-19T12:55:47.221439
| 2018-06-08T02:07:44
| 2018-06-08T02:07:44
| 136,550,467
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('add_notes/', views.NoteFormView.as_view(), name='note_input'),
path('notes/', views.NotesListView.as_view(), name='notes_list'),
path('success/', views.success, name='success')
]
|
[
"litvin_any@ukr.net"
] |
litvin_any@ukr.net
|
6ad19c82856fa6ac2a65b8bb061e10acfa66d584
|
91ccebfe1afcec9fe91e33d7951eedb73a115f37
|
/Sina_spider/Sina_spider/pipelines.py
|
a30ffdff77f6aed04bb7843c4e9146df6d1629d2
|
[] |
no_license
|
weinuonuo/python
|
30a96c692b4e3a4cbaf1603ee3dc3fc7f513498e
|
77aaa584277a33fe347d7d2ea495352d26aec6f7
|
refs/heads/master
| 2021-01-23T05:30:01.673276
| 2018-01-22T14:46:36
| 2018-01-22T14:46:36
| 102,471,348
| 0
| 0
| null | 2017-09-05T11:09:19
| 2017-09-05T11:09:19
| null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from . import items
import logging
class MongoDBPipleline(object):
def __init__(self):
client = pymongo.MongoClient("localhost", 27017)
db = client["Sina"]
self.Information = db["Information"]
self.Tweets = db["Tweets"]
self.Relationships = db["Relationships"]
self.Comments = db["Comments"]
self.Reposts = db['Reposts']
def process_item(self, item, spider):
""" 判断item的类型,并作相应的处理,再入数据库 """
if isinstance(item, items.InformationItem):
try:
logging.warning("向数据库存入个人信息...")
self.Information.insert(dict(item))
except Exception:
logging.warning("数据已存在,存入个人信息失败/(ㄒoㄒ)/~~")
pass
elif isinstance(item, items.TweetsItem):
try:
logging.warning("向数据库存入微博信息...")
self.Tweets.insert(dict(item))
except Exception:
logging.warning("数据已存在,存入微博信息数据失败/(ㄒoㄒ)/~~")
pass
elif isinstance(item, items.RelationshipsItem):
try:
logging.warning("向数据库存入关系数据信息...")
self.Relationships.insert(dict(item))
except Exception:
logging.warning("数据已存在,存入两者关系数据失败/(ㄒoㄒ)/~~")
pass
elif isinstance(item,items.CommentsItem):
try:
logging.warning("向数据库存入微博评论信息...")
self.Comments.insert(dict(item))
except Exception:
logging.warning("数据已存在,存入数据库失败...")
pass
elif isinstance(item,items.RepostsItem):
try:
logging.warning("向数据库存入微博转发信息...")
self.Reposts.insert(dict(item))
except Exception:
logging.warning("数据已存在,存入数据库失败...")
pass
return item
# class MyImagesPipeline(ImagesPipeline):
# def file_path(self, request, response=None, info=None):
# image_guid = request.url.split('/')[-1]
# return 'full/%s' % (image_guid)
# def get_media_requests(self, item, info):
# if isinstance(item, items.InformationItem):
# for image_url in item['img_url']:
# yield Request(image_url)
# def item_completed(self, results, item, info):
# image_paths = [x['path'] for ok, x in results if ok]
# if not image_paths:
# raise DropItem("Item contains no images")
# return item
|
[
"noreply@github.com"
] |
weinuonuo.noreply@github.com
|
e74279983379618436bf5dc736d18bcc94012bc1
|
31eb73d1c84c29a462b45c706f440dcdd52eea64
|
/tensor_practicing/tensor_reshaping.py
|
d3ad1053e396304725b9dc1085e959b66bd44183
|
[] |
no_license
|
harisyammnv/streamlit-ml-apps
|
0e2ed85f12ccdc8315f55946a4a1c20586ee372e
|
42282f665def34fa34f65475a645c7f7f89dd88b
|
refs/heads/master
| 2023-02-08T07:04:50.548867
| 2020-12-20T13:13:07
| 2020-12-20T13:13:07
| 303,171,746
| 0
| 0
| null | 2020-12-20T13:13:08
| 2020-10-11T17:09:09
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
import torch
x = torch.arange(9)
x_re = x.view(3, 3) # for contiguous tensors
x_re = x.reshape(3, 3) # use this to be safe
print(x_re)
y = x_re.t()
print(y)
x1 = torch.rand((2, 5))
x2 = torch.rand((2, 5))
print(torch.cat((x1, x2), dim=0).shape)
print(torch.cat((x1, x2), dim=1).shape)
z = x_re.view(-1) # flatten
print(z)
batch = 64
x = torch.rand((batch, 2, 5))
z = x.view(batch, -1)
print(z.shape)
z = x.permute(0, 2, 1) # transpose for multiple dimensions
print(z.shape)
x = torch.arange(10)
print(x.unsqueeze(0).shape)
print(x.unsqueeze(1).shape)
print(x.unsqueeze(0).unsqueeze(1).shape)
|
[
"harisyam.bphc@gmail.com"
] |
harisyam.bphc@gmail.com
|
b931c19a220aa8fe84a292b316cf0cb51fbce4ee
|
18f1bd950c5adc6de1cc59637e685c26a03f9adf
|
/is_bst.py
|
8515f31758f61fb54b6f6a5d908a89c28514c316
|
[] |
no_license
|
alaouiib/DS_and_Algorithms_Training
|
a248d429c7ff02b6bad268e7132b31e67656441c
|
479434b1e1ae8fa62b47f718087b9fc71d78550a
|
refs/heads/main
| 2023-03-30T19:40:39.339035
| 2021-03-31T20:05:37
| 2021-03-31T20:05:37
| 324,452,373
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,218
|
py
|
import unittest
# complexity O(n) Time and Space
def is_binary_search_tree(root):
# Determine if the tree is a valid binary search tree
# idea (inspired by interviewcake):
## We do a depth-first walk through the tree,
## testing each node for validity as we go.
## If a node appears in the left subtree of an ancestor,
## it must be less than that ancestor.
## If a node appears in the right subtree of an ancestor,
## it must be greater than that ancestor.
node_and_bounds_stack = [(root,-float('inf'),float('inf'))]
while len(node_and_bounds_stack):
node, lower_bound, upper_bound = node_and_bounds_stack.pop()
# If this node violates its (exclusive) bounds, the tree is not a
# binary search tree, so return False right away
if node.value <= lower_bound or node.value >= upper_bound:
return False
if node.left:
# The left child must be less than the current node
node_and_bounds_stack.append([node.left, lower_bound, node.value])
if node.right:
# The right child must be greater than the current node
node_and_bounds_stack.append([node.right, node.value, upper_bound])
return True
# Tests (by interview cake)
class Test(unittest.TestCase):
class BinaryTreeNode(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert_left(self, value):
self.left = Test.BinaryTreeNode(value)
return self.left
def insert_right(self, value):
self.right = Test.BinaryTreeNode(value)
return self.right
def test_valid_full_tree(self):
tree = Test.BinaryTreeNode(50)
left = tree.insert_left(30)
right = tree.insert_right(70)
left.insert_left(10)
left.insert_right(40)
right.insert_left(60)
right.insert_right(80)
result = is_binary_search_tree(tree)
self.assertTrue(result)
def test_both_subtrees_valid(self):
tree = Test.BinaryTreeNode(50)
left = tree.insert_left(30)
right = tree.insert_right(80)
left.insert_left(20)
left.insert_right(60)
right.insert_left(70)
right.insert_right(90)
result = is_binary_search_tree(tree)
self.assertFalse(result)
def test_descending_linked_list(self):
tree = Test.BinaryTreeNode(50)
left = tree.insert_left(40)
left_left = left.insert_left(30)
left_left_left = left_left.insert_left(20)
left_left_left.insert_left(10)
result = is_binary_search_tree(tree)
self.assertTrue(result)
def test_out_of_order_linked_list(self):
tree = Test.BinaryTreeNode(50)
right = tree.insert_right(70)
right_right = right.insert_right(60)
right_right.insert_right(80)
result = is_binary_search_tree(tree)
self.assertFalse(result)
def test_one_node_tree(self):
tree = Test.BinaryTreeNode(50)
result = is_binary_search_tree(tree)
self.assertTrue(result)
unittest.main(verbosity=2)
|
[
"noreply@github.com"
] |
alaouiib.noreply@github.com
|
8d3d7abfac4b4248bf542c9cc6233605c8a2b7c3
|
282f240be6e7236f5388caa277b189dbe52d7359
|
/create_root_csv_pp_WH.py
|
13e1eef8bc8fd9cb0df0a8a98f63ac1ed0675a57
|
[] |
no_license
|
FFFreitas/Root-Numpy-Pandas
|
bec13c8126080ac83bbbdf543d2ffe3e469d60d0
|
47d3885a95d1bced8ccea2b22f14ab0c4c1c5849
|
refs/heads/master
| 2020-03-27T06:56:56.412727
| 2018-08-26T04:53:08
| 2018-08-26T04:53:08
| 146,149,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,654
|
py
|
#!/usr/bin/python
import sys
import ROOT
import numpy as np
from ROOT import TLorentzVector
import csv
import pandas as pd
from ROOT import TFile, TTree
from rootpy.io import root_open
from rootpy.tree import Tree, TreeChain
from rootpy.plotting import Hist
from rootpy.plotting import Hist2D
from rootpy.extern.six.moves import range
from root_numpy import hist2array, root2array
from itertools import combinations, permutations
if len(sys.argv) < 2:
print " Usage: Example1.py input_file"
sys.exit(1)
ROOT.gSystem.Load("/home/felipe/madanalysis5_1_5/tools/delphes/libDelphes")
inputFile = sys.argv[1]
# Create chain of root trees
chain1 = ROOT.TChain("Delphes")
chain1.Add(inputFile)
# Create object of class ExRootTreeReader
treeReader = ROOT.ExRootTreeReader(chain1)
numberOfEntries = treeReader.GetEntries()
# create new root file
root_name = raw_input("name of new root: ")
csv_name = raw_input("name of new csv: ")
f = root_open(root_name, "recreate")
tree = Tree("test")
tree.create_branches({'PT_l': 'F',
'MT_VH': 'F',
'PT_VH': 'F',
'PT_W': 'F',
'Cos_lw': 'F',
'DPHI_lmet': 'F',
'met': 'F',
'PT_b1': 'F',
'PT_b2': 'F',
'PT_lj1': 'F',
'PT_lj2': 'F',
'Eta_H': 'F',
'Phi_H': 'F',
'M_H': 'F',
'MT_W': 'F',
'Cos_Hb1': 'F',
'PT_H': 'F',
})
# Get pointers to branches used in this analysis
branchJet = treeReader.UseBranch("Jet")
branchElectron = treeReader.UseBranch("Electron")
branchMuon = treeReader.UseBranch("Muon")
branchPhoton = treeReader.UseBranch("Photon")
branchMET = treeReader.UseBranch("MissingET")
####################################################################
# Loop over all events
for entry in range(0, numberOfEntries):
# Load selected branches with data from specified event
treeReader.ReadEntry(entry)
##########################################################################################################
eletrons = sorted(branchElectron, key=lambda Electron: Electron.PT, reverse=True)
missing = sorted(branchMET, key=lambda MissingET: MissingET.MET, reverse=True)
elec1 = eletrons[0]
eletron1 = ROOT.TLorentzVector()
eletron1.SetPtEtaPhiE(elec1.PT,elec1.Eta,elec1.Phi,elec1.P4().E())
met = ROOT.TLorentzVector()
met.SetPtEtaPhiE(missing[0].P4().Pt(),missing[0].P4().Eta(),missing[0].P4().Phi(),missing[0].P4().E())
bjato1 = ROOT.TLorentzVector()
bjato2 = ROOT.TLorentzVector()
jato1 = ROOT.TLorentzVector()
jato2 = ROOT.TLorentzVector()
####################################################################################
bjets, ljets = [], []
for n in xrange(branchJet.GetEntries()):
if branchJet.At(n).BTag == 1:
bjets.append(branchJet.At(n))
else:
ljets.append(branchJet.At(n))
if len(bjets) >= 2:
bjets = sorted(bjets, key=lambda BJet: BJet.P4().Pt(), reverse=True)
else:
continue
if len(ljets) >= 2:
ljets = sorted(ljets, key=lambda Jet: Jet.P4().Pt(), reverse=True)
else:
continue
####################################################################################
jato1.SetPtEtaPhiE(ljets[0].P4().Pt(),ljets[0].P4().Eta(),ljets[0].P4().Phi(),ljets[0].P4().E())
jato2.SetPtEtaPhiE(ljets[1].P4().Pt(),ljets[1].P4().Eta(),ljets[1].P4().Phi(),ljets[1].P4().E())
####################################################################################
bjato1.SetPtEtaPhiE(bjets[0].P4().Pt(),bjets[0].P4().Eta(),bjets[0].P4().Phi(),bjets[0].P4().E())
bjato2.SetPtEtaPhiE(bjets[1].P4().Pt(),bjets[1].P4().Eta(),bjets[1].P4().Phi(),bjets[1].P4().E())
####################################################################################
if 115 < (bjato1 + bjato2).M() < 135:
tree.PT_l = (eletron1).Pt()
tree.met = np.abs(met.Mt())
tree.PT_b1 = (bjato1).Pt()
tree.PT_b2 = (bjato2).Pt()
tree.PT_lj1 = jato1.Pt()
tree.PT_lj2 = jato2.Pt()
tree.PT_H = (bjato1 + bjato2).Pt()
tree.Eta_H = (bjato1 + bjato2).Eta()
W = ROOT.TLorentzVector()
W = (eletron1 + met)
tree.DPHI_lmet = np.abs(eletron1.DeltaPhi(met))
tree.MT_W = np.sqrt(2*np.abs(met.Et())*np.abs(eletron1.Pt())*(1-np.cos(eletron1.DeltaPhi(met))))
tree.PT_W = W.Pt()
H = ROOT.TLorentzVector()
H = (bjato1 + bjato2)
tree.MT_VH = (W + H).Mt() #H.Mt() + np.sqrt(2*np.abs(met.Et())*np.abs(eletron1.Pt())*(1-np.cos(eletron1.DeltaPhi(met))))
tree.PT_VH = ((bjato1 + bjato2) + (eletron1 + met)).Pt()
tree.Phi_H = H.Phi()
tree.M_H = H.M()
#########################boosted objects#########################################################
Wtob = ROOT.TLorentzVector()
Wtob.SetPxPyPzE(W.Px(),W.Py(),W.Pz(),W.E())
Wboost = ROOT.TVector3()
Wboost = Wtob.BoostVector()
v = Wboost.Unit()
Htob = ROOT.TLorentzVector()
Htob.SetPxPyPzE(H.Px(),H.Py(),H.Pz(),H.E())
Hboost = ROOT.TVector3()
Hboost = Htob.BoostVector()
ang = Hboost.Unit()
bjato1.Boost(-Hboost)
tree.Cos_Hb1 = np.cos(bjato1.Angle(ang))
eletron1.Boost(-Wboost)
tree.Cos_lw = np.cos(eletron1.Angle(v))
tree.Fill()
###############################################
tree.write()
f.close()
#create the csv output
to_convert = root2array(root_name,'test')
df_conv = pd.DataFrame(to_convert)
df_conv.to_csv( csv_name + '.csv', index=False, header= df_conv.keys(), mode='w', sep=' ')
|
[
"noreply@github.com"
] |
FFFreitas.noreply@github.com
|
7e4ade14fee3082c03412550a04d502ef0ffacdb
|
77da6217bf83d41b2fe479d6e414a1df4f997b3c
|
/transient/api.py
|
a306dffbf74d4b20f133083e80405b2bbb991231
|
[
"MIT"
] |
permissive
|
zgreat/transient
|
e4deb14951dc05692bc1ccb624c66cf394bc9664
|
1cfc1fe65079ef3c75754eaa0cd97f7ebb55664a
|
refs/heads/master
| 2021-05-30T10:49:40.529829
| 2015-12-20T03:46:39
| 2015-12-20T03:46:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
from os import environ
from flask import Flask, jsonify, request, send_file
from transient.lib.database import session
app = Flask(__name__)
def run():
host = environ.get("HOST", "127.0.0.1")
port = int(environ.get("PORT", 3000))
debug = environ.get("DEBUG", False)
app.run(host=host, port=port, debug=debug)
@app.route("/")
def get_root():
return "Sup?"
@app.route("/ping")
def get_ping():
return "pong"
@app.route("/payments", methods=['POST'])
def post_payment():
from transient.services.payments import create_payment
try:
payment = create_payment(**request.json)
session.add(payment)
session.commit()
except Exception:
session.rollback()
return jsonify({
'success': False
})
else:
return jsonify({
'success': True,
'payment': payment.to_dict()
})
finally:
session.remove()
@app.route("/transactions", methods=['POST'])
def post_transaction():
from transient.services.transactions import create_transaction
try:
transaction = create_transaction(**request.json)
session.add(transaction)
session.commit()
except Exception:
session.rollback()
return jsonify({
'success': False
})
else:
return jsonify({
'success': True,
'transaction': transaction.to_dict()
})
finally:
session.remove()
@app.route("/payments/<payment_id>/qrcode.png", methods=['GET'])
def get_qrcode(payment_id):
from transient.services.payments import get_payment_qrcode
image = get_payment_qrcode(payment_id)
return serve_pil_image(image, "png")
@app.teardown_appcontext
def shutdown_session(exception=None):
session.remove()
def serve_pil_image(pil_img, img_format="jpeg"):
from StringIO import StringIO
img_io = StringIO()
pil_img.save(img_io, img_format.upper())
img_io.seek(0)
return send_file(img_io, mimetype='image/%s' % (img_format.lower()))
|
[
"sam@sammilledge.com"
] |
sam@sammilledge.com
|
4260ab2e8ab755b654a33d3503f9795531987c52
|
14b95fd582fe1f523348ea68db94dbc8e5396b8b
|
/main.py
|
3f2f0e3391b498dd6648bf65c69d964360f63fa8
|
[] |
no_license
|
YaSlavar/numerical_method
|
cb7fb1bd8720daf0581e13bf752bcf4f1bb133a6
|
d98577af099430bed9163aae338947b00f446b5d
|
refs/heads/master
| 2020-07-10T08:12:21.440755
| 2019-08-24T21:34:23
| 2019-08-24T21:34:23
| 204,214,889
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,197
|
py
|
from math import *
class Integral:
def __init__(self, a, b, eps, step, func):
"""
Numerical evaluation of definite integrals
:param a: lower integration limit (float)
:param b: upper integration limit (float)
:param eps: computation tolerance (float)
:param step: scan step (float)
:param func: the function to evaluate (str)
"""
self.func = func
self.a = a
self.b = b
self.eps = eps
self.step = step
eps_str = '{:f}'.format(self.eps)
eps_str = eps_str.rstrip("0")
self.after_dicimal = len(eps_str.split(".")[1])
def function(self, x):
exec("x={}\nres={}".format(x, self.func))
return locals()['res']
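# Left rectangle rule: sum f(x) over the left endpoint of each subinterval,
# multiplied by the step width.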
def Metod_Levych_Pryamougolnikov(self, a, b, step):
h = (b - a) / step
x = a
summ = 0
detision = "integral = {}*(".format(step)
for i in range(int(h)):
f_x = round(self.function(x), self.after_dicimal)
summ += f_x
print("x={} f(x)={}".format(x, f_x))
if i+1 == h:
detision += "{})".format(f_x)
else:
detision += "{} + ".format(f_x)
x += step
summ = summ * step
print("{} = {}".format(detision, round(summ, self.after_dicimal)))
def Metod_Srednich_Pryamougolnikov(self, a, b, step):
h = (b - a) / step
x = a + (step / 2)
summ = 0
detision = "integral = {}*(".format(step)
for i in range(int(h)):
f_x = round(self.function(x), self.after_dicimal)
summ += f_x
print("x={} f(x)={}".format(x, f_x))
if i + 1 == h:
detision += "{})".format(f_x)
else:
detision += "{} + ".format(f_x)
x += step
summ = summ * step
print("{} = {}".format(detision, round(summ, self.after_dicimal)))
def Metod_Pravych_Pryamougolnikov(self, a, b, step):
h = (b - a) / step
x = a + step
summ = 0
detision = "integral = {}*(".format(step)
for i in range(int(h)):
f_x = round(self.function(x), self.after_dicimal)
summ += f_x
print("x={} f(x)={}".format(x, f_x))
if i + 1 == h:
detision += "{})".format(f_x)
else:
detision += "{} + ".format(f_x)
x += step
summ = summ * step
print("{} = {}".format(detision, round(summ, self.after_dicimal)))
def Metod_Trapeciy(self, a, b, step):
h = (b - a) / step
x = a
summ = 0
detision = "integral = {}*(f0_fn__2 + (".format(step)
for i in range(1, int(h)+1):
x += step
f_x = round(self.function(x), self.after_dicimal)
print("x={} f(x)={}".format(x, f_x))
if i < h - 1:
summ += f_x
detision += "{} + ".format(f_x)
elif i < h:
summ += f_x
detision += "{})".format(f_x)
elif i == h:
break
f0_fn__2 = (round(self.function(a), self.after_dicimal) +
round(self.function(x), self.after_dicimal)) / 2
detision = detision.replace("f0_fn__2", "({} + {}) / 2".format(round(self.function(a), self.after_dicimal),
round(self.function(x), self.after_dicimal)))
summ += f0_fn__2
summ = summ * step
print("{} = {}".format(detision, round(summ, self.after_dicimal)))
def Metod_Parabol(self, a, b, step):
h = (b - a) / step
step_6 = step / 6
x = a
summ = 0
detision = "integral = {}/6 * (".format(h)
f_0 = round(self.function(x), self.after_dicimal)
summ += f_0
detision += "{} + ".format(f_0)
print("x={} f(x)={}".format(x, f_0))
for i in range(1, int(h) * 2):
x += step / 2
f_x = round(self.function(x), self.after_dicimal)
if i % 2 != 0:
summ += f_x * 4
detision += "4*({}) + ".format(f_x)
else:
summ += f_x * 2
detision += "2*({}) + ".format(f_x)
print("x={} f(x)={}".format(x, f_x))
x += step / 2
f_n = round(self.function(x), self.after_dicimal)
summ += f_n
summ = summ * step_6
detision += "{})".format(f_n)
print("x={} f(x)={}".format(x, f_n))
print("{} = {}".format(detision, round(summ, self.after_dicimal)))
def run(self):
print('EVALUATION OF DEFINITE INTEGRALS\n\n')
print('\nLeft rectangle method\n')
self.Metod_Levych_Pryamougolnikov(self.a, self.b, self.step)
print('\nMidpoint rectangle method\n')
self.Metod_Srednich_Pryamougolnikov(self.a, self.b, self.step)
print('\nRight rectangle method\n')
self.Metod_Pravych_Pryamougolnikov(self.a, self.b, self.step)
print('\nTrapezoidal method\n')
self.Metod_Trapeciy(self.a, self.b, self.step)
print('\nParabolic (Simpson) method\n')
self.Metod_Parabol(self.a, self.b, self.step)
class Polynome:
def __init__(self, _fns, _fns1, _fns2, _a_b, _eps):
"""
Solving nonlinear equations
:param _fns: the function itself (str)
:param _fns1: first derivative of the function (str)
:param _fns2: second derivative of the function (str)
:param _a_b: interval presumed to contain roots (tuple)
:param _eps: tolerance (float)
"""
self.fns = _fns
self.fns_1 = _fns1
self.fns_2 = _fns2
self.a = _a_b[0]
self.b = _a_b[1]
self.eps = _eps
@staticmethod
def func(funcs, x):
exec("x={}\nres={}".format(x, funcs))
return locals()['res']
def run(self):
after_dicimal = 5
ab = [self.a, self.b]
if self.func(self.fns, ab[0]) * self.func(self.fns, ab[1]) < 0:
# Bisection method
print("Bisection method")
print("Given: \n[{},{}]\n f(a) = {}, f(b) = {}\n"
.format(ab[0], ab[1], self.func(self.fns, ab[0]), self.func(self.fns, ab[1])))
i = 1
while True:
c = round(((ab[0] + ab[1]) / 2), after_dicimal)
print("Итерация {} \nc({}) = ({}+({}))/2 = {}\nf(c{}) = {}\n\n[{},{}][{},{}]"
.format(i, i, ab[0], ab[1], c, i,
round(self.func(self.fns, c), after_dicimal), ab[0], c, c, ab[1]))
if self.func(self.fns, ab[0]) * self.func(self.fns, c) < 0:
ab[1] = round(c, after_dicimal)
elif self.func(self.fns, ab[1]) * self.func(self.fns, c) < 0:
ab[0] = round(c, after_dicimal)
i += 1
if fabs(ab[0] - ab[1]) < 2 * self.eps:
c = (ab[0] + ab[1]) / 2
f_c = self.func(self.fns, c)
print("Значение функции: {} в точке: {}\n\n".format(round(f_c, after_dicimal),
round(c, after_dicimal)))
break
# Chord and tangent method
ab = [self.a, self.b]
print("Chord and tangent method")
f_a = self.func(self.fns, ab[0])
f_b = self.func(self.fns, ab[1])
f_2_a = self.func(self.fns_2, ab[0])
f_2_b = self.func(self.fns_2, ab[1])
print("Дано: \n[{},{}]\nf(a) = {}, f(b) = {}\nf''(a) = {}, f''(b) = {}\n"
.format(ab[0], ab[1], self.func(self.fns, ab[0]), self.func(self.fns, ab[1]),
self.func(self.fns_2, ab[0]), self.func(self.fns_2, ab[1])))
if abs(f_a - f_2_a) < abs(f_b - f_2_b):
print("Для касательных используем [a.. , т.к. F''(a) ,ближе к краям отрезка")
kas = self.a
hord = self.b
else:
print("Для касательных используем ..b] , т.к. F''(b) ,ближе к краям отрезка")
kas = self.b
hord = self.a
while True:
hord_out = "hord = {} - (({} - ({}))*F({})) / (F({}) - F({}))\n"\
.format(round(hord, after_dicimal), round(kas, after_dicimal), round(hord, after_dicimal),
round(hord, after_dicimal),
round(kas, after_dicimal),
round(hord, after_dicimal))
hord_out += " = {} - (({} - ({}))*{}) / ({} - {})"\
.format(round(hord, after_dicimal), round(kas, after_dicimal), round(hord, after_dicimal),
round(self.func(self.fns, hord), after_dicimal),
round(self.func(self.fns, kas), after_dicimal),
round(self.func(self.fns, hord), after_dicimal))
print(hord_out)
hord = round(hord - ((kas - hord)*self.func(self.fns, hord)) / (self.func(self.fns, kas) - self.func(self.fns, hord)), after_dicimal)
print(" = {}".format(hord))
kas_out = "kasat = {} - (F({}) / F'({}))\n"\
.format(round(kas, after_dicimal), round(kas, after_dicimal),
round(kas, after_dicimal))
kas_out += " = {} - ({} / {})" \
.format(round(kas, after_dicimal), round(self.func(self.fns, kas)),
round(self.func(self.fns_1, kas), after_dicimal))
print(kas_out)
kas = round(kas - (self.func(self.fns, kas) / self.func(self.fns_1, kas)), after_dicimal)
print(" = {}".format(kas))
print("[{},{}]".format(hord, kas))
if fabs(hord - kas) < 2 * self.eps:
answer = (hord + kas) / 2
print("Ответ: ", answer)
break
else:
print("На данном отрезке корня нет")
if __name__ == "__main__":
fns_type = input("integral or polynome: ")
if fns_type in ["i", "integral", "интеграл"]:
fns = input("Введите функцию: ")
a_b = tuple(map(float, input("Введите предел интегрирования [a b] через пробел: ").split()))
eps = float(input("Введите погрешность: "))
step = float(input("Введите шаг: "))
# fns = "(x*x)/pow((1+x), 3)"
# a_b = [0, 2.5]
# eps = 0.00001
# step = 0.5
integ = Integral(a_b[0], a_b[1], eps, step, fns)
integ.run()
else:
fns = input("Введите функцию: ")
fns1 = input("Введите первую производную функции: ")
fns2 = input("Введите вторую производную функции: ")
a_b = tuple(map(int, input("Введите отрензок [a b] на котором есть корень: ").split()))
eps = float(input("Введите погрешность: "))
# fns = "x*x*x-15*x+6"
# fns1 = "3*x*x-15"
# fns2 = "6*x"
# a_b = [1, 0]
# eps = 0.001
pol = Polynome(fns, fns1, fns2, a_b, eps)
pol.run()
|
[
"50412722+YaSlavar@users.noreply.github.com"
] |
50412722+YaSlavar@users.noreply.github.com
|
1407bc24e92e4f55c6f3995b2048cd89cb29cd65
|
82304008e8359460c7e3dd634addc6657c32e529
|
/[HW 3] python-challenge/PyBank/main.py
|
6f1e05b85257cd349a84734104a96907f9bb7947
|
[] |
no_license
|
jamesnguyen0/datasciwork
|
013819ca6ac83fe9b8e3ce7783c58a175fbecb4a
|
a520910e4b5124e73a5c4ed4cc88e0f6b6ee1cd7
|
refs/heads/master
| 2022-11-30T01:13:39.609617
| 2020-08-05T01:57:25
| 2020-08-05T01:57:25
| 271,149,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
#libraries
import os
import csv
#variables
months = 0
netChange = 0
avgChange = 0
maxIncrease = ["", 0]
maxDecrease = ["", 0]
#change calculations
current = 0
previous = 0
changeInChange = 0
#read files
pybank_csv = os.path.join("Resources","budget_data.csv")
with open(pybank_csv) as csvfile:
csvreader = csv.reader(csvfile, delimiter = ",")
#skip header
header = next(csvreader)
#loop through each row
for row in csvreader:
#basic calculations
months += 1
netChange += int(row[1])
current = int(row[1])
#don't execute if current is first value in list
if not(previous == 0):
changeInChange = current - previous
#sum the change in changes
avgChange += changeInChange
if changeInChange > int(maxIncrease[1]):
maxIncrease[0] = row[0]
maxIncrease[1] = changeInChange
if changeInChange < int(maxDecrease[1]):
maxDecrease[0] = row[0]
maxDecrease[1] = changeInChange
previous = int(row[1])
#calculate true average
avgChange = round(avgChange/(months - 1), 2)
#output to console
print("Financial Analysis")
print("----------------------------")
print(f"Total Months: {months}")
print(f"Total: ${netChange}")
print(f"Average Change: ${avgChange}")
print(f"Greatest Increase in Profits: {maxIncrease[0]} (${maxIncrease[1]})")
print(f"Greatest Decrease in Profits: {maxDecrease[0]} (${maxDecrease[1]})")
#prep text for output to .txt
text = []
text.append("Financial Analysis")
text.append("----------------------------")
text.append("Total Months: " + str(months))
text.append("Total: $" + str(netChange))
text.append("Average Change: $" + str(avgChange))
text.append("Greatest Increase in Profits: " + maxIncrease[0] + " $(" + str(maxIncrease[1]) + ")")
text.append("Greatest Decrease in Profits: " + maxDecrease[0] + " $(" + str(maxDecrease[1]) + ")")
outputtext = zip(text)
#write files
output_file = os.path.join("Analysis","PyBank_analysis.txt")
with open(output_file, 'w') as datafile:
writer = csv.writer(datafile, lineterminator='\n')
writer.writerows(outputtext)
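# Worked example (hypothetical data, not from budget_data.csv): profits [100, 150, 120] over
# 3 months give month-to-month changes [50, -30], so netChange = 370 and
# avgChange = (50 + -30) / (3 - 1) = 10.0, matching the months-1 divisor used above.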
|
[
"jamesnguyen0@gmail.com"
] |
jamesnguyen0@gmail.com
|
7c9a95200bd62582f4e0c5a22ee69a05a45879cd
|
66dace688df266de641c6e1bc7e48fdf0e403382
|
/mysite/settings.py
|
72c3e2775f6332d1152aed8f3d071ff2fb8e5d97
|
[] |
no_license
|
dendenthen/mysite
|
27291d6766c197a00fe70cb871e61267a78a46a3
|
213f26e99839247d7e3d93c22f77c8e555e063d0
|
refs/heads/master
| 2021-04-28T16:31:42.727331
| 2018-02-22T01:40:29
| 2018-02-22T01:40:29
| 122,016,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,118
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_d4rr%1+@v&&+f*0q97s)#ke13n$_w#phjpj4r9lrlt!rnis9!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"dennis.alan.herbert@gmail.com"
] |
dennis.alan.herbert@gmail.com
|
bb3c3a2fa1d72003f265ef1a73b9a36e5ea55b08
|
6b5e514aa031e19ad1574b3415ee091f71549ed7
|
/lale/lib/autogen/one_hot_encoder.py
|
b4c0c2df965bbd39464bb7bd7e9c6df55be5d876
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
noushi/lale
|
e3db3e3b7e4a4e4b5eda13303c50245612eec370
|
5ba5612737beee5fb2a387eb5f6f9bdec7ffb878
|
refs/heads/master
| 2020-12-04T06:48:13.365418
| 2019-12-29T11:42:42
| 2019-12-29T11:42:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,463
|
py
|
from sklearn.preprocessing._encoders import OneHotEncoder as SKLModel
import lale.helpers
import lale.operators
from numpy import nan, inf
class OneHotEncoderImpl():
def __init__(self, categories=None, sparse=True, dtype=None, handle_unknown='error', n_values=None, categorical_features=None):
self._hyperparams = {
'categories': categories,
'sparse': sparse,
'dtype': dtype,
'handle_unknown': handle_unknown,
'n_values': n_values,
'categorical_features': categorical_features}
def fit(self, X, y=None):
self._sklearn_model = SKLModel(**self._hyperparams)
if (y is not None):
self._sklearn_model.fit(X, y)
else:
self._sklearn_model.fit(X)
return self
def transform(self, X):
return self._sklearn_model.transform(X)
_hyperparams_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'inherited docstring for OneHotEncoder Encode categorical integer features as a one-hot numeric array.',
'allOf': [{
'type': 'object',
'required': ['categories', 'sparse', 'dtype', 'handle_unknown', 'n_values', 'categorical_features'],
'relevantToOptimizer': ['sparse'],
'additionalProperties': False,
'properties': {
'categories': {
'XXX TODO XXX': "'auto' or a list of lists/arrays of values, default='auto'.",
'description': 'Categories (unique values) per feature:',
'enum': [None],
'default': None},
'sparse': {
'type': 'boolean',
'default': True,
'description': 'Will return sparse matrix if set True else will return an array.'},
'dtype': {
'XXX TODO XXX': 'number type, default=np.float',
'description': 'Desired dtype of output.'},
'handle_unknown': {
'XXX TODO XXX': "'error' or 'ignore', default='error'.",
'description': 'Whether to raise an error or ignore if an unknown categorical feature',
'enum': ['error'],
'default': 'error'},
'n_values': {
'XXX TODO XXX': "'auto', int or array of ints, default='auto'",
'description': 'Number of values per feature.',
'enum': [None],
'default': None},
'categorical_features': {
'XXX TODO XXX': "'all' or array of indices or mask, default='all'",
'description': 'Specify what features are treated as categorical.',
'enum': [None],
'default': None},
}}],
}
_input_fit_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Fit OneHotEncoder to X.',
'type': 'object',
'properties': {
'X': {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'number'},
},
'description': 'The data to determine the categories of each feature.'},
},
}
_input_transform_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Transform X using one-hot encoding.',
'type': 'object',
'properties': {
'X': {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'number'},
},
'description': 'The data to encode.'},
},
}
_output_transform_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Transformed input.',
'XXX TODO XXX': 'sparse matrix if sparse=True else a 2-d array',
}
_combined_schemas = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'description': 'Combined schema for expected data and hyperparameters.',
'type': 'object',
'tags': {
'pre': [],
'op': ['transformer'],
'post': []},
'properties': {
'hyperparams': _hyperparams_schema,
'input_fit': _input_fit_schema,
'input_transform': _input_transform_schema,
'output_transform': _output_transform_schema},
}
if (__name__ == '__main__'):
lale.helpers.validate_is_schema(_combined_schemas)
OneHotEncoder = lale.operators.make_operator(OneHotEncoderImpl, _combined_schemas)
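# Minimal usage sketch (assumes lale's standard trainable-operator interface; X is hypothetical):
# import numpy as np
# X = np.array([[0, 1], [1, 0], [0, 2]])
# trained = OneHotEncoder(sparse=False).fit(X)
# X_encoded = trained.transform(X)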
|
[
"shinnar@us.ibm.com"
] |
shinnar@us.ibm.com
|
dbc12ce824ed5e10927e596c234bf9f282022d55
|
b14176e6931c9cb3e9606147e82cf888efa1e09e
|
/Strings/Verkefni2-move-first3-to-last.py
|
cdc9113152ea31d91dab825c1f17628c402149af
|
[] |
no_license
|
antonbui/Forritun2018
|
735d7659d99804352564f9a2c6fd015e3a7b1b94
|
792fc22dfaef9501644964d1cb14f5afc722b6cb
|
refs/heads/master
| 2020-03-28T23:53:43.476691
| 2018-09-21T17:43:26
| 2018-09-21T17:43:26
| 149,314,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
s = input("Input a string: ")
# your code here
firstthree = s[0:3]
news = s[3:]  # slicing avoids str.replace, which would drop every later occurrence of those three characters
print(news + firstthree)
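# Example: for s = "Python", firstthree is "Pyt" and the output is "honPyt".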
|
[
"anton17@ru.is"
] |
anton17@ru.is
|
1cd578338746f8fe8cfdfa8f7ea6d3b386d104cb
|
d8b131edbfb69c09e4a208f70a9e47e30db8fcde
|
/two_sum.py
|
0c905272cc35c02901ae7a9eda929eb57440c778
|
[] |
no_license
|
prade02/leetcode
|
06bf499ca02704d288e47870cf07378678187d5c
|
c20c44e76e08d7b257af780caf9147544a821152
|
refs/heads/main
| 2023-04-14T00:51:18.517231
| 2021-04-22T06:23:18
| 2021-04-22T06:23:18
| 331,373,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
"""
Approach 1: a + b = c, a - c = b, c is target, a and b is from the list, so for every a in the list search if b exists, if so a and b forms c(target)
iterate through the list, get a, in the inner iteration, start from i+1 - since sum of elements can not be same. so search for elements after i.
"""
def twoSum(nums, target):
_len = len(nums)
for i in range(_len):
search_for = target - nums[i]
for j in range(i+1, _len):
if nums[j] == search_for:
return [i, j]
x = [3,3]
print(twoSum(x, 6))
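# A common O(n) alternative (a sketch, not the original approach): remember each value's index
# in a dict and look up the complement before storing.
def twoSumFast(nums, target):
    seen = {}  # value -> index of values already visited
    for i, num in enumerate(nums):
        complement = target - num
        if complement in seen:
            return [seen[complement], i]
        seen[num] = i
print(twoSumFast(x, 6))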
|
[
"prade.ycsm@gmail.com"
] |
prade.ycsm@gmail.com
|
c8fc332424dfa6d376d0b6e7e732f5aa75d4f1d7
|
016bf8064e3c5e39c1130ad05ad0f3df6fb8c41c
|
/Seção 13 - Leitura e Escrita em Arquivos/18/main.py
|
812a06e542a4b8c4e75aab7abbf75d961e4f9fcd
|
[] |
no_license
|
JGilba/Python_Exercices
|
0f0f82ec00150e492010086d54b78f92bf591993
|
940b464f5cdc67455cecefbc9b222c362f6e97f2
|
refs/heads/master
| 2022-03-26T09:17:06.201848
| 2020-01-14T16:33:44
| 2020-01-14T16:33:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
with open('compras.txt') as arq:
preco = 0
for produto in arq.readlines():
preco += float(produto[produto.find(';')+1::])
print(f'The total price to pay is R$ {preco:.2f}')
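# Expected format of compras.txt, inferred from the parsing above: one "produto;preco" entry
# per line, e.g.
# arroz;10.50
# feijao;7.25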
|
[
"pierrevieiraggg@gmail.com"
] |
pierrevieiraggg@gmail.com
|
87f170cd5ad6e328d576ba48bab8098cc0888823
|
2f829f30be536594b70b602a9aa25feea20bd13d
|
/card.py
|
e11ae6cbf93513e1be9d4769d45b89c21e8f2eb9
|
[] |
no_license
|
Adi0687/App10_PythonOOP
|
6fd9df205d8304734c42741f535f28805cee7aa3
|
ca42f56d504dd80eb7516c67f08ab4c1953a8de0
|
refs/heads/master
| 2023-08-27T00:45:26.905682
| 2021-10-29T16:08:43
| 2021-10-29T16:08:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
import sqlite3
from seat import Seat
class Card:
database = "banking.db"
def __init__(self, type, number, cvc, holder, price):
self.price = price
self.type = type
self.number = number
self.cvc = cvc
self.holder = holder
def validate(self):
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
cursor.execute("""
SELECT type,number,cvc,holder FROM Card WHERE "type"=?
""", [self.type])
card_details = cursor.fetchall()
connection.close()
cardtype = card_details[0][0]
cardnumber = card_details[0][1]
cardcvc = card_details[0][2]
cardholder = card_details[0][3]
if self.number == cardnumber and self.cvc == cardcvc and self.holder == cardholder:
return True
else:
return False
def _balance(self):
connection = sqlite3.connect(self.database)
cursor = connection.cursor()
cursor.execute("""
SELECT balance FROM Card WHERE "type"=?
""", [self.type])
balance = cursor.fetchall()
connection.close()
return float(balance[0][0])
def balance_available(self):
balance_parsed = self._balance()
if balance_parsed - self.price > 0:
return True
else:
return False
def charge_card(self):
charge_amount = self._balance() - self.price
connection = sqlite3.connect(self.database)
connection.execute("""
UPDATE "Card" SET "balance" = ? WHERE "type" = ?
""", [charge_amount, self.type])
connection.commit()
connection.close()
if __name__ == "__main__":
card = Card(type="visa".capitalize(), number=1234567, cvc=133, holder="John Smith", price=5000.0)
if card.validate():
if card.balance_available():
card.charge_card()
else:
print("Not enough money!")
else:
print("One of card details entered are invalid")
|
[
"ferozeaadil@gmail.com"
] |
ferozeaadil@gmail.com
|
6e6b9abbb3a7ac78b74896eae31114c069d3684a
|
9cae0e2129f0f3bef362ee187fa713a475f9ce87
|
/Heuristica + CPLEX/Euristica Problema 1/data.py
|
7284b190b008a170d722070ea2855b535dc2baa6
|
[] |
no_license
|
dvarasm/Optimizacion-Rutas
|
b281c3e7236f177b77cd2753f2c930e13b916fa4
|
7fbc77e8f0a8ad43fc995efccfefab35cc16e2d4
|
refs/heads/master
| 2020-03-28T07:14:39.100115
| 2018-12-11T14:05:31
| 2018-12-11T14:05:31
| 147,889,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,504
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
costos=[ # matrix containing the distances between cities, 33x33
[0,141,305,266,719,598,1074,1240,1842,1577,1995,2004,2022,2090,2190,2150,2397,2339,2437,2487,2502,2532,2564,2712,2601,2882,2935,3233,2962,3698,4268,5056,5052],
[141,0,441,397,850,740,1205,1372,1973,1708,2127,2136,2153,2222,2321,2281,2528,2471,2569,2618,2634,2664,2697,2843,2733,3013,3066,3364,3094,3829,4399,5187,5184],
[305,441,0,55,316,388,795,961,1563,1298,1716,1725,1743,1811,1911,1871,2118,2060,2158,2208,2224,2254,2287,2433,2322,2603,2653,2954,2683,3419,3989,4777,4773],
[266,397,55,0,451,331,806,973,1574,1309,1728,1737,1754,1823,1922,1882,2129,2072,2170,2219,2235,2265,2298,2444,2334,2614,2667,2965,2695,3430,4000,4788,4785],
[719,850,316,451,0,217,373,539,1140,876,1294,1103,1320,1389,1489,1448,1695,1638,1736,1785,1801,1831,1864,2011,1900,2181,2233,2532,2261,2996,3566,4355,4351],
[598,740,388,331,217,0,572,538,1340,1075,1494,1503,1520,1588,1688,1648,1895,1838,1936,1985,2001,2031,2064,2210,2100,2380,2433,2731,2460,3196,3766,4321,4318],
[1074,1205,795,806,373,572,0,166,768,403,921,930,948,1016,1116,1076,1323,1265,1364,1413,1429,1459,1492,1638,1527,1808,1861,2159,1888,2624,3194,3982,3978],
[1240,1372,961,973,539,538,166,0,612,348,766,575,792,861,961,920,1167,1110,1208,1257,1273,1103,1336,1482,1372,1652,1705,2003,1733,2468,3038,3826,3823],
[1842,1973,1563,1574,1140,1340,768,612,0,266,241,250,268,336,436,396,643,585,684,733,749,779,812,958,847,1128,1181,1479,1208,1944,2514,3302,3298],
[1577,1708,1298,1309,876,1075,403,348,266,0,420,429,446,515,615,574,821,764,862,911,927,957,990,1137,1026,1306,1359,1658,1387,2122,2692,3480,3477],
[1995,2127,1716,1728,1294,1494,921,766,241,420,0,145,69,133,284,192,439,382,480,529,545,575,608,754,644,924,977,1275,1005,1740,2310,3098,3095],
[2004,2136,1725,1737,1303,1503,930,575,250,429,145,0,126,162,223,222,469,412,510,559,575,605,638,784,674,954,1007,1305,1034,1770,2340,3128,3125],
[2022,2153,1743,1754,1320,1520,948,792,268,446,69,126,0,81,224,140,387,330,428,477,493,523,556,703,592,872,925,1224,953,1688,2258,3046,3043],
[2090,2222,1811,1823,1389,1588,1016,861,336,515,133,162,81,0,246,133,380,323,421,470,486,516,549,695,585,865,918,1216,946,1681,2251,3039,3036],
[2190,2321,1911,1922,1489,1688,1116,961,436,615,284,223,224,246,0,154,158,221,319,368,384,414,447,594,483,764,816,1115,844,1579,2149,2938,2934],
[2150,2281,1871,1882,1448,1648,1076,920,396,574,192,222,140,133,154,0,250,193,291,340,356,386,419,566,455,735,788,1087,816,1551,2121,2909,2906],
[2397,2528,2118,2129,1695,1895,1323,1167,643,821,439,469,387,380,158,250,0,193,291,340,356,386,419,566,455,735,788,1087,816,1551,2121,2909,2906],
[2339,2471,2060,2072,1638,1838,1265,1110,585,764,382,412,330,323,221,193,193,0,105,154,170,200,233,380,269,549,602,901,630,1365,1935,2723,2720],
[2437,2569,2158,2170,1736,1936,1364,1208,684,862,480,510,428,421,319,291,291,105,0,45,68,98,131,282,171,452,504,803,532,1264,1834,2622,2622],
[2487,2618,2208,2219,1785,1985,1413,1257,733,911,529,559,477,470,368,340,340,154,45,0,79,113,148,256,145,426,478,777,506,1241,1811,2600,2596],
[2502,2634,2224,2235,1801,2001,1429,1273,749,927,545,575,493,486,384,356,356,170,68,79,0,43,78,266,155,436,488,787,516,1251,1821,2610,2606],
[2532,2664,2254,2265,1831,2031,1459,1303,779,957,575,605,523,516,414,386,386,200,98,113,43,0,33,292,149,462,515,813,543,1278,1848,2636,2633],
[2564,2697,2287,2298,1864,2064,1492,1336,812,990,608,638,556,549,447,419,419,233,131,148,78,33,0,258,134,428,481,779,508,1244,1814,2602,2598],
[2712,2843,2433,2444,2011,2210,1638,1482,958,1137,754,784,703,695,594,566,566,380,282,256,266,292,258,0,131,169,222,520,250,985,1555,2343,2340],
[2601,2733,2322,2334,1900,2100,1527,1372,847,1026,644,674,592,585,483,455,455,269,171,145,155,149,134,131,0,300,353,651,380,1116,1686,2474,2470],
[2882,3013,2603,2614,2181,2380,1808,1652,1128,1306,924,954,872,865,764,735,735,549,452,426,436,462,428,169,300,0,82,381,110,845,1415,2204,2200],
[2935,3066,2653,2667,2233,2433,1861,1705,1181,1359,977,1007,925,918,816,788,788,602,504,478,488,515,481,222,353,82,0,314,43,778,1348,2136,2033],
[3233,3364,2954,2965,2532,2731,2159,2003,1479,1658,1275,1305,1224,1216,1115,1087,1087,901,803,777,787,813,779,520,651,381,314,0,274,705,1552,2340,2337],
[2962,3094,2683,2695,2261,2460,1888,1733,1208,1387,1005,1034,953,946,844,816,816,630,532,506,516,543,508,250,380,110,43,274,0,739,1309,2097,2094],
[3698,3829,3419,3430,2996,3196,2624,2468,1944,2122,1740,1770,1688,1681,1579,1551,1551,1365,1264,1241,1251,1278,1244,985,1116,845,778,705,739,0,410,1473,1470],
[4268,4399,3989,4000,3566,3766,3194,3038,2514,2692,2310,2340,2258,2251,2149,2121,2121,1935,1834,1811,1821,1848,1814,1555,1686,1415,1348,1552,1309,410,0,1282,1452],
[5056,5187,4777,4788,4355,4321,3982,3826,3302,3480,3098,3128,3046,3039,2938,2909,2909,2723,2622,2600,2610,2636,2602,2343,2474,2204,2136,2340,2097,1473,1282,0,47],
[5052,5184,4773,4785,4351,4318,3978,3823,3298,3477,3095,3125,3043,3036,2934,2906,2906,2720,2622,2596,2606,2633,2598,2340,2470,2200,2033,2337,2094,1470,1452,47,0]
]
# list with the names of the cities 1...33
ciudades=['ARICA', 'PUTRE','IQUIQUE','POZO ALMONTE','ANTOFAGASTA','CALAMA','CHAÑARAL','COPIAPO','ILLAPEL','COQUIMBO','LOS ANDES','VALPARAISO','LAMPA','SAN JOSE DE MAIPO',
'PICHILEMU','RENGO','CONSTITUCION','LINARES','CHILLAN','PEMUCO','FLORIDA','CONCEPCION','CORONEL','TEMUCO','ANGOL','VALDIVIA','LA UNION','CASTRO','OSORNO','PUERTO AYSEN','COCHRANE','PUNTA ARENAS','PORVENIR']
|
[
"davarasm@gmail.com"
] |
davarasm@gmail.com
|
98cd25ebf843669b703254e3bdacaa4ddecbcf70
|
09e45825b9b4c81ad0894d40fb122b081aa5f666
|
/vcloud-automation/vcore/getToken.py
|
116e4081e67985ac14894128eac2be474a58dffc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
krisdigitx/python-vcloud-automation
|
dc3adbd244238aa997f761cf090f3e7915bfd5b4
|
7fa290074f9d1d485b6f161ff29e4ab5d52a4f36
|
refs/heads/master
| 2020-12-25T19:04:08.558576
| 2014-10-08T14:51:40
| 2014-10-08T14:51:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
__author__ = 'krishnaa'
import urllib2
import base64
import sys
def getToken(url,login,passwd,method):
handler = urllib2.HTTPSHandler()
opener = urllib2.build_opener(handler)
url = url + '/api/sessions'
request = urllib2.Request(url)
base64string = base64.encodestring('%s:%s' % (login, passwd))[:-1]
authheader = "Basic %s" % base64string
request.add_header("Authorization", authheader)
request.add_header("Accept",'application/*+xml;version=5.5')
request.get_method = lambda: method
try:
connection = opener.open(request)
except urllib2.HTTPError,e:
connection = e
if connection.code == 200:
data = connection.read()
print "Session code "
authtoken = connection.info().getheader('x-vcloud-authorization')
#print "Data :", data
else:
print "Unauthorized..."
print "ERROR", connection.code, connection.read()
sys.exit(1)
return authtoken
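# Hypothetical usage sketch (URL and credentials are placeholders, not from the original):
# token = getToken('https://vcloud.example.com', 'user@org', 'password', 'POST')
# The vCloud 5.5 API creates a session with an HTTP POST to /api/sessions, which is why the
# request method is overridden via request.get_method above.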
|
[
"k.shekhar@kainos.com"
] |
k.shekhar@kainos.com
|
bfb4fe299c2658b51e281f49573da38076d393a8
|
8f9e9e3fbc20bb36adeba7ab032bb161bd9c09a9
|
/adaline/AdaLineGD.py
|
b5ee13de1ae115f1c0e44990240dcf86a7784e68
|
[] |
no_license
|
Ghostfyx/machineLearning
|
b8a12bc802b157b821005416590f39f36ae44f9d
|
ed25488bc17598e795335e1527f8c59185be47f9
|
refs/heads/master
| 2020-03-28T11:43:05.809826
| 2018-09-18T10:18:43
| 2018-09-18T10:18:43
| 148,241,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Time    : 2018/8/23 8:41 AM
@Author : fanyuexiang
@Site :
@File : AdaLineGD.py
@Software: PyCharm
@version: 1.0
@describe: adaptive linear neuron (Adaline)
'''
import numpy as np
class AdaLineGD(object):
def __init__(self, eta = 0.01, n_iter = 50):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
self.w_ = np.zeros(X.shape[1]+1)
self.cost_ = []
for i in range(self.n_iter):
# batch weight update (batch gradient descent)
output = self.net_input(X=X)
errors = (y-output)
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
cost = (errors ** 2).sum() / 2.0
self.cost_.append(cost)
return self
def net_input(self,X):
    # the dead assignment to z was removed; the net input is simply X.w + bias
    return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self,X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
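# Minimal usage sketch (toy data, assumed here for illustration):
# import numpy as np
# X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
# y = np.array([-1, -1, -1, 1])
# model = AdaLineGD(eta=0.01, n_iter=50).fit(X, y)
# predictions = model.predict(X)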
|
[
"1078101544@qq.com"
] |
1078101544@qq.com
|
8a4504933d4fd77aee0bc709189049a58a5be24c
|
2e6e82c292b59d7d325d63a41b94622d5c3f055e
|
/Level1rev05/xyy_lib.py
|
9bb4e2e5783df2acf5ecc5130f7c639ecd8b9f4a
|
[] |
no_license
|
henls/Level1
|
2f0f5a71c1e6b329bec9033a02dd751e90ba93bb
|
470386859ba4a2dfe10bb20cce99f769068f30ff
|
refs/heads/master
| 2022-09-25T17:02:47.283519
| 2020-06-07T07:27:56
| 2020-06-07T07:27:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,960
|
py
|
# -*- coding: utf-8 -*-
"""
Author: 向永源
Notes: cupy functions take device variables as input and return device variables!!!
Remember: to keep FFTs from failing on the DEVICE, cast inputs with astype(np.xxxx) beforehand!!!
Updates:
2019-10-15
2019-11-22
2019-11-26
2019-12-26
"""
import math
import numpy as np
import astropy.io.fits as fits
import os
import numpy.fft as fft
import imageio
import cv2
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import PillowWriter
import scipy.ndimage as ndm
from collections import Counter
import numpy.random as rand
from numpy import sinc
import cupy as cp
####================================================= File operations
''' Read one frame from a simple FITS file
Args: filename
Returns: [data, header]
'''
def readfits(filename):
data=fits.getdata(filename)
header=fits.getheader(filename)
return [data,header]
''' Save one frame to a simple FITS file
Args: filename, data, header
Note: hdr=fits.Header() is the default; calling without hdr creates a blank header
Usage: xyy.writefits(filename, data, hdr), xyy.writefits(filename, data)
Returns: a FITS file on disk
'''
def writefits(filename, data, hdr=fits.Header()):
if os.path.exists(filename):
os.remove(filename)
fits.writeto(filename, data, hdr)
''' Find all files under a directory
Args: root path, substring the filename must contain
Returns: list of all file paths containing the given substring
'''
def file_search(dirname,filt):
result=[]
for maindir, subdir, file_name_list in os.walk(dirname):
for filename in file_name_list:
apath=os.path.join(maindir, filename)
if filt in apath:
result.append(apath)
return result
''' Find all subdirectories under a directory
Args: root path
Returns: list of all subdirectories
'''
def subdir_search(dirname):
result=[]
for maindir, subdir, file_name_list in os.walk(dirname):
for subname in subdir:
apath=os.path.join(maindir, subname)
result.append(apath)
return result
''' Create a directory
Args: tmppath
'''
def mkdir(tmppath):
if os.path.exists(tmppath):
pass
else:
os.makedirs(tmppath)
''' Stack all FITS files in a directory into a cube
Args: dirname
Returns: cube
'''
def dirfitstocube(dirname):
files = file_search(dirname,'.fits')
zsize = len(files)
data = fits.getdata(files[0])
xsize = data.shape[0]
ysize = data.shape[1]
cube = np.empty([zsize,xsize,ysize], dtype = data.dtype)
for i in range(zsize):
data = fits.getdata(files[i])
cube[i,:,:] = data
print(i)
return cube
''' Average all FITS files under a directory
Args: path
Returns: addmean (the mean frame)
'''
def dirfitsaddmean(path):
files = file_search(path,'.fits')
zsize = len(files)
if zsize == 0:
print('No FITS files in this folder!!!')
addmean=[]
if zsize >= 1:
nn=zsize
addmean=0
for i in range(zsize):
filename=files[i]
if not os.path.getsize(filename):
os.remove(filename)
nn=nn-1
continue
data = fits.getdata(filename)
addmean = addmean + data
addmean = addmean/nn
return addmean
######+========================================================== Image display and movies
'''
Display a single image
'''
def showimg(data):
plt.close()
mi=max([data.min(),data.mean()-3*data.std()])
mx=min([data.max(),data.mean()+3*data.std()])
plt.imshow(data,vmin=mi,vmax=mx,cmap='gray')
return
''' Turn a 3-D cube into a GIF
Args: cube, gif_name, nx, ny (frame size), gap (sampling interval between frames)
Returns: GIF
'''
def cubetogif(cube, gif_name, nx, ny, gap):
size = cube.shape
zsize = size[0]
xsize = size[1]
ysize = size[2]
xv = min(xsize,nx)
yv = min(ysize,ny)
zv = zsize//gap
frames = np.empty([zv,xv,yv], dtype = np.uint8)
for i in range(zv):
data = cube[i*gap,:,:].astype(np.float32)
tmp = cv2.resize(data, (xv,yv), interpolation = cv2.INTER_CUBIC)
tmp0 = ((tmp-np.min(tmp))/(np.max(tmp)-np.min(tmp))*255.0).astype(np.uint8)
frames[i,:,:] = tmp0
imageio.mimsave(gif_name, frames, 'GIF', duration = 0.05)
return
''' Turn a 3-D cube into a GIF with a frame-number watermark
Args: cube, gif_name, gsize (canvas size), gap (sampling interval between frames), nfps
Returns: GIF
Note: preferable to cubetogif
'''
def cubetogif2(cube, gif_name, gsize, gap, nfps):
fig = plt.figure(figsize = (gsize, gsize))
zsize = cube.shape[0]
xsize = cube.shape[1]
ysize = cube.shape[2]
zv = zsize//gap
frames = []
for i in range(zv):
fn = i*gap
data = cube[fn,:,:]
im = plt.imshow(data, animated=True, cmap='gray')
text = plt.text(xsize*0.15, ysize*0.85, '{:0>4d}'.format(fn), fontsize=18, style='italic', ha='left',va='bottom',wrap=True)
frames.append([im,text])
ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True,repeat_delay=1000)
writer = PillowWriter(fps=nfps, metadata=dict(artist='Me'), bitrate=1800)
ani.save(gif_name,writer=writer)
return ani
''' Turn the FITS files in a directory into a GIF with a frame-number watermark
Args: dirname, gif_name, gap (sampling interval between frames), nfps
Returns: GIF
'''
def dirfitstogif2(dirname, gif_name, gsize, gap, nfps):
fig = plt.figure(figsize = (gsize, gsize))
files = file_search(dirname,'.fits')  # pass the substring itself; a list here would raise a TypeError inside file_search
zsize = len(files)
head = fits.getheader(files[0])
xsize = head['NAXIS1']
ysize = head['NAXIS2']
zv = zsize//gap
frames = []
for i in range(zv):
fn = i*gap
data = fits.getdata(files[i])
im = plt.imshow(data, animated=True, cmap='gray')
text = plt.text(xsize*0.15, ysize*0.85, '{:0>4d}'.format(fn), fontsize=18, style='italic', ha='left',va='bottom',wrap=True)
frames.append([im,text])
ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True,repeat_delay=1000)
writer = PillowWriter(fps=nfps, metadata=dict(artist='Me'), bitrate=1800)
ani.save(gif_name,writer=writer)
return ani
######========================================================= Image correlation and shifting
''' Find the correlation-peak position
Args: reference image ini, target image obj; win defaults to 1.0 when omitted
Returns: peak-coordinate vector (displacement of the target relative to the reference)
'''
def corrmaxloc(ini, obj, win=1.0):
xsize = ini.shape[0]
ysize = ini.shape[1]
initmp = (ini - np.mean(ini))*win
inifft = fft.fft2(initmp)
objtmp = (obj - np.mean(obj))*win
objfft = fft.fft2(objtmp)
corr = np.real(fft.fftshift(fft.ifft2(np.conj(objfft)*inifft)))
maxid = np.where(corr == np.max(corr))
shiftxy = [xsize//2-maxid[0][0], ysize//2-maxid[1][0]]
return shiftxy, corr
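# Explanatory note: the cross-correlation is evaluated via the FFT as IFFT(conj(FFT(obj)) * FFT(ini));
# the offset of the correlation peak from the image center (xsize//2, ysize//2) gives the displacement
# of the target relative to the reference, which is what shiftxy returns above.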
''' Find the correlation-peak position ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GPU
Args: reference image ini_gpu, target image obj_gpu (device variables); win defaults to 1.0 when omitted
Returns: peak-coordinate vector (displacement of the target relative to the reference)
'''
def corrmaxloc_gpu(ini_gpu, obj_gpu, win=1.0):
xsize = ini_gpu.shape[0]
ysize = ini_gpu.shape[1]
initmp = (ini_gpu - cp.mean(ini_gpu))*win
inifft = cp.fft.fft2(initmp)
objtmp = (obj_gpu - cp.mean(obj_gpu))*win
objfft = cp.fft.fft2(objtmp)
corr_gpu = cp.real(cp.fft.fftshift(cp.fft.ifft2(cp.conj(objfft)*inifft)))
maxid = np.where(corr_gpu == cp.max(corr_gpu))
shiftxy = [xsize//2-maxid[0][0], ysize//2-maxid[1][0]]
return shiftxy, corr_gpu
''' Find the correlation-peak position (sub-pixel)
Args: reference image ini, target image obj
Returns: peak-coordinate vector (displacement of the target relative to the reference)
'''
def corrmaxsubloc(ini, obj, win=1.0):
xsize = ini.shape[0]
ysize = ini.shape[1]
initmp = (ini - np.mean(ini))*win
inifft = fft.fft2(initmp)
#
objtmp = (obj - np.mean(obj))*win
objfft = fft.fft2(objtmp)
corr = np.real(fft.fftshift(fft.ifft2(np.conj(objfft)*inifft)))
maxid = np.where(corr == np.max(corr))
dxy0 = [-xsize//2+maxid[0][0], -ysize//2+maxid[1][0]]
dxy0=np.minimum(np.maximum(dxy0,[-xsize//5*2,-ysize//5*2]),[xsize//5*2,ysize//5*2])
tmp = imgshift(obj,dxy0)
objtmp = (tmp - np.mean(tmp))*win
objfft = fft.fft2(objtmp)
corr = np.real(fft.fftshift(fft.ifft2(np.conj(objfft)*inifft)))
maxid = np.where(corr == np.max(corr))
nn=3
index=np.maximum([maxid[0][0],maxid[1][0]],[nn//2,nn//2])
tmp=corr[index[0]-nn//2:index[0]-nn//2+nn,index[1]-nn//2:index[1]-nn//2+nn]
tmp=tmp-np.min(tmp)
cent=centroid(tmp)
ddxy=[-dxy0[0]+xsize//2-index[0]+nn//2-cent[0], -dxy0[1]+ysize//2-index[1]+nn//2-cent[1]]
return ddxy, corr
''' Phase-shift theorem; usable for sub-pixel translation
Args: img, shift ( [dx,dy] )
Returns:
'''
def phaseshift(img,shift):
tmp=img.copy()
fftimg=fft.fftshift(fft.fft2(tmp))
xsize=img.shape[0]
ysize=img.shape[1]
[Y,X]=np.meshgrid(np.arange(ysize)-ysize//2,np.arange(xsize)-xsize//2)
tmp0=fftimg*np.exp(2.0*np.pi*(X*shift[0]/xsize+Y*shift[1]/ysize)*(-1j))
result=fft.ifft2(fft.ifftshift(tmp0)).real
return result
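# Explanatory note: this is the Fourier shift theorem, F{f(x - dx, y - dy)}(u, v)
# = F{f}(u, v) * exp(-2*pi*i*(u*dx/Nx + v*dy/Ny)), so multiplying the spectrum by the complex
# exponential above translates the image by a (possibly fractional) (dx, dy).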
''' Phase-shift theorem; usable for sub-pixel translation ~~~~~~~~~~~ GPU
Args: img_cupy, shift ( [dx,dy] )
Returns:
Benchmark: speed phaseshift_cupy > imgsubshift > imgsubshift_cupy > phaseshift
'''
def phaseshift_cupy(img_cupy,shift):
fftimg_cupy=cp.fft.fftshift(cp.fft.fft2(img_cupy))
xsize=img_cupy.shape[0]
ysize=img_cupy.shape[1]
[Y,X]=cp.meshgrid(cp.arange(ysize)-ysize//2,cp.arange(xsize)-xsize//2)
phas=cp.zeros([xsize,ysize],dtype=cp.complex64)
phas.imag=-2.0*cp.pi*cp.add(X*shift[0]/xsize,Y*shift[1]/ysize)
tmp0=cp.multiply(fftimg_cupy,cp.exp(phas))
result_cupy=cp.fft.ifft2(cp.fft.ifftshift(tmp0)).real
return result_cupy
''' Sub-pixel translation
Args: img, shift
Returns:
Note: the result is identical to the phase-shift theorem!
'''
def imgsubshift(img,shift):
tmp = img.copy()
fftimg = fft.fft2(tmp)
tmp0 = ndm.fourier_shift(fftimg, shift)
result = fft.ifft2(tmp0).real
return result
''' Sub-pixel translation
Args: img_cupy, shift
Returns:
'''
def imgsubshift_cupy(img_cupy,shift):
fftimg_cupy = cp.fft.fft2(img_cupy)
fftimg=cp.asnumpy(fftimg_cupy)
tmp0=ndm.fourier_shift(fftimg, shift)
tmp0_cupy=cp.asarray(tmp0)
result_cupy=cp.fft.ifft2(tmp0_cupy).real
return result_cupy
''' Integer translation of a 2-D array
Args: array, [dx, dy]
Returns: the shifted array
Note: @jit is not worth adding here; it slows this function down rather than speeding it up
'''
def imgshift(img,dxy):
imgout=np.copy(img)
imgout=np.roll(imgout,int(dxy[0]),axis=0)
imgout=np.roll(imgout,int(dxy[1]),axis=1)
return imgout
''' Integer translation of a 2-D array ~~~~~~~~~~~~~ GPU
Args: img_cupy, [dx, dy] (inputs must be device variables)
Returns: the shifted array (a device variable)
'''
def imgshift_cupy(img_cupy,dxy):
imgout_cupy=cp.copy(img_cupy)
imgout_cupy=cp.roll(imgout_cupy,int(dxy[0]),axis=0)
imgout_cupy=cp.roll(imgout_cupy,int(dxy[1]),axis=1)
return imgout_cupy
''' Align a 3-D cube
Args: subcube, lxp, corsize, win (window for the correlation)
Returns: nsubcube
'''
def cube_align(subcube,lxp,corsize,win=1.0):
zsize = subcube.shape[0]
xsize = subcube.shape[1]
ysize = subcube.shape[2]
corstart=[(xsize-corsize[0])//2,(ysize-corsize[1])//2]
ini = lxp[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
nsubcube = np.zeros([zsize,xsize,ysize], dtype = subcube.dtype)
for i in range(zsize):
data = subcube[i,:,:].copy()
obj = data[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
cc,corr = corrmaxloc(ini, obj, win)
nsubcube[i,:,:] = imgshift(data,[-cc[0],-cc[1]])
return nsubcube
''' Align two 3-D cubes together
Args: cube2 (high SNR), cube1 (low SNR), lxp2 (reference image), corsize, win (window for the correlation)
Returns: cube2, cube1
'''
def twocube_align(subcube2,subcube1,lxp2,corsize,win=1.0):
zsize = subcube2.shape[0]
xsize = subcube2.shape[1]
ysize = subcube2.shape[2]
corstart=[(xsize-corsize[0])//2,(ysize-corsize[1])//2]
ini = lxp2[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
nsubcube2 = np.empty([zsize,xsize,ysize], dtype = subcube2.dtype)
nsubcube1 = np.empty([zsize,xsize,ysize], dtype = subcube2.dtype)
for i in range(zsize):
data = subcube2[i,:,:].copy()
obj = data[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
cc,corr = corrmaxloc(ini, obj, win)
nsubcube2[i,:,:] = imgshift(data,[-cc[0],-cc[1]])
data = subcube1[i,:,:].copy()
nsubcube1[i,:,:] = imgshift(data,[-cc[0],-cc[1]])
return nsubcube2,nsubcube1
''' Align two 3-D cubes together (sub-pixel)
Args: cube2 (high SNR), cube1 (low SNR), lxp2 (reference image), corsize, win (window for the correlation)
Returns: cube2, cube1
'''
def twocube_align_sub(subcube2,subcube1,lxp2,corsize,win=1.0):
zsize = subcube2.shape[0]
xsize = subcube2.shape[1]
ysize = subcube2.shape[2]
corstart=[(xsize-corsize[0])//2,(ysize-corsize[1])//2]
ini = lxp2[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
nsubcube2 = np.empty([zsize,xsize,ysize], dtype = subcube2.dtype)
nsubcube1 = np.empty([zsize,xsize,ysize], dtype = subcube2.dtype)
for i in range(zsize):
data = subcube2[i,:,:].copy()
obj = data[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
cc,corr = corrmaxsubloc(ini, obj, win)
nsubcube2[i,:,:] = imgsubshift(data,[-cc[0],-cc[1]])
data = subcube1[i,:,:].copy()
nsubcube1[i,:,:] = imgsubshift(data,[-cc[0],-cc[1]])
return nsubcube2,nsubcube1
######======================================== Window functions and filters
'''
#---Description: return the amplitude and phase spectra of an image
#---Args: img
#---Returns: modul, phase
'''
def imgmodpha(img):
[xsize,ysize]=img.shape
sp=fft.fftshift(fft.fft2(img))/xsize/ysize
modul=np.abs(sp)
phase=np.angle(sp)
return modul,phase
'''
#---Description: rebuild an image from amplitude and phase spectra
#---Args: modul, phase
#---Returns: img
'''
def modphaimg(modul,phase):
[xsize,ysize]=modul.shape
sp=modul*np.exp(0+1j*phase)*xsize*ysize
img=fft.ifft2(fft.ifftshift(sp)).real
return img
''' Window function
Args: nx, ny window size,
    apod apodization fraction,
    a0=0.5 for hanning, a0=25.0/46.0 for hamming; a0 defaults to 0.5 when winsty is omitted
Usage: xyy.win(xsize,ysize,0.2,'hamm')
Returns: win
'''
def win(nx,ny,apod,winsty=0.0):
if winsty == 'hann':
a0=0.5
if winsty == 'hamm':
a0=25.0/46.0
if winsty == 0.0:
a0=0.5
nn = np.int16((apod*nx)//2*2+1)
wx = a0-(1.0-a0)*np.cos(2.0*np.pi*np.arange(nn).reshape(nn,1)/(nn-1))
maxp = np.max(wx)
maxid = np.where(wx == maxp)
c = maxid[0][0]
w1 = np.empty([nx,1], dtype = np.float32)
w1[0:c]=wx[0:c]
w1[c:nx-(nn-c)]=maxp
w1[nx-(nn-c):nx]=wx[c:nn]
nn = np.int16((apod*ny)//2*2+1)
wy = a0-(1.0-a0)*np.cos(2.0*np.pi*np.arange(nn).reshape(nn,1)/(nn-1))
maxp = np.max(wy)
maxid = np.where(wy == maxp)
c = maxid[0][0]
w2 = np.empty([ny,1], dtype = np.float32)
w2[0:c]=wy[0:c]
w2[c:ny-(nn-c)]=maxp
w2[ny-(nn-c):ny]=wy[c:nn]
win = np.dot(w1, w2.T)
win=win/np.max(win)
return win
'''
#----Description: frequency-domain image filtering (convolution)
#----Args: data, filt
#----Returns: img
'''
def ImgFilted(data,filt):
fftobj=fft.fft2(data)
fftpsf=fft.fft2(filt)
img=fft.fftshift((fft.ifft2(fftobj*fftpsf))).real.astype(np.float32)
return img
'''
#----Description: power-spectrum deconvolution of an image
#----Args: data, sitf
#----Returns: img
'''
def ImgPSDdeconv(data,sitf):
modul,phase=imgmodpha(data)
mod=np.sqrt(modul**2/(sitf+0.0001))
img=modphaimg(mod,phase)
return img
''' 2-D Gaussian function
Args: xsize, ysize, delta
Returns:
'''
def gaussf2d(xsize,ysize,delta):
xline=np.exp(-(np.arange(xsize)-xsize//2)**2/(delta)**2.0)
yline=np.exp(-(np.arange(ysize)-ysize//2)**2/(delta)**2.0)
[Y,X]=np.meshgrid(yline,xline)
return Y*X
######==================================================================================== Miscellaneous
''' Circular aperture
Args: xsize, ysize, radus
Note: the aperture is centered at (xsize//2, ysize//2)
'''
def circlepupil(xsize,ysize,radus):
pupil=np.zeros([xsize,ysize],dtype=np.float32)
[Y,X]=np.meshgrid(np.arange(ysize)-ysize//2,np.arange(xsize)-xsize//2)
R=np.sqrt(Y*Y+X*X)
pupil=np.where(R<=radus,1.0,0.0)
return pupil
''' Radial (azimuthal) average of an image
Args: data
Returns: datanew
'''
def imgradusmean(data):
xsize,ysize=data.shape
[Y,X]=np.meshgrid(np.arange(ysize)-ysize//2,np.arange(xsize)-xsize//2)
R=np.sqrt(X*X+Y*Y)
datanew=np.zeros([xsize,ysize],dtype=np.float32)
datanew[xsize//2,ysize//2]=data[xsize//2,ysize//2]
for i in np.arange(0,np.min([xsize,ysize])//2):
mask=np.where(R<=i+1,1.0,0.0)-np.where(R<=i,1.0,0.0)
val=np.sum(mask*data)/np.sum(mask)
datanew=datanew+val*mask
return datanew
''' Return the centroid coordinates
Args: img
Returns: [dx, dy]
'''
def centroid(img):
xsize=img.shape[0]
ysize=img.shape[1]
[Y,X]=np.meshgrid(np.arange(ysize),np.arange(xsize))
dx = np.sum(X*img)/np.sum(img)
dy = np.sum(Y*img)/np.sum(img)
return [dx,dy]
''' Bin a 2-D array
Args: input image img, bins
Returns: the binned image new
'''
def imgbin(img, bins):
xsize = img.shape[0]
ysize = img.shape[1]
newxsize = xsize//bins
newysize = ysize//bins
cpy = img.copy()
tmp = np.empty([xsize,newysize], dtype = np.int16)
for i in range(xsize):
tmp[i,:] = cpy[i,:newysize*bins].reshape(-1,bins).mean(axis=1)
new = np.empty([newxsize,newysize], dtype = np.int16)
for i in range(newysize):
new[:,i] = tmp[:newxsize*bins,i].reshape(-1,bins).mean(axis=1)
return new
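# Worked example (hypothetical): with bins=2, a 4x4 image is reduced to 2x2 by averaging each
# 2x2 block, e.g. [[1,1,2,2],[1,1,2,2],[3,3,4,4],[3,3,4,4]] -> [[1,2],[3,4]]
# (values are truncated to int16 by the dtype used above).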
''' Pearson product-moment correlation coefficient of two images (2-D arrays)
Args: imga, imgb
Returns:
'''
def prs_cor_coef(ima,imb):
imga=ima.copy()/np.mean(ima)
imgb=imb.copy()/np.mean(imb)
cov=np.sum((imga-np.mean(imga))*(imgb-np.mean(imgb)))
tha=(np.sum((imga-np.mean(imga))**2.0))**0.5
thb=(np.sum((imgb-np.mean(imgb))**2.0))**0.5
cc=cov/(tha*thb)
return cc
'''
#---Description: Euclidean distance of every pixel from the origin (the 2-D image center)
#---Args: row (number of rows), col (number of columns)
#---Returns: array
'''
def dist(row,col):
[X,Y]=np.meshgrid(np.arange(col)-col//2,np.arange(row)-row//2)
dist=(X**2.0+Y**2.0)**0.5
return dist
######################===================================================== Transfer functions
'''
# Function: OTF of an annular pupil at a given spatial frequency; geometric method, normalized autocorrelation area
# Args: a: obscuration ratio, a=0 means a clear circular aperture; rho: spatial frequency
# Returns: normalized OTF value at that frequency
'''
def ringotfcal(a, rho):
if (rho < 0) or (rho > 1): return 0.0
if (a < 0) or (a > 1): return 0.0
#----------------------
r=0.5
if (rho < 2.0*r):
c=2.0*math.acos(rho/(r*2.0))*r**2-rho*math.sqrt(r**2-rho**2/4.0)
else:
c=0.0
if (rho < 2.0*r*a):
e=2.0*math.acos(rho/(a*r*2.0))*(r*a)**2-rho*math.sqrt((r*a)**2-rho**2/4.0)
else:
e=0.0
if (rho <= r+a*r) and (rho > r-a*r):
s1=0.5*math.acos(((r*a)**2+rho**2-r**2)/(2.0*r*a*rho))*(r*a)**2
s2=0.5*math.acos((rho**2+r**2-(r*a)**2)/(2.0*r*rho))*r**2
s3=0.5*math.sin(math.acos(((r*a)**2+rho**2-r**2)/(2.0*a*r*rho)))*a*r*rho
d=2.0*(s1+s2-s3)
else:
if rho <= r-a*r :
d=math.pi*(a*r)**2
else :
d=0.0
#--------------------------
h=(c+e-2*d)/(math.pi*r**2)
if rho == 0 :
h=1-a**2
return h
'''
# Function: telescope OTF for a given annular pupil and sampling scale; a=0 means a clear circular aperture
# Returns: 2-D array, the OTF
'''
def telotf(a, maxfre, width):
half=width//2
cent=width//2
otf=np.zeros([width,width],dtype=np.float32)
for i in range(width):
for j in range(width):
fre=np.sqrt((i-cent)**2+(j-cent)**2)/half*maxfre
freq=np.minimum(fre,1.0)
otf[i,j]= ringotfcal(a, freq)
otf=otf/otf[cent,cent]
return otf
'''
# Function: atmospheric short-exposure transfer function
# Returns: OTF value at a given spatial frequency
'''
def atsotfcal(diameter, r0, fre):
#otf=np.exp(-3.44*(diameter/r0*fre)**(5/3)*(1-fre**(1/3)))
otf=np.exp(-3.44*(diameter/r0*fre)**(5/3)*(1-np.exp(-fre**3)*fre**(1/3))) # suppresses the high-frequency upturn
return otf
'''
# Function: atmospheric short-exposure transfer function
# Returns: 2-D array
'''
def atsotf(diameter, r0, width, maxfre):
half=width//2
cent=width//2
[Y,X]=np.meshgrid(np.arange(width)-half,np.arange(width)-half)
fre=np.sqrt(X*X+Y*Y)/half*maxfre
freq=np.minimum(fre,1.0)
sotf=np.exp(-3.44*(diameter/r0*freq)**(5/3)*(1-np.exp(-freq**3)*freq**(1/3))) # suppresses the high-frequency upturn
sotf=sotf/sotf[cent,cent]
return sotf
'''
# Function: combined system short-exposure transfer function
# Returns: 2-D array, the OTF
'''
def sotf(diameter, a, r0, maxfre, width):
half=width//2
cent=width//2
sotf=np.zeros([width,width],dtype=np.float32)
for i in range(width):
for j in range(width):
fre=np.sqrt((i-cent)**2+(j-cent)**2)/half*maxfre
freq=np.minimum(fre,1.0)
sotf[i,j]=ringotfcal(a, freq)*atsotfcal(diameter, r0, freq)
sotf=sotf/sotf[cent,cent]
return sotf
'''
# Function: atmospheric long-exposure transfer function
# Returns: OTF value at a given spatial frequency
'''
def atlotfcal(diameter, r0, fre):
otf=np.exp(-3.44*(diameter/r0*fre)**(5/3))
return otf
'''
# Function: atmospheric long-exposure transfer function
# Returns: 2-D array
'''
def atlotf(diameter, r0, width, maxfre):
half=width//2
cent=width//2
[Y,X]=np.meshgrid(np.arange(width)-half,np.arange(width)-half)
fre=np.sqrt(X*X+Y*Y)/half*maxfre
freq=np.minimum(fre,1.0)
lotf=np.exp(-3.44*(diameter/r0*freq)**(5/3))
lotf=lotf/lotf[cent,cent]
return lotf
'''
# Function: combined system long-exposure transfer function
# Returns: 2-D array, the OTF
'''
def lotf(diameter, a, r0, maxfre, width):
cent=width//2
half=width//2
lotf=np.zeros([width,width],dtype=np.float32)
for i in range(width):
for j in range(width):
fre=np.sqrt((i-cent)**2+(j-cent)**2)/half*maxfre
freq=np.minimum(fre,1.0)
lotf[i,j]=ringotfcal(a, freq)*atlotfcal(diameter, r0, freq)
lotf=lotf/lotf[cent,cent]
return lotf
'''
# Description: compute the standard spectral ratios (short exposure)
# Inputs: sitfdata, diameter, diaratio, maxfre, subsize, start_r0, step_r0
# Returns: 3-D array
'''
def sotfsrstand(sitfdata,diameter,diaratio,maxfre,subsize,start_r0,step_r0):
r0num=sitfdata.shape[0]
TelOtf=telotf(diaratio, maxfre, subsize)
sotfsrstand=np.zeros([r0num,subsize, subsize], dtype=np.float32)
for i in range(r0num):
subsitf=GetSitf(sitfdata,maxfre,subsize,i)
r0=start_r0+step_r0*i
AtSotf=atsotf(diameter, r0, subsize, maxfre)
sotf=TelOtf*AtSotf
sotfsr=sotf**2/(subsitf)
sotfsr=sotfsr/sotfsr[subsize//2,subsize//2]
sotfsrstand[i,:,:]=sotfsr
return sotfsrstand
'''
# Description: compute the standard spectral ratios (long exposure)
# Inputs: sitfdata, diameter, diaratio, maxfre, subsize, start_r0, step_r0
# Returns: 3-D array
'''
def lotfsrstand(sitfdata,diameter,diaratio,maxfre,subsize,start_r0,step_r0):
r0num=sitfdata.shape[0]
TelOtf=telotf(diaratio, maxfre, subsize)
lotfsrstand=np.zeros([r0num,subsize, subsize], dtype=np.float32)
for i in range(r0num):
subsitf=GetSitf(sitfdata,maxfre,subsize,i)
r0=start_r0+step_r0*i
AtLotf=atlotf(diameter, r0, subsize, maxfre)
lotf=TelOtf*AtLotf
lotfsr=lotf**2/(subsitf)
lotfsr=lotfsr/lotfsr[subsize//2,subsize//2]
lotfsrstand[i,:,:]=lotfsr
return lotfsrstand
'''
# Description: compute the spectral ratio of a 3-D cube
# Inputs: cubesub, winsr
# Returns: sr (2-D array)
'''
def cubesrcal(cubesub,winsr):
srsize=cubesub.shape[1]
corsize=[int(srsize*0.8),int(srsize*0.8)]
#----- compute the mean frame
meanf=np.mean(cubesub,axis=0)
#----- mean value of the mean frame
mfval=np.mean(meanf)
#----- align the cube to the mean frame
cubesubalign=cube_align(cubesub,meanf,corsize)
#------------- apply the window to the mean stacked frame
lxp=(meanf-mfval)*winsr+mfval
#----- mean value of every frame
meanv=np.mean(cubesubalign,axis=(1,2))
mvcast=meanv[:,None,None]
#---- (each frame - its mean) * window + its mean
cubesubalignwin=(cubesubalign-mvcast)*winsr+mvcast
#------- Fourier transform (per-frame spectrum)
cubesp=fft.fftshift(fft.fft2(cubesubalignwin,axes=(1,2)),axes=(1,2))
psd=Psdcubecal(cubesp)
psd=psd/psd[srsize//2,srsize//2]
psdnd,noise=Psdnd(psd)
psdnd=psdnd/psdnd[srsize//2,srsize//2]
#-------------- compute the mean short-exposure transfer function
sotf2=np.abs(np.fft.fftshift(np.fft.fft2(lxp)))**2
sotf2=sotf2/sotf2[srsize//2,srsize//2]
sotf2nd,noise2=Psdnd(sotf2)
sotf2nd=sotf2nd/sotf2nd[srsize//2,srsize//2]
#-------------- compute the spectral ratio
sr=sotf2/(psdnd+0.0000001)
sr=sr/sr[srsize//2,srsize//2]
return sr,sotf2,psd,noise
'''
# Description: derive r0 from the spectral ratio
# Inputs: sr, srarry (standard spectral ratios), maxfre (sets the cutoff-frequency position), low (inner radius of the annular fit region), hig (outer radius of the annular fit region)
# Returns: r0
'''
def srdevr0(sr,srarry,maxfre,low,hig,start_r0,step_r0):
srsize=sr.shape[1]
[Y,X]=np.meshgrid(np.arange(srsize)-srsize//2,np.arange(srsize)-srsize//2)
mask1=np.where(np.sqrt(X**2+Y**2)<=(srsize//2)/(1.0*maxfre)*hig, 1.0, 0.0)
mask2=np.where(np.sqrt(X**2+Y**2)>=(srsize//2)/(1.0*maxfre)*low, 1.0, 0.0)
masksr=mask1*mask2
diff1=(srarry-sr)*masksr
diff2=diff1**2.0
valarr=np.sum(diff2,axis=(1,2))
idex=np.where(valarr==np.min(valarr))[0][0]
r0=start_r0+step_r0*idex
return r0,masksr
'''
# Description: derive r0 from block-wise spectral ratios of a cube
# Inputs: cubesr, srsize, winsr, sitfdata, diameter, diaratio, maxfre, low, hig, start_r0, step_r0
# Returns: r0, index
'''
def cubesrdevr0(cubesr,srsize,winsr,sitfdata,diameter,diaratio,maxfre,low,hig,start_r0,step_r0):
srarry=sotfsrstand(sitfdata,diameter,diaratio,maxfre,srsize,start_r0,step_r0)
xnum=cubesr.shape[2]//srsize
ynum=cubesr.shape[1]//srsize
r0arr=[]
###corsize=[int(srsize*0.9),int(srsize*0.9)]
for i in range(xnum):
for j in range(ynum):
cubesub=cubesr[:,j*srsize:j*srsize+srsize,i*srsize:i*srsize+srsize]  # index the second block axis with j; the original reused i and skipped every off-diagonal block
###meanf=np.mean(cubesub,axis=0)
###cubesubalign=cube_align(cubesub,meanf,corsize,win=1.0)
sr,sotf2,psd,noise=cubesrcal(cubesub,winsr)
sr_filter=ndm.gaussian_filter(sr, sigma=0.8)
sr_filter=sr_filter/sr_filter[srsize//2,srsize//2]
r0,masksr=srdevr0(sr_filter,srarry,maxfre,low,hig,start_r0,step_r0)
r0arr.append(r0)
r0=(Counter(r0arr).most_common(1)[0][0]).astype(np.float32)
index=np.rint((r0-start_r0)/step_r0).astype(int)
print('r0 =',r0)
return r0,index
'''
#----Description: compute the speckle-interferometry transfer function
#----Args: stfdata, maxfre, imsize, idx
#----Returns: sitf
'''
def GetSitf(stfdata,maxfre,imsize,idx):
sitfsize=stfdata.shape[1]
mpr=(sitfsize)*(maxfre*2.0)/(imsize)
[Y,X]=np.meshgrid(np.arange(imsize)-imsize//2,np.arange(imsize)-imsize//2)
mprtx=np.int64(np.sqrt(X*X+Y*Y)*mpr)
sitf=stfdata[idx,np.minimum(mprtx,sitfsize-1)]
sitf=sitf/sitf[imsize//2,imsize//2]
return sitf
'''
#----Description: power-spectrum deconvolution
#----Args: data, subsitf
#----Returns: img
'''
def PsdDeconv(data,subsitf):
[xsize,ysize]=data.shape
datasp=fft.fftshift(fft.fft2(data))/xsize/ysize
pha=np.angle(datasp)
psd=np.abs(datasp)**2
mod=np.sqrt(psd/(subsitf+0.0005))
sp=mod*np.exp(0+1j*pha)*xsize*ysize
img=fft.ifft2(fft.ifftshift(sp)).real
return img
|
[
"879951213@qq.com"
] |
879951213@qq.com
|
7121f658874a3d432cf6c2c43832f69335644a6f
|
8e1012375203b3107175418661873595f2fd26f6
|
/pymatgen/io/vasp/InterfaceExamples/simpler_interface.py
|
27673ac4ba647b29455108b9a5da870ac475b315
|
[
"MIT"
] |
permissive
|
dcossey014/pymatgen
|
696c250b60ddb3b46e6b2263f38cde805ea3731c
|
bed24cf3c36e5a7639cad4f6edd435adf72c78ca
|
refs/heads/master
| 2021-01-25T02:29:30.911046
| 2016-02-11T20:54:04
| 2016-02-11T20:54:04
| 50,128,358
| 1
| 0
| null | 2016-01-21T18:37:06
| 2016-01-21T18:37:06
| null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
#!/usr/bin/env python
import os
import sys
import string
import json
import inspect
from pymatgen import Structure
from fireworks import Firework, Workflow, LaunchPad
from pymatgen.io.vasp.interfaces import VaspInputInterface, VaspFirework, VaspWorkflow
# get structure from Crystallographic Information File (CIF)
s = Structure.from_file('./mp-33088_Cr2FeO4.cif')
input=VaspInputInterface(s)
input.NEDOS=2000 # override default or add INCAR parameter
# Dump VASP Input into current directory for inspection
input.write_input()
# Complete definition of Firework Task(s) and add to
# Firework
task=VaspFirework(input)
# Save specification to yaml file for later inspection
# or manual add to launchpad with lpad script
task.to_file("simple_task.yaml")
# Adds single Firework to launchpad database
task.add_fw_to_launchpad()
|
[
"dcossey014@gmail.com"
] |
dcossey014@gmail.com
|
c83857f6f9f57632006b19b8b6a09d3526c120cf
|
0d2dcbd83808667cc6b9f320c9ea1f0f07768c0c
|
/app/idealers/orders/tests/test_services.py
|
1e06d1546f7395dc064cc92338bb70a3c9c48ef1
|
[
"MIT"
] |
permissive
|
leydson-vieira/dealers
|
bae186341126c7a99eff10ee9568e47de125c5a2
|
14f2f307f0f4497eec92f65d01ef111b42d528b9
|
refs/heads/master
| 2023-07-05T18:25:38.978565
| 2021-09-01T20:51:44
| 2021-09-01T20:51:44
| 400,350,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,285
|
py
|
from datetime import datetime
from decimal import Decimal
import pytest
from ..exceptions import DealerDoesNotExist
from ..models import Order
from ..services import OrderService
@pytest.mark.django_db
class TestOrderService:
@pytest.fixture
def order_data(self):
return {
'code': 'order_code',
'amount': 5000,
'date': datetime.now(),
'cpf': '38723274884'
}
def test_should_not_create_an_order_because_dealer_doesnt_exist(
self, user, order_data
):
order_data['cpf'] = '99999999999'
with pytest.raises(DealerDoesNotExist):
OrderService.create_order(**order_data)
def test_should_create_an_order(self, order_data, user):
order = OrderService.create_order(**order_data)
assert order
def test_order_should_generate_a_cashback(self, order_data, user):
order = OrderService.create_order(**order_data)
assert hasattr(order, 'cashback')
@pytest.mark.parametrize(
'order_amount,expected_cashback_amount',
[
(Decimal('100'), Decimal('10')),
(Decimal('1000'), Decimal('100')),
(Decimal('1500'), Decimal('225')),
(Decimal('2000'), Decimal('400')),
]
)
def test_cashback_calcs(
self, order_amount, expected_cashback_amount, order_data, settings, user
):
settings.FIRST_LEVEL_CASHBACK_TARGET = 1000
settings.FIRST_LEVEL_CASHBACK_PERCENT = '0.1'
settings.SECOND_LEVEL_CASHBACK_TARGET = 1500
settings.SECOND_LEVEL_CASHBACK_PERCENT = '0.15'
settings.THIRD_LEVEL_CASHBACK_PERCENT = '0.20'
order_data['amount'] = order_amount
order = OrderService.create_order(**order_data)
assert order.cashback.amount == expected_cashback_amount
@pytest.mark.parametrize(
'cpf,status',
[
('38723274884', Order.Status.IN_VALIDATION),
('15350946056', Order.Status.APPROVED),
],
)
def test_should_return_status_validation_to_approved_cpf(
self, cpf, status, order_data, user, approved_user
):
order_data['cpf'] = cpf
order = OrderService.create_order(**order_data)
assert order.status == status
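# Note on the expected amounts above: together with the settings in test_cashback_calcs they imply
# 10% cashback up to the first target inclusive (100 -> 10, 1000 -> 100), 15% up to the second
# target inclusive (1500 -> 225), and 20% beyond it (2000 -> 400).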
|
[
"leydson.vieira@gmail.com"
] |
leydson.vieira@gmail.com
|
cce07681016b768fecb0d4fc67ad9995df1da902
|
ab97d04983b6f150b9265cecc06b1d1a8c0cc1bd
|
/django/django_fundamentals/time_display/time_display/urls.py
|
ca2280ed36dacae1f2fb794c03f62f6c4bc3fead
|
[] |
no_license
|
seahawkz/python_stack
|
aaadb19f7f00edb7b8f0a5846024266b28ccea7b
|
27d08500dc737131b63545ca36f102b447183d3a
|
refs/heads/main
| 2023-05-31T20:25:10.338799
| 2021-06-16T01:25:11
| 2021-06-16T01:25:11
| 353,156,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
"""time_display URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('time_app1.urls')),
]
|
[
"derekanderson8@msn.com"
] |
derekanderson8@msn.com
|
4b8ea03ed98b010b286017125229cfb4a201fdd6
|
3dbb9810eacf60ef11c6deda6085593852ee6349
|
/data_prep/support_scripts/stage_2/stage_2_swarm_gen.py
|
b8fbb548ff6bde8265c19fff649a7de1aae59268
|
[] |
no_license
|
Ngoyal95/abcd_cca_replication
|
7dcf5f8da8a4e5c1fff9ace27f6f68c48639e649
|
54af5ff4ddb0738fb7ce0fc13e6865614f7370c9
|
refs/heads/master
| 2023-04-13T18:19:38.234640
| 2021-04-30T17:30:05
| 2021-04-30T17:30:05
| 240,342,289
| 0
| 0
| null | 2020-03-31T11:11:07
| 2020-02-13T19:21:11
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 917
|
py
|
#!/usr/bin/python3
# stage_2_swarm_gen.py
# Created: 6/16/20
# Last edited:
# Written by Nikhil Goyal, National Institute of Mental Health, 2019-2020
# Generate commands of the form:
# $SUPPORT_SCRIPTS/stage_2/calc_avg_motion.sh $sub $ABCD_CCA_REPLICATION
import os
import sys
fp_sub_list = sys.argv[1] # absolute path to file that contains subject ids
abcd_cca_replication = sys.argv[2] # absolute path to main directory in repo (where pipeline.config located)
swarm_dir = sys.argv[3] # where to output swarm file
script_to_call = sys.argv[4] # name of the script to call (absolute path)
subjects = [line.rstrip('\n') for line in open(fp_sub_list)]
fp = os.path.join(swarm_dir,'stage_2.swarm')
f_swarm = open(fp, 'w')
for subject in subjects:
cmd = "{} {} {}".format(script_to_call, subject, abcd_cca_replication)
f_swarm.write(cmd+'\n')
f_swarm.close()
|
[
"nikhil.r.goyal@gmail.com"
] |
nikhil.r.goyal@gmail.com
|
b2937b58d984cfd9a9d6b383e3b8b36650e8289f
|
a4b185d6e5c7c6d33e91d201c4994ff1cf7a9871
|
/tushare/stock/ref_vars.py
|
0736204218a71eabeeaa4ae079e7b586b77be101
|
[
"BSD-3-Clause"
] |
permissive
|
yzzhang-pku/TuShare
|
a0cb1ebf8907aeb772d1c5e24e4515a2ebcefeec
|
13bfba0171349aa408b96dd4ae44cf932226fea6
|
refs/heads/master
| 2020-03-17T21:18:44.512873
| 2018-05-18T14:26:52
| 2018-05-18T14:26:52
| 133,953,301
| 0
| 0
|
BSD-3-Clause
| 2018-05-18T12:46:12
| 2018-05-18T12:46:11
| null |
UTF-8
|
Python
| false
| false
| 3,415
|
py
|
# -*- coding:utf-8 -*-
DP_URL = '%sapp.finance.%s/data/stock/%s?day=&page=%s'
DP_163_URL = '%squotes.%s/data/caibao/%s?reportdate=%s&sort=declaredate&order=desc&page=%s'
FUND_HOLDS_URL = '%squotes.%s/hs/marketdata/service/%s?host=/hs/marketdata/service/%s&page=%s&query=start:%s;end:%s&order=desc&count=60&type=query&req=%s'
XSG_URL = '%sdatainterface.%s/EM_DataCenter/%s?type=FD&sty=BST&st=3&sr=true&fd=%s&stat=%s'
LHB_URL = '%sdata.%s/stock/lhb/%s.html'
LHB_SINA_URL = '%s%s/q/go.php/vLHBData/kind/%s/%s?last=%s&p=%s'
LHB_COLS = ['code', 'name', 'pchange', 'amount', 'buy', 'bratio', 'sell', 'sratio', 'reason']
NEW_STOCKS_URL = '%s%s/corp/view/%s?page=%s&cngem=0&orderBy=NetDate&orderType=desc'
MAR_SH_HZ_URL = '%s%s/marketdata/tradedata/%s?jsonCallBack=jsonpCallback%s&isPagination=true&tabType=&pageHelp.pageSize=100&beginDate=%s&endDate=%s%s&_=%s'
MAR_SH_HZ_REF_URL = '%s%s/market/dealingdata/overview/margin/'
MAR_SH_MX_URL = '%s%s/marketdata/tradedata/%s?jsonCallBack=jsonpCallback%s&isPagination=true&tabType=mxtype&detailsDate=%s&pageHelp.pageSize=100&stockCode=%s&beginDate=%s&endDate=%s%s&_=%s'
MAR_SZ_HZ_URL = '%s%s/szseWeb/%s?SHOWTYPE=EXCEL&ACTIONID=8&CATALOGID=1837_xxpl&txtDate=%s&tab2PAGENUM=1&ENCODE=1&TABKEY=tab1'
MAR_SZ_MX_URL = '%s%s/szseWeb/%s?SHOWTYPE=EXCEL&ACTIONID=8&CATALOGID=1837_xxpl&txtDate=%s&tab2PAGENUM=1&ENCODE=1&TABKEY=tab2'
MAR_SH_HZ_TAIL_URL = '&pageHelp.pageNo=%s&pageHelp.beginPage=%s&pageHelp.endPage=%s'
TERMINATED_URL = '%s%s/%s?jsonCallBack=jsonpCallback%s&isPagination=true&sqlId=COMMON_SSE_ZQPZ_GPLB_MCJS_ZZSSGGJBXX_L&pageHelp.pageSize=50&_=%s'
SUSPENDED_URL = '%s%s/%s?jsonCallBack=jsonpCallback%s&isPagination=true&sqlId=COMMON_SSE_ZQPZ_GPLB_MCJS_ZTSSGS_L&pageHelp.pageSize=50&_=%s'
TERMINATED_T_COLS = ['COMPANY_CODE', 'COMPANY_ABBR', 'LISTING_DATE', 'CHANGE_DATE']
LHB_KINDS = ['ggtj', 'yytj', 'jgzz', 'jgmx']
LHB_GGTJ_COLS = ['code', 'name', 'count', 'bamount', 'samount', 'net', 'bcount', 'scount']
LHB_YYTJ_COLS = ['broker', 'count', 'bamount', 'bcount', 'samount', 'scount', 'top3']
LHB_JGZZ_COLS = ['code', 'name', 'bamount', 'bcount', 'samount', 'scount', 'net']
LHB_JGMX_COLS = ['code', 'name', 'date', 'bamount', 'samount', 'type']
TERMINATED_COLS = ['code', 'name', 'oDate', 'tDate']
DP_COLS = ['report_date', 'quarter', 'code', 'name', 'plan']
DP_163_COLS = ['code', 'name', 'year', 'plan', 'report_date']
XSG_COLS = ['code', 'name', 'date', 'count', 'ratio']
QUARTS_DIC = {'1':('%s-12-31', '%s-03-31'), '2':('%s-03-31', '%s-06-30'),
'3':('%s-06-30', '%s-09-30'), '4':('%s-09-30', '%s-12-31')}
FUND_HOLDS_COLS = ['count', 'clast', 'date', 'ratio', 'amount', 'nums','nlast', 'name', 'code']
NEW_STOCKS_COLS = ['code', 'name', 'ipo_date', 'issue_date', 'amount', 'markets', 'price', 'pe',
'limit', 'funds', 'ballot']
MAR_SH_COOKIESTR = '_gscu_1808689395=27850607moztu036'
MAR_SH_HZ_COLS = ['opDate', 'rzye', 'rzmre', 'rqyl', 'rqylje', 'rqmcl', 'rzrqjyzl']
MAR_SH_MX_COLS = ['opDate', 'stockCode', 'securityAbbr', 'rzye', 'rzmre', 'rzche', 'rqyl', 'rqmcl', 'rqchl']
MAR_SZ_HZ_COLS = ['rzmre', 'rzye', 'rqmcl', 'rqyl', 'rqye', 'rzrqye']
MAR_SZ_MX_COLS = ['stockCode', 'securityAbbr', 'rzmre', 'rzye', 'rqmcl', 'rqyl', 'rqye', 'rzrqye']
MAR_SZ_HZ_MSG = 'please do not request more than one year; you can obtain the data year by year.'
MAR_SZ_HZ_MSG2 = 'both start and end dates are required.'
|
[
"jimmysoa@sina.cn"
] |
jimmysoa@sina.cn
|
bc618a076ffea315a02334a8d7c62eb10b73ccba
|
ad168daa14c475a0054779bb2f20e7a0d8aad67c
|
/old/SubCoordSys.py
|
110e91f1ab638210e052ee00c95db3175d9bfead
|
[] |
no_license
|
Magnus93/BenzierPy
|
46a7e94031f3041a4d189172d3feca854431b846
|
56974241b471729f849a10b77e928fb80380956f
|
refs/heads/master
| 2021-01-20T05:15:39.084599
| 2018-06-11T13:59:41
| 2018-06-11T13:59:41
| 101,424,502
| 0
| 0
| null | 2018-06-11T13:59:42
| 2017-08-25T16:57:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
import anchor02
import pygame
import mouse
import trans2D
screen = pygame.display.set_mode((800,600))
mytimer = pygame.time.Clock()
class CoordSys:
def __init__(self, mv0, mv1):
self.p0 = mv0
self.p1 = mv1
self.scale = 1
self.sin = 1
self.cos = 1
self.calcValues()
def calcValues(self):
self.scale = (trans2D.distance((self.p0.x, self.p0.y),(self.p1.x, self.p1.y)))/100.0
self.sin = trans2D.sin((self.p0.x,self.p0.y), (self.p1.x,self.p1.y))
self.cos = trans2D.cos((self.p0.x,self.p0.y), (self.p1.x,self.p1.y))
def worldToLocal(self, pWorld):
#inv = inverse
# pLocal(pWorld) = inv(S)*inv(R)*inv(T)*pWorld
iTpW = trans2D.translate(pWorld, trans2D.negate((self.p0.x, self.p0.y)))
iRiTpW = trans2D.rotateTrig(iTpW, (0,0), -self.sin, self.cos)
pLocal = trans2D.scale(iRiTpW, (0,0) ,(1.0/self.scale))
return pLocal
def localToWorld(self, pLocal):
# pWorld(pLocal) = T*R*S*pLocal
self.calcValues()
SpL = trans2D.scale(pLocal, (0,0), self.scale)
RSpL = trans2D.rotateTrig(SpL, (0,0), self.sin, self.cos)
pWorld = trans2D.translate(RSpL, (self.p0.x, self.p0.y))
return pWorld
def drawGrid(self):
for i in range(0,101, 10):
pWorldStart = self.localToWorld((0,i-50))
pWorldEnd = self.localToWorld((100,i-50))
pygame.draw.line(screen, 0xff0000, pWorldStart, pWorldEnd, 1)
pWorldStart = self.localToWorld((i,-50))
pWorldEnd = self.localToWorld((i,50))
pygame.draw.line(screen, 0x00ff00, pWorldStart, pWorldEnd, 1)
if __name__ == "__main__":
circ1 = anchor02.Variable("start", 500, 400)
circ2 = anchor02.Variable("end", 40, 30)
coo = CoordSys(circ1, circ2)
### --- Testing world to local localto world ---
a0 = (3,0)
a1 = coo.localToWorld(a0)
a2 = coo.worldToLocal(a1)
    print(a0, a1, a2)
    print("________________________")
a0 = (100,100)
a1 = coo.localToWorld(a0)
a2 = coo.worldToLocal(a1)
    print(a0, a1, a2)
###
while(True):
screen.fill(0x222222)
circ1.run()
circ2.run()
mouse.run()
coo.drawGrid()
pygame.display.flip()
mytimer.tick(24)
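# A dependency-free sketch of the same transform chain (trans2D, anchor02 and
# mouse are project-local modules not shown here). It is kept commented out
# because the demo loop above never exits; uncomment to check the
# pWorld = T*R*S*pLocal and pLocal = inv(S)*inv(R)*inv(T)*pWorld round trip:
#
# import math
#
# def local_to_world(p, origin, sin_a, cos_a, scale):
#     x, y = p[0] * scale, p[1] * scale                       # S
#     xr, yr = x * cos_a - y * sin_a, x * sin_a + y * cos_a   # R
#     return (xr + origin[0], yr + origin[1])                 # T
#
# def world_to_local(p, origin, sin_a, cos_a, scale):
#     x, y = p[0] - origin[0], p[1] - origin[1]               # inv(T)
#     xr, yr = x * cos_a + y * sin_a, -x * sin_a + y * cos_a  # inv(R)
#     return (xr / scale, yr / scale)                         # inv(S)
#
# a = math.radians(30)
# w = local_to_world((3, 0), (500, 400), math.sin(a), math.cos(a), 2.0)
# print(world_to_local(w, (500, 400), math.sin(a), math.cos(a), 2.0))  # ~(3.0, 0.0)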
|
[
"magnus.ja.gustafsson@gmail.com"
] |
magnus.ja.gustafsson@gmail.com
|
cf8f06cbc0467e7a348a264ef447ba41dff0fe89
|
372647ad5f8a40754116c2b79914708e46960aef
|
/ivi/dicon/__init__.py
|
f5112f804a138755dd00d2293a646501207f3aa4
|
[
"MIT"
] |
permissive
|
edupo/python-ivi
|
52392decb01bc89c6e1b42cbcbd1295a131e91f5
|
8105d8064503725dde781f0378d75db58defaecb
|
refs/heads/master
| 2020-03-31T21:06:02.059885
| 2018-10-04T12:34:38
| 2018-10-04T12:34:38
| 152,567,486
| 0
| 0
|
MIT
| 2018-10-11T09:40:35
| 2018-10-11T09:40:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Programmable fiberoptic instrument
from .diconGP700 import diconGP700
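# A hypothetical usage sketch, assuming the usual python-ivi convention of
# constructing an instrument from a VISA-style resource string (the address
# below is made up):
#
#   import ivi
#   switch = ivi.dicon.diconGP700("TCPIP0::192.168.1.100::INSTR")
#   print(switch.identity.instrument_model)
#   switch.close()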
|
[
"alex@alexforencich.com"
] |
alex@alexforencich.com
|
ede899f4c9e41b7a923550f59e07dc307499712f
|
4ccf9337701752fc5d11c5d4f2dc2bd75470df08
|
/genGcode/genGcode_simplify.py
|
d616e484659cfb4e4ed4639f1ddd1e1af1814660
|
[] |
no_license
|
MartianSheep/Elevate_Gcode
|
433ca82c3dc9986ba85a9023810f5b2099f9ef10
|
2cea1ed402b71aef52f69c933f88815eb47347a7
|
refs/heads/master
| 2022-11-26T07:22:19.448073
| 2020-08-04T19:55:01
| 2020-08-04T19:55:01
| 278,013,874
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,718
|
py
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
from PIL import ImageFilter
import numpy as np
import potrace
class Piture():
def __init__(self,filepath):
self.img=mpimg.imread(filepath)
self.h,self.w,self.c=self.img.shape
self.pre=self.img
self.gcode=['G28']
self.x_max=40
self.y_max=40
#----------------------convert to gray scale----------------------------
def gray_scale(self):
        print('RGB to gray scale...')
gray = np.ones(self.img.shape) # new array for gray scale
for i in range(self.h):
for j in range(self.w):
Y = (0.3*self.img[i,j,0]+0.59*self.img[i,j,1]+0.11*self.img[i,j,2])/255
# print(Y)
gray[i,j]=np.array([Y,Y,Y])
self.pre=np.abs(gray-1)
return gray
#-----------------------------------------------------------------------
#-----------------------Show the image on the screen---------------------------
def show(self):
plt.imshow(self.pre)
plt.axis('off')
plt.show()
#------------------------------------------------------------------------
#-----------------------Save the image---------------------------
def saveImg(self, output):
plt.imshow(self.pre)
plt.axis('off')
plt.imsave(output + '.jpg', self.pre)
print('Save ' + output + '.jpg')
#------------------------------------------------------------------------
#-----------------------Generate Gcode---------------------------
def gen_gcode(self):
print('generate gcode...')
# bmp=potrace.Bitmap(self.pre[:,:]) # binary fill
bmp=potrace.Bitmap(self.pre[:,:,0])
path=bmp.trace()
flag = 0
for curve in path:
ratio=self.x_max/max(self.w,self.h) #normalize for drawing machine
            self.gcode.append('M280 P0 S60') # lift the pen
            self.gcode.append('G0 X%.4f Y%.4f'%(curve.start_point[0]*ratio,curve.start_point[1]*ratio)) # move to the curve's start point
            self.gcode.append('M280 P0 S0') # lower the pen
for segment in curve:
# print(segment)
if segment.is_corner:
                    self.gcode.append('G1 X%.4f Y%.4f'%(segment.c[0]*ratio,segment.c[1]*ratio)) # draw to the corner point
                    self.gcode.append('G1 X%.4f Y%.4f'%(segment.end_point[0]*ratio,segment.end_point[1]*ratio)) # draw to the corner's end point
                else:
                    self.gcode.append('G1 X%.4f Y%.4f'%(segment.end_point[0]*ratio,segment.end_point[1]*ratio)) # draw to the Bezier segment's end point
            self.gcode.append('M280 P0 S60') # lift the pen
return self.gcode
#------------------------------------------------------------------------
#-----------------------Save Gcode---------------------------
def save_gcode(self):
with open('output.txt','w') as f:
for i in range(len(self.gcode)):
f.write('%s\n'%self.gcode[i])
#--------------------------------------------------------------------
#---------------------------convert to binary image---------------------
def binary_image(self,threshold):
print('converting to binary image...')
self.pre[self.pre[:,:,0]>threshold] = np.array([1,1,1])
self.pre[self.pre[:,:,0]<=threshold] = np.array([0,0,0])
return self.pre
#-----------------------------------------------------------------------------
if __name__=='__main__':
    pic=Piture('img/bear.jpg') # path to the input image
pic.gray_scale()
pic.binary_image(0.75)
pic.show()
gcode=pic.gen_gcode()
pic.save_gcode()
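    # Quick sanity check of the output: the generated program starts with G28
    # (home), then repeats a pen-up (M280 P0 S60) / move (G0) / pen-down
    # (M280 P0 S0) / draw (G1 ...) pattern for every traced curve.
    print('\n'.join(gcode[:8]))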
|
[
"mkmarsscience@gmail.com"
] |
mkmarsscience@gmail.com
|
5176aef0089034410f44ffccfc4da3c492c73b89
|
9b8502767061c6ff1fafd62e22846a4e45780013
|
/locallibrary/catalog/admin.py
|
815e9d6073efd31df8a17417134e74c705b7ea2a
|
[] |
no_license
|
zacharywendholt/personalWebsite
|
c5fe557f584322ca6940c96f0b95614075eb2727
|
d89ea60be4831e3f35e1d7efb2ae1fe7818bbeb5
|
refs/heads/master
| 2022-12-13T04:00:55.155513
| 2020-08-24T23:38:49
| 2020-08-24T23:38:49
| 276,281,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
from django.contrib import admin
from .models import Author, Genre, Book, BookInstance
# Register your models here.
#admin.site.register(Book)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('last_name', 'first_name', 'date_of_birth', 'date_of_death')
fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
# Register the Admin classes for Book using the decorator
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
list_display = ('title', 'author', 'display_genre')
# Register the Admin classes for BookInstance using the decorator
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
list_filter = ('status', 'due_back')
admin.site.register(Author, AuthorAdmin)
admin.site.register(Genre)
|
[
"zacharywendholt@gmail.com"
] |
zacharywendholt@gmail.com
|
afd85d4c73844c5cc1c8b3f534416460df5955a4
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GUPnPDLNA/ContainerInformationClass.py
|
25afb71d5c36d22089d5bb91466a302acde8d762
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 5,687
|
py
|
# encoding: utf-8
# module gi.repository.GUPnPDLNA
# from /usr/lib64/girepository-1.0/GUPnPDLNA-2.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
class ContainerInformationClass(__gi.Struct):
"""
:Constructors:
::
ContainerInformationClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
get_mime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
get_mpeg_version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
get_packet_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
get_profile = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
get_variant = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
is_system_stream = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(ContainerInformationClass), '__module__': 'gi.repository.GUPnPDLNA', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'ContainerInformationClass' objects>, '__weakref__': <attribute '__weakref__' of 'ContainerInformationClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f6784734680>, 'get_mpeg_version': <property object at 0x7f67847347c0>, 'get_packet_size': <property object at 0x7f6784734860>, 'get_profile': <property object at 0x7f6784734950>, 'is_system_stream': <property object at 0x7f6784734a90>, 'get_variant': <property object at 0x7f6784734b80>, 'get_mime': <property object at 0x7f6784734c70>, '_reserved': <property object at 0x7f6784734d60>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(ContainerInformationClass)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
daff8e5c7182e4ea01be7a0621eb5665a0140644
|
ec37e0ad19acc36372171a17dccb4a95da38ac24
|
/TemaSI1/km_server/km.py
|
177c0da68c3bf39a543da58a68908a9f3b4ca2f4
|
[] |
no_license
|
tuguimadalinaa/Homework1-SI
|
586ad876aae37c8acabf0af6c6835d7c6f34c1f7
|
58e631f7899902d4cf9f03a225161d23cae0898a
|
refs/heads/main
| 2023-01-04T04:31:45.635851
| 2020-11-03T07:53:21
| 2020-11-03T07:53:21
| 306,423,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
import socket
import time
import random
import Crypto
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
data_for_server = {'TCP_IP': '127.0.0.1', 'TCP_PORT': 3000, "BUFFER_SIZE": 1024}
AES_data = {'K3': b'1234567891234568', 'iv': b'\xad\xbe\xf6\xc2\xb3p\x10I\xc6\x96 M\xb9\xa1\x96b'}
mode = None
KM = dict()
KM["CBC_key"] = b'abcdabcdabcdabcd'
KM["OFB_key"] = b'abcdabcdabcdabcd'
KM["KEY_3"] = AES_data["K3"]
print("Km server started")
def get_encryption_type():
data = ["CBC", "OFB"]
return random.choice(data)
# Create and bind the listening socket once, outside the accept loop,
# so successive connections do not try to re-bind the same port.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((data_for_server["TCP_IP"], data_for_server['TCP_PORT']))
s.listen(1)
while 1:
    conn, addr = s.accept()
data = conn.recv(data_for_server["BUFFER_SIZE"])
data = data.decode()
if not data: break
print("Node KM received data: ", data)
if data == "CBC":
mode = "CBC"
aes = AES.new(AES_data["K3"], AES.MODE_ECB)
aes_key = aes.encrypt(pad(KM["CBC_key"], AES.block_size))
conn.send(aes_key)
time.sleep(1)
aes = AES.new(AES_data["K3"], AES.MODE_ECB)
aes_iv = aes.encrypt(AES_data['iv'])
conn.send(aes_iv)
time.sleep(1)
elif data == "OFB":
mode = "OFB"
aes = AES.new(AES_data["K3"], AES.MODE_ECB)
aes_key = aes.encrypt(pad(KM["OFB_key"], AES.block_size))
conn.send(aes_key)
time.sleep(1)
print(AES_data['iv'])
aes = AES.new(AES_data["K3"], AES.MODE_ECB)
aes_iv = aes.encrypt(AES_data['iv'])
conn.send(aes_iv)
print(aes_iv)
time.sleep(1)
elif data == 'key_refresh':
aes = AES.new(AES_data["K3"], AES.MODE_ECB)
AES_data['iv'] = Crypto.Random.get_random_bytes(AES.block_size)
aes_key = None
if mode == "CBC":
KM["CBC_key"] = Crypto.Random.get_random_bytes(AES.block_size)
aes_key = aes.encrypt(pad(KM["CBC_key"], 16))
elif mode == "OFB":
KM["OFB_key"] = Crypto.Random.get_random_bytes(AES.block_size)
aes_key = aes.encrypt(pad(KM["OFB_key"], 16))
conn.send(aes_key)
aes = AES.new(AES_data["K3"], AES.MODE_ECB)
aes_iv = aes.encrypt(AES_data['iv'])
conn.send(aes_iv)
time.sleep(1)
conn.send(get_encryption_type().encode())
else:
conn.send("does not exist".encode())
conn.close()
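# A hypothetical client-side sketch for the exchange above: request the CBC
# parameters and decrypt them with the shared K3 (pycryptodome, as on the
# server; host, port and buffer size mirror data_for_server):
#
#   import socket
#   from Crypto.Cipher import AES
#   from Crypto.Util.Padding import unpad
#
#   K3 = b'1234567891234568'  # must match the server's AES_data['K3']
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 3000))
#   c.send(b'CBC')
#   enc_key = c.recv(1024)   # padded CBC session key, AES-ECB under K3
#   enc_iv = c.recv(1024)    # IV, AES-ECB under K3 (a single block)
#   key = unpad(AES.new(K3, AES.MODE_ECB).decrypt(enc_key), AES.block_size)
#   iv = AES.new(K3, AES.MODE_ECB).decrypt(enc_iv)
#   c.close()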
|
[
"tuguimadalinaa@gmail.com"
] |
tuguimadalinaa@gmail.com
|
ada482c584c8db533f2adf5cd8b8c477eff5ea8d
|
0109433801b0116f3e575324fee9b27d4f6e1506
|
/registrasi/migrations/0001_initial.py
|
1f7119cc0a2b92fe606fc167d5f85b400717b15b
|
[] |
no_license
|
yaumil94/sahara-project
|
116255a544b044364ba3d86b8dcbd0a8656f2551
|
1c345d5e5f9b2ba39154c54abda243fa0bee3d79
|
refs/heads/master
| 2020-07-03T14:54:05.850495
| 2015-10-21T13:43:06
| 2015-10-21T13:43:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('user_ptr', models.OneToOneField(to=settings.AUTH_USER_MODEL, auto_created=True, primary_key=True, serialize=False, parent_link=True)),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Paket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama', models.CharField(max_length=30)),
('harga', models.IntegerField()),
],
),
migrations.CreateModel(
name='Pembayaran',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('jenis', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='member',
name='paket',
field=models.ForeignKey(to='registrasi.Paket'),
),
migrations.AddField(
model_name='member',
name='pembayaran',
field=models.ForeignKey(to='registrasi.Pembayaran'),
),
]
|
[
"yanwarsolahudinn@gmail.com"
] |
yanwarsolahudinn@gmail.com
|
b654b39431cf208a9c81a1924cb0e9b756bf502f
|
ea0296b94a4b319f0ea1f99045a03cf5230ceb20
|
/blog/views.py
|
dc2aeaed69e0f812db471d943a11021242fe7df7
|
[] |
no_license
|
nejilabs/CS-PythonDjangoTutorial-BlogSocialMediaWebApp-20190218
|
d7a682ca63da7c4889e514e92b59a2f96a95fc90
|
5e2172fbc5a86a24efe60140919f4a75ff83a6ec
|
refs/heads/master
| 2023-05-08T23:28:50.650198
| 2021-05-04T07:13:46
| 2021-05-04T07:13:46
| 278,018,076
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
CreateView,
DetailView,
ListView,
UpdateView,
DeleteView
)
from .models import Post
def about(request):
return render(request,'blog/about.html', {'title': 'About'})
class PostListView(ListView):
model = Post
template_name = 'blog/home.html'
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = 5
class UserPostListView(ListView):
model = Post
template_name = 'blog/user_posts.html'
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = 5
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
model = Post
class PostCreateView(LoginRequiredMixin,CreateView):
model = Post
fields = ['title','content']
def form_valid(self,form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
model = Post
fields = ['title','content']
def form_valid(self,form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):
model = Post
success_url = "/"
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
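# A hypothetical urls.py wiring for these views (route names and paths are
# assumptions following the common Django CBV tutorial layout, not taken from
# this repo):
#
#   from django.urls import path
#   from .views import (PostListView, UserPostListView, PostDetailView,
#                       PostCreateView, PostUpdateView, PostDeleteView, about)
#
#   urlpatterns = [
#       path('', PostListView.as_view(), name='blog-home'),
#       path('user/<str:username>/', UserPostListView.as_view(), name='user-posts'),
#       path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
#       path('post/new/', PostCreateView.as_view(), name='post-create'),
#       path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
#       path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
#       path('about/', about, name='blog-about'),
#   ]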
|
[
"nelsonalbajr@yahoo.com"
] |
nelsonalbajr@yahoo.com
|
f455c273b1a7d058a7b6728157bc044a61a2736a
|
31b787404e5a9a304d312a2f7b52b64062a5391c
|
/Twitter -SA - GUI/scrapper.py
|
a9d58503bcab10cc209f674bfab45bae1735b98c
|
[
"MIT"
] |
permissive
|
jeev20/Twitter-Sentiment-Analyzer-GUI
|
7039504640425f809b91237a4a3b7abbcd34e803
|
9d09c53ccde3c38fd94a3fe0834087532dbff127
|
refs/heads/master
| 2021-01-12T02:08:40.287749
| 2017-01-09T23:17:50
| 2017-01-09T23:17:50
| 78,477,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
# This program scrapes Twitter pages for tweets. Written by jeev20. https://github.com/jeev20
from bs4 import BeautifulSoup
import urllib
from textblob import TextBlob
import sys
import time
import webbrowser
# open a public URL, in this case, the webbrowser
url1 = "https://twitter.com/twitter"
webbrowser.open_new_tab(url1)
time.sleep(3)
# user can input twitter account
url = raw_input("Please paste link to twitter account : ")
#input webaddress of the twitter account
webadd = urllib.urlopen(url).read()
soup = BeautifulSoup(webadd, "html.parser")
#tweet title extraction
def title():
return(soup.title.text)
# function to return latest tweet
def getText():
for tweets in soup.find_all('div',{"class": "content"}):
twe = (tweets.find('p').text)
return twe
def get_time():
    # renamed from time() so it no longer shadows the imported time module
    stamp = None
    for tweets in soup.find_all('a', class_="tweet-timestamp js-permalink js-nav js-tooltip"):
        stamp = tweets.get_text()
    return stamp
# function returning sentiment analysis value for the tweet
def getTextsa():
twe = getText()
test = TextBlob(twe)
sa = (test.sentiment.polarity)
return sa
#used for debugging
#prints the tweet and the sentiment analysis value
print title()
print ""
print getText()
print ""
print get_time()
print ""
print "Sentiment Value is: ",float ("%.4f" %(getTextsa()))
|
[
"noreply@github.com"
] |
jeev20.noreply@github.com
|
efb1bf096c32547eaf7c902ddf61205cf9be740d
|
6c10c6e229014dc3bf14efaec2ea8bf07c406752
|
/AILearning/MordernCNN/ConvolutionNetWork.py
|
83f2f779f1bcb1de50e984c7b9e062ded4e76acc
|
[] |
no_license
|
GuyRobot/AIPythonExamples
|
e59c6edb355d9cadee2b3f19a087b1b656956262
|
4acdd0d4966e31a616910554bc075b641aa152df
|
refs/heads/master
| 2021-05-21T13:05:49.615593
| 2021-02-28T06:41:04
| 2021-02-28T06:41:04
| 252,662,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
from mxnet import gluon, nd, autograd
from mxnet.gluon import nn
def corr2D(X, K):
h, w = K.shape
Y = nd.zeros(shape=(X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i: i + h, j: j + w] * K).sum()
return Y
class Conv2D(nn.Block):
def __init__(self, kernel_size, **kwargs):
super(Conv2D, self).__init__(**kwargs)
self.weights = self.params.get('weight', shape=kernel_size)
        self.bias = self.params.get('bias', shape=(1, ))
def forward(self, x):
return corr2D(x, self.weights.data()) + self.bias.data()
X = nd.ones((6, 8))
X[:, 2:6] = 0
K = nd.array([[-1, 1]])
Y = corr2D(X, K)
conv2D = nn.Conv2D(1, (1, 2))
conv2D.initialize()
X = X.reshape(1, 1, 6, 8)
Y = Y.reshape(1, 1, 6, 7)
for i in range(10):
with autograd.record():
Y_hat = conv2D(X)
l = (Y_hat - Y) ** 2
l.backward()
conv2D.weight.data()[:] -= 3e-2 * conv2D.weight.grad()
    if (i + 1) % 2 == 0:
print('batch %d, loss %f' % (i + 1, l.sum().asscalar()))
print(conv2D.weight.data())
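# A tiny worked check of corr2D: with kernel K = [[-1, 1]] the output marks
# horizontal transitions, +1 where a row goes 0 -> 1 and -1 where it goes 1 -> 0.
X_demo = nd.ones((3, 4))
X_demo[:, 1:3] = 0  # every row is [1, 0, 0, 1]
print(corr2D(X_demo, nd.array([[-1, 1]])))  # every row -> [-1, 0, 1]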
|
[
"bluexker@gmail.com"
] |
bluexker@gmail.com
|
be4ba18a225607317de5d0b373b6c5e85b83b162
|
2599d93919a1cfd9a030e862d10e40e52d287655
|
/project_management_portal/views/create_project/tests/snapshots/snap_test_case_01.py
|
c2c7bf3322f211dea9c961f7b81ee250cf430987
|
[] |
no_license
|
chandramoulidupam/my_projects
|
a6730c44ed2ba7a055d13415067b28ca74f0134b
|
111d7753e2cf867d51681ed41a7ea917deb9aecd
|
refs/heads/master
| 2023-09-02T03:38:59.753682
| 2020-06-01T06:20:48
| 2020-06-01T06:20:48
| 267,607,764
| 0
| 0
| null | 2021-11-15T17:51:30
| 2020-05-28T14:11:11
|
Python
|
UTF-8
|
Python
| false
| false
| 836
|
py
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['TestCase01CreateProjectAPITestCase::test_case status'] = 400
snapshots['TestCase01CreateProjectAPITestCase::test_case body'] = {
'workflow_type': [
'This field is required.'
]
}
snapshots['TestCase01CreateProjectAPITestCase::test_case header_params'] = {
'content-language': [
'Content-Language',
'en'
],
'content-length': [
'45',
'Content-Length'
],
'content-type': [
'Content-Type',
'application/json'
],
'vary': [
'Accept-Language, Origin, Cookie',
'Vary'
],
'x-frame-options': [
'SAMEORIGIN',
'X-Frame-Options'
]
}
|
[
"chandramoulidupam@gmail.com"
] |
chandramoulidupam@gmail.com
|
9965a2e865fabcba1dbc84d7eaf8219df5ed28ea
|
09fd2a42a931e3e094af9fc5ec6eb57dc9d42660
|
/addismap/manage.py
|
eca96ee0af49c88befcfe25f7689118434a9746b
|
[] |
no_license
|
yohannes15/AddisMap
|
78df24404b1934f5327297efe13f220485f76de5
|
3bcafccd985e92cd1d894b9535c747bf4c42fd4c
|
refs/heads/master
| 2022-11-24T18:01:53.027070
| 2020-08-03T22:46:10
| 2020-08-03T22:46:10
| 271,413,519
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'addismap.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"y.berhane56@gmail.com"
] |
y.berhane56@gmail.com
|
5d185b070693868eb2bcc94da19a530272dec5d9
|
d60ee49abaee6c74c5b777f8f112a7f75f71f029
|
/genome/cnvnator/stats/plot_both.py
|
b76affd22a535a2c3ce4b07527c8af45c714b6a6
|
[] |
no_license
|
ak352/melanomics
|
41530f623b4bfdbd5c7b952debcb47622d1a8e88
|
fc5e6fdb1499616fb25a8dc05259add8a65aeca0
|
refs/heads/master
| 2020-12-24T16:14:42.271416
| 2015-08-06T12:48:52
| 2015-08-06T12:48:52
| 18,439,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,716
|
py
|
from pylab import *
from matplotlib.backends.backend_pdf import PdfPages
import sys  # needed for sys.stderr.write below
samples = [2,4,5,6,7,8]
cnv_files = ["/work/projects/melanomics/analysis/genome/cnvnator/binSize100/patient_%d.cnv.list.NS.PM.tested" % k for k in samples]
def get_cn(filename):
bins_n = {}
coverage_n = {}
coverage_t = {}
k = 0
for line in open(filename):
k += 1
line = line[:-1].split("\t")
loci = line[0:3]
start,end = [int(x) for x in line[1:3]]
if loci[0] not in bins_n:
bins_n[loci[0]] = []
if loci[0] not in coverage_n:
coverage_n[loci[0]] = []
coverage_t[loci[0]] = []
#Append x and y
#bins_n[loci[0]].append(start-1)
#coverage_n[loci[0]].append(2)
bins_n[loci[0]].append(start)
bins_n[loci[0]].append(end-1)
rds = [float(x) for x in line[3:5]]
for i,rd in enumerate(rds):
if rds[i] > 5:
rds[i] = 5
coverage_n[loci[0]].append(rd*2)
coverage_n[loci[0]].append(rd*2)
coverage_t[loci[0]].append(rd*2)
coverage_t[loci[0]].append(rd*2)
#bins_n[loci[0]].append(end)
#coverage_n[loci[0]].append(2)
return bins_n, coverage_n, coverage_t
def get_lengths():
infile = "/work/projects/melanomics/data/NCBI/Homo_sapiens/NCBI/build37.2/Sequence/WholeGenomeFasta/genome.fa.fai"
lengths = {}
for line in open(infile):
line = line[:-1].split("\t")
lengths["chr"+line[0]] = int(line[1])
return lengths
if __name__ == "__main__":
colors = ["r", "g", "b", "magenta", "cyan", "orange"]
for j,cnv_file in enumerate(cnv_files):
figure(figsize=(22,20))
bins_n, coverage_n, coverage_t = get_cn(cnv_file)
chroms = []
for x in range(1,23):
chroms.append("chr"+str(x))
chroms.extend(["chrX", "chrY", "chrMT"])
length = get_lengths()
k = 1
for chrom in chroms:
subplot(5,5,k)
# print len(bins_n[chrom]), len(coverage_n[chrom]), len(coverage_t[chrom])
ratio = [float(x[1])/float(x[0]+0.000001) for x in zip(coverage_n[chrom], coverage_t[chrom])]
# bins = []
# nratio = []
# for i,x in enumerate(ratio):
# if x < 0.5 or x > 1.5:
# bins.append(bins_n[chrom][i])
# nratio.append(x)
# for m in range(0,len(bins),2):
# plot(bins[m:m+2], nratio[m:m+2], color=colors[j], label="patient_%d" % samples[j])
# scatter(bins_n[chrom], ratio, color='r', s=2, label="tumor/normal ratio")
plot(bins_n[chrom], ratio, color='r', lw=3, label="tumor/normal ratio")
#scatter(bins_n[chrom], coverage_t[chrom], color='r', s=2, label="tumor")
if k==25:
legend()
#plot(bins_n[chrom], coverage_n[chrom], color='b')
#plot(bins_t[chrom], coverage_t[chrom], color='r')
ylim([0,5])
xlim([0,length[chrom]])
title(chrom + "(patient_%d)" % samples[j])
print chrom
#print chrom, bins_n[chrom]
#print chrom, coverage_n[chrom]
k+=1
outfile = "/work/projects/melanomics/analysis/genome/cnvnator/binSize100/graphs/patient_%d.cnv.pdf" % samples[j]
savefig(outfile, \
bbox_inches = "tight")
        sys.stderr.write("Plots saved at %s\n" % outfile)
#plot(bins_n[chrom], coverage_n[chrom])
#scatter(bins_n["chr1"], coverage_n["chr1"])
#fig=gcf()
#fig.set_size_inches(18.5,10.5)
#fig.savefig('foo1.png',dpi=100)
#show()
|
[
"ak@uni.fake"
] |
ak@uni.fake
|
9a2c49f544049615b1e0f7cf64ee36d80f8a921a
|
7e16835a0a47c36bc12e9b0ca8990ab57304914e
|
/flask_security_bundle/views/security_controller.py
|
a250c08932fd067cb114cee5abfb2b45759c4b52
|
[
"MIT"
] |
permissive
|
briancappello/flask-security-bundle
|
b3436152277d4e7886933cf026d8c2e843998c1c
|
d9b97a4408c2001a0d29f5c55a4540a4917abd24
|
refs/heads/master
| 2021-04-15T11:37:20.085204
| 2018-08-24T22:12:01
| 2018-08-24T22:12:01
| 126,606,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,647
|
py
|
from flask import current_app as app, request
from flask_unchained import Controller, route, lazy_gettext as _
from flask_unchained import injectable
from flask_unchained.bundles.sqlalchemy import SessionManager
from http import HTTPStatus
from werkzeug.datastructures import MultiDict
from ..decorators import anonymous_user_required, auth_required
from ..extensions import Security
from ..services import SecurityService, SecurityUtilsService
from ..utils import current_user
class SecurityController(Controller):
def __init__(self,
security: Security = injectable,
security_service: SecurityService = injectable,
security_utils_service: SecurityUtilsService = injectable,
session_manager: SessionManager = injectable):
self.security = security
self.security_service = security_service
self.security_utils_service = security_utils_service
self.session_manager = session_manager
@route(only_if=False)
@auth_required()
def check_auth_token(self):
"""
View function to check a token, and if it's valid, log the user in.
Disabled by default; must be explicitly enabled in your ``routes.py``.
"""
# the auth_required decorator verifies the token and sets current_user,
# just need to return a success response
return self.jsonify({'user': current_user})
@route(methods=['GET', 'POST'])
@anonymous_user_required(msg='You are already logged in', category='success')
def login(self):
"""
View function to log a user in. Supports html and json requests.
"""
form = self._get_form('SECURITY_LOGIN_FORM')
if (form.validate_on_submit()
and self.security_service.login_user(form.user, form.remember.data)):
self.after_this_request(self._commit)
if request.is_json:
return self.jsonify({'token': form.user.get_auth_token(),
'user': form.user})
self.flash(_('flask_security_bundle.flash.login'), category='success')
return self.redirect('SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
elif form.errors:
form = self.security_service.process_login_errors(form)
if request.is_json:
return self.jsonify({'error': form.errors.get('_error')[0]},
code=HTTPStatus.UNAUTHORIZED)
return self.render('login',
login_user_form=form,
**self.security.run_ctx_processor('login'))
@route()
def logout(self):
"""
View function to log a user out. Supports html and json requests.
"""
if current_user.is_authenticated:
self.security_service.logout_user()
if request.is_json:
return '', HTTPStatus.NO_CONTENT
self.flash(_('flask_security_bundle.flash.logout'), category='success')
return self.redirect('SECURITY_POST_LOGOUT_REDIRECT_ENDPOINT')
@route(methods=['GET', 'POST'],
only_if=lambda app: app.config.get('SECURITY_REGISTERABLE'))
@anonymous_user_required
def register(self):
"""
View function to register user. Supports html and json requests.
"""
form = self._get_form('SECURITY_REGISTER_FORM')
if form.validate_on_submit():
user = self.security_service.user_manager.create(**form.to_dict())
self.security_service.register_user(user)
return self.redirect('SECURITY_POST_REGISTER_REDIRECT_ENDPOINT')
return self.render('register',
register_user_form=form,
**self.security.run_ctx_processor('register'))
@route(methods=['GET', 'POST'],
only_if=lambda app: app.config.get('SECURITY_CONFIRMABLE'))
def send_confirmation_email(self):
"""
View function which sends confirmation token and instructions to a user.
"""
form = self._get_form('SECURITY_SEND_CONFIRMATION_FORM')
if form.validate_on_submit():
self.security_service.send_email_confirmation_instructions(form.user)
self.flash(_('flask_security_bundle.flash.confirmation_request',
email=form.user.email), category='info')
if request.is_json:
return '', HTTPStatus.NO_CONTENT
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('send_confirmation_email',
send_confirmation_form=form,
**self.security.run_ctx_processor('send_confirmation_email'))
@route('/confirm/<token>',
only_if=lambda app: app.config.get('SECURITY_CONFIRMABLE'))
def confirm_email(self, token):
"""
        View function to confirm a user's token from the confirmation email sent to them.
Supports html and json requests.
"""
expired, invalid, user = \
self.security_utils_service.confirm_email_token_status(token)
if not user or invalid:
invalid = True
self.flash(_('flask_security_bundle.flash.invalid_confirmation_token'),
category='error')
already_confirmed = user is not None and user.confirmed_at is not None
if expired and not already_confirmed:
self.security_service.send_email_confirmation_instructions(user)
self.flash(_('flask_security_bundle.flash.confirmation_expired',
email=user.email,
within=app.config.get('SECURITY_CONFIRM_EMAIL_WITHIN')),
category='error')
if invalid or (expired and not already_confirmed):
return self.redirect('SECURITY_CONFIRM_ERROR_REDIRECT_ENDPOINT',
'security_controller.send_confirmation_email')
if self.security_service.confirm_user(user):
self.after_this_request(self._commit)
self.flash(_('flask_security_bundle.flash.email_confirmed'),
category='success')
else:
self.flash(_('flask_security_bundle.flash.already_confirmed'),
category='info')
if user != current_user:
self.security_service.logout_user()
self.security_service.login_user(user)
return self.redirect('SECURITY_POST_CONFIRM_REDIRECT_ENDPOINT',
'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
@route(methods=['GET', 'POST'],
only_if=lambda app: app.config.get('SECURITY_RECOVERABLE'))
@anonymous_user_required(msg='You are already logged in',
category='success')
def forgot_password(self):
"""
View function to request a password recovery email with a reset token.
Supports html and json requests.
"""
form = self._get_form('SECURITY_FORGOT_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.send_reset_password_instructions(form.user)
self.flash(_('flask_security_bundle.flash.password_reset_request',
email=form.user.email), category='info')
if request.is_json:
return '', HTTPStatus.NO_CONTENT
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('forgot_password',
forgot_password_form=form,
**self.security.run_ctx_processor('forgot_password'))
@route('/reset-password/<string:token>', methods=['GET', 'POST'],
only_if=lambda app: app.config.get('SECURITY_RECOVERABLE'))
@anonymous_user_required
def reset_password(self, token):
"""
        View function to verify a user's reset password token from the email we sent them.
It also handles the form for them to set a new password.
Supports html and json requests.
"""
expired, invalid, user = \
self.security_utils_service.reset_password_token_status(token)
if invalid:
self.flash(_('flask_security_bundle.flash.invalid_reset_password_token'),
category='error')
return self.redirect('SECURITY_INVALID_RESET_TOKEN_REDIRECT')
elif expired:
self.security_service.send_reset_password_instructions(user)
self.flash(_('flask_security_bundle.flash.password_reset_expired',
email=user.email,
within=app.config.get('SECURITY_RESET_PASSWORD_WITHIN')),
category='error')
return self.redirect('SECURITY_EXPIRED_RESET_TOKEN_REDIRECT')
spa_redirect = app.config.get('SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT')
if request.method == 'GET' and spa_redirect:
return self.redirect(spa_redirect, token=token, _external=True)
form = self._get_form('SECURITY_RESET_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.reset_password(user, form.password.data)
self.security_service.login_user(user)
self.after_this_request(self._commit)
self.flash(_('flask_security_bundle.flash.password_reset'),
category='success')
if request.is_json:
return self.jsonify({'token': user.get_auth_token(),
'user': user})
return self.redirect('SECURITY_POST_RESET_REDIRECT_ENDPOINT',
'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('reset_password',
reset_password_form=form,
reset_password_token=token,
**self.security.run_ctx_processor('reset_password'))
@route(methods=['GET', 'POST'],
only_if=lambda app: app.config.get('SECURITY_CHANGEABLE'))
@auth_required
def change_password(self):
"""
View function for a user to change their password.
Supports html and json requests.
"""
form = self._get_form('SECURITY_CHANGE_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.change_password(
current_user._get_current_object(),
form.new_password.data)
self.after_this_request(self._commit)
self.flash(_('flask_security_bundle.flash.password_change'),
category='success')
if request.is_json:
return self.jsonify({'token': current_user.get_auth_token()})
return self.redirect('SECURITY_POST_CHANGE_REDIRECT_ENDPOINT',
'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('change_password',
change_password_form=form,
**self.security.run_ctx_processor('change_password'))
def _get_form(self, name):
form_cls = app.config.get(name)
if request.is_json:
return form_cls(MultiDict(request.get_json()))
return form_cls(request.form)
def _commit(self, response=None):
self.session_manager.commit()
return response
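# A hypothetical JSON client call against SecurityController.login above (the
# '/login' path and the form field names are assumptions; on success the
# controller returns {'token': ..., 'user': ...}, on failure a single
# '_error' message with HTTP 401):
#
#   import requests
#   resp = requests.post('http://localhost:5000/login',
#                        json={'email': 'user@example.com', 'password': 'secret'})
#   if resp.ok:
#       token = resp.json()['token']
#   else:
#       print(resp.json()['error'])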
|
[
"briancappello@gmail.com"
] |
briancappello@gmail.com
|
e7ea88b1c6c9c2d142b0f97f9ca2d143d8f0be8a
|
f90d6eb8c815b26be1c839ed5bbe0c65f7079e65
|
/69.py
|
43e28bbb26cf587ba0d74e36a5923745b207c84c
|
[] |
no_license
|
zhangxu0307/leetcode_solutions
|
0c2b76e838f60e48f392605166bd2b6725686740
|
c5779489b3e71a12008023d8776ca0f1c10321e0
|
refs/heads/master
| 2021-01-24T16:52:58.555076
| 2018-02-28T02:40:32
| 2018-02-28T02:40:32
| 123,217,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
class Solution(object):
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
start = 1
end = x
mid = 0
while start <= end:
mid = (end + start)/2
if x/mid == mid:
return mid
elif x/mid > mid:
start = mid+1
elif x/mid < mid:
end = mid-1
return end
x = 2
s = Solution()
ans = s.mySqrt(x)
print ans
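# A Python 3-friendly port (the loop above relies on Python 2 integer
# division); comparing mid*mid instead of x/mid also avoids any division:
class Solution2(object):
    def mySqrt(self, x):
        start, end = 1, x
        while start <= end:
            mid = (start + end) // 2
            if mid * mid == x:
                return mid
            elif mid * mid < x:
                start = mid + 1
            else:
                end = mid - 1
        return end

print(Solution2().mySqrt(x))  # same answer: 1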
|
[
"zhangxu0307@163.com"
] |
zhangxu0307@163.com
|
d683a95ff6e6491370e8718e4a90b876a96cd363
|
b241363657adce376209818d6b0c158c23c73164
|
/crm/migrations/0002_airconditioning_environment_lamp.py
|
2166bd6c4a3626a5b8aa9f2ec938a2c9e3aef4d8
|
[] |
no_license
|
hesllerhuller/django-sass
|
c93cf37bf49e5109668c79e4e4fa95c3870b4fe7
|
342d244541bd170fa8be74b71ae62f3b488a86b0
|
refs/heads/main
| 2023-02-27T22:41:53.809489
| 2021-02-01T19:13:19
| 2021-02-01T19:13:19
| 332,250,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,865
|
py
|
# Generated by Django 3.1.5 on 2021-01-23 21:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crm', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Environment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('local', models.CharField(max_length=255)),
('t_a', models.FloatField(blank=True)),
('t_t', models.FloatField(blank=True)),
('umd', models.FloatField(blank=True)),
('n_g', models.FloatField(blank=True)),
],
),
migrations.CreateModel(
name='Lamp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('power', models.FloatField(default=0)),
('on_off', models.BooleanField(default=0)),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.environment')),
],
),
migrations.CreateModel(
name='AirConditioning',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('power', models.FloatField(default=0)),
('brand', models.CharField(max_length=255)),
('model', models.CharField(max_length=255)),
('on_off', models.BooleanField(default=0)),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.environment')),
],
),
]
|
[
"hesllerh@gmail.com"
] |
hesllerh@gmail.com
|
9dc92a48621a56da91a79652ea3a06b42920b5bc
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/jobs/transforms/validation/subtopic_validation_test.py
|
50e9eb7ac2fe6deac4b10c427f91ce9e4bb79367
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 9,929
|
py
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.transforms.subtopic_validation."""
from __future__ import annotations
from core.jobs import job_test_utils
from core.jobs.transforms.validation import subtopic_validation
from core.jobs.types import base_validation_errors
from core.platform import models
import apache_beam as beam
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import subtopic_models
(base_models, subtopic_models) = models.Registry.import_models(
[models.Names.BASE_MODEL, models.Names.SUBTOPIC])
class ValidateSubtopicCommitCmdsSchemaTests(job_test_utils.PipelinedTestBase):
def test_validate_change_domain_implemented(self) -> None:
valid_commit_cmd_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='delete',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([valid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [])
def test_subtopic_page_change_object_with_missing_cmd(self) -> None:
invalid_commit_cmd_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='delete',
commit_cmds=[{'invalid': 'data'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'invalid': 'data'},
'Missing cmd key in change dict')
])
def test_subtopic_page_change_object_with_invalid_cmd(self) -> None:
invalid_commit_cmd_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='delete',
commit_cmds=[{'cmd': 'invalid'}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'invalid'},
'Command invalid is not allowed')
])
def test_subtopic_page_change_object_with_missing_attribute_in_cmd(
self
) -> None:
invalid_commit_cmd_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='edit',
commit_cmds=[{
'cmd': 'update_subtopic_page_property',
'property_name': '<p>page_contents_html</p>',
'subtopic_id': 'subtopic_id'
}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{
'cmd': 'update_subtopic_page_property',
'property_name': '<p>page_contents_html</p>',
'subtopic_id': 'subtopic_id'
},
'The following required attributes are missing: '
'new_value, old_value')
])
def test_subtopic_page_change_object_with_extra_attribute_in_cmd(
self
) -> None:
invalid_commit_cmd_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='create',
commit_cmds=[{
'cmd': 'create_new',
'topic_id': 'topic_id',
'subtopic_id': 'subtopic_id',
'invalid': 'invalid'
}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{
'cmd': 'create_new',
'topic_id': 'topic_id',
'subtopic_id': 'subtopic_id',
'invalid': 'invalid'
},
'The following extra attributes are present: invalid')
])
def test_subtopic_page_change_object_with_invalid_subtopic_page_property(
self
) -> None:
invalid_commit_cmd_model = (
subtopic_models.SubtopicPageSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer_id',
commit_type='edit',
commit_cmds=[{
'cmd': 'update_subtopic_page_property',
'subtopic_id': 'subtopic_id',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{
'cmd': 'update_subtopic_page_property',
'subtopic_id': 'subtopic_id',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
},
'Value for property_name in cmd '
'update_subtopic_page_property: invalid is not allowed')
])
class ValidateSubtopicPageCommitLogEntryModelTests(
job_test_utils.PipelinedTestBase):
def test_validate_subtopic_page_model(self) -> None:
valid_commit_cmd_model = (
subtopic_models.SubtopicPageCommitLogEntryModel(
id='subtopicpage_id123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
commit_type='test-type',
user_id='',
subtopic_page_id='123',
post_commit_status='private',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([valid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageCommitLogEntryModel())
)
self.assert_pcoll_equal(output, [])
def test_raises_commit_cmd_none_error(self) -> None:
invalid_commit_cmd_model = (
subtopic_models.SubtopicPageCommitLogEntryModel(
id='model_id123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
commit_type='test-type',
user_id='',
subtopic_page_id='123',
post_commit_status='private',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
subtopic_validation.ValidateSubtopicPageCommitLogEntryModel(
))
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsNoneError(
invalid_commit_cmd_model)
])
|
[
"noreply@github.com"
] |
oppia.noreply@github.com
|