blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d62cbd757a73de4d12b4bf3a14c3779c20eb6bc0 | b26a9796c3fdcf4b10932b9043399e409558c90e | /mdio/config/desktop.py | 2c86465c9e3f622bc2215d7cfc338012929f7aba | [
"MIT"
] | permissive | neilLasrado/mdio | 92d034ef27f2e7831a41455e944a80f9a4a61e4d | daec26c101f96819db97db8bf9e7ba29f9738687 | refs/heads/master | 2020-03-25T06:40:58.039473 | 2018-09-16T19:04:46 | 2018-09-16T19:04:46 | 143,518,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
"District": {
"color": "#9b59b6",
"icon": "icon-globe",
"icon": "octicon octicon-globe",
"link": "List/District",
"doctype": "District",
"type": "list"
},
"Project": {
"color": "#c23c59",
"icon": "octicon octicon-rocket",
"label": _("Project"),
"link": "List/District Project",
"doctype": "District Project",
"type": "list"
}
}
| [
"neil@digithinkit.com"
] | neil@digithinkit.com |
3493381777ce41dcb975ad7f011e2b61b299f283 | 69d0deb5921edc82eea0ae184db99b87a0ca6900 | /catkin_ws/build/srrg2_solver_calib_addons/catkin_generated/pkg.installspace.context.pc.py | f987c6cfb6f21976c245dc0a57e1233ba5d4bbf7 | [
"MIT"
] | permissive | laaners/progetto-labiagi_pick_e_delivery | 8d4006e206cd15b90b7e2291876c2b201e314621 | 3453bfbc1dd7562c78ba06c0f79b069b0a952c0e | refs/heads/main | 2023-08-19T00:17:51.491475 | 2021-09-16T16:35:45 | 2021-09-16T16:35:45 | 409,192,385 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsrrg2_solver_calib_utils_library".split(';') if "-lsrrg2_solver_calib_utils_library" != "" else []
PROJECT_NAME = "srrg2_solver_calib_addons"
PROJECT_SPACE_DIR = "/home/alessiohu/Desktop/progetto-labiagi/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"hu.183947@studenti.uniroma1.it"
] | hu.183947@studenti.uniroma1.it |
3ad37185c077b3819db6c35bc0d6421a09e4a073 | c9ebca529d290fc2fa94b5ce5b61657e431260e3 | /interviewcake/product-of-other-numbers.py | afaedd1b08667961316b7027227457fc3a831e8f | [] | no_license | aaakashkumar/competitive_programming | 58677e3166636d042d00e1d2be589499c5f17c1a | 575fa25c4586fa41b3d45d95dca6eff9584c3a4a | refs/heads/main | 2023-04-29T03:46:02.911230 | 2021-05-15T10:12:50 | 2021-05-15T10:12:50 | 305,634,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | # https://www.interviewcake.com/question/python3/product-of-other-numbers?course=fc1§ion=greedy
# @author Akash Kumar
import unittest
def get_products_of_all_ints_except_at_index(int_list):
    """Return a list where entry i is the product of every element of
    int_list except int_list[i], without using division.

    Uses the standard two-pass technique: a forward pass fills each slot
    with the product of everything *before* it, and a backward pass
    multiplies in the product of everything *after* it.  O(n) time,
    O(n) extra space.

    :param int_list: list of ints with at least two elements.
    :raises ValueError: if int_list has fewer than two elements (the
        "product of all the others" is undefined).
    """
    if len(int_list) < 2:
        raise ValueError('Getting the product of numbers at other '
                         'indices requires at least 2 numbers')

    n = len(int_list)
    products = [1] * n

    # Forward pass: products[i] = product of int_list[0..i-1].
    running = 1
    for i in range(n):
        products[i] = running
        running *= int_list[i]

    # Backward pass: multiply in product of int_list[i+1..n-1].
    running = 1
    for i in range(n - 1, -1, -1):
        products[i] *= running
        running *= int_list[i]

    return products
# Tests
class Test(unittest.TestCase):
    """Unit tests for get_products_of_all_ints_except_at_index."""

    def test_small_list(self):
        self.assertEqual(
            get_products_of_all_ints_except_at_index([1, 2, 3]),
            [6, 3, 2])

    def test_longer_list(self):
        self.assertEqual(
            get_products_of_all_ints_except_at_index([8, 2, 4, 3, 1, 5]),
            [120, 480, 240, 320, 960, 192])

    def test_list_has_one_zero(self):
        self.assertEqual(
            get_products_of_all_ints_except_at_index([6, 2, 0, 3]),
            [0, 0, 36, 0])

    def test_list_has_two_zeros(self):
        self.assertEqual(
            get_products_of_all_ints_except_at_index([4, 0, 9, 1, 0]),
            [0, 0, 0, 0, 0])

    def test_one_negative_number(self):
        self.assertEqual(
            get_products_of_all_ints_except_at_index([-3, 8, 4]),
            [32, -12, -24])

    def test_all_negative_numbers(self):
        self.assertEqual(
            get_products_of_all_ints_except_at_index([-7, -1, -4, -2]),
            [-8, -56, -14, -28])

    def test_error_with_empty_list(self):
        # Too few elements: the function must raise rather than return.
        with self.assertRaises(Exception):
            get_products_of_all_ints_except_at_index([])

    def test_error_with_one_number(self):
        with self.assertRaises(Exception):
            get_products_of_all_ints_except_at_index([1])
unittest.main(verbosity=2) | [
"noreply@github.com"
] | noreply@github.com |
ae953f626dcd7a8cc3573ca343fdeac058daa21f | df0c4875b45e68c106dd1e2ba397f71a10794327 | /src/pifetcher/utilities/sys_utils.py | d389d2340abd6f3e65f41dbd8999e6aed152bff2 | [
"MIT"
] | permissive | gavinz0228/pifetcher | c28b407cf4965852af67ffe619a55ee90fa49a72 | c8419ae153eefed04e0e8b239cf1a9226fa91c29 | refs/heads/master | 2021-07-04T20:26:41.973408 | 2020-11-22T16:57:38 | 2020-11-22T16:57:38 | 203,682,327 | 1 | 0 | null | 2019-08-24T17:04:59 | 2019-08-22T00:06:58 | Python | UTF-8 | Python | false | false | 507 | py | from os import path, chmod
from sys import platform
import stat
class SysUtils:
    """Small filesystem helpers: path validation and permission tweaks."""

    @staticmethod
    def ensure_path(file_path):
        """Return file_path unchanged, raising if it does not exist on disk.

        :param file_path: path string to validate.
        :raises Exception: when the path does not exist.
        """
        if not path.exists(file_path):
            raise Exception(f'file path {file_path} does not exist.')
        return file_path

    @staticmethod
    def set_executable_permission(file_path):
        """On POSIX platforms, chmod file_path to make it executable.

        No-op on other platforms (e.g. Windows), where the POSIX
        executable bit does not apply.
        """
        if platform in ['linux', 'linux2', 'darwin']:
            # Original code called chmod twice with identical arguments;
            # the redundant duplicate call has been removed.
            # NOTE(review): stat.S_IRWXO grants read/write/execute to
            # *others* only (mode 0o007); S_IRWXU (owner) was probably
            # intended — confirm before changing.
            chmod(file_path, stat.S_IRWXO)
| [
"gavinz0228@gmail.com"
] | gavinz0228@gmail.com |
f3277b20bb721bebb914ea94042f68ca678765e6 | e331e4f0c321b98acde31faf3548194ae6d7d14b | /qa/rpc-tests/spentindex.py | d1d3c9ccfca34d77274592a835672489a20dd991 | [
"MIT"
] | permissive | MB8Coin/mb8coin-core | 487e3e16e43c008a6913d92e6edcf428c67a1f50 | 1fa5bd60019f6cff8038ace509ec4ca17c8233c7 | refs/heads/master | 2021-10-27T07:12:31.935401 | 2021-10-19T19:02:31 | 2021-10-19T19:02:31 | 131,882,320 | 5 | 3 | MIT | 2019-05-24T14:29:38 | 2018-05-02T17:10:21 | C++ | UTF-8 | Python | false | false | 6,342 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import MB8CoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(MB8CoinTestFramework):
    """Functional test for the -spentindex node option.

    Verifies the getspentinfo RPC, the spent/input-value fields in verbose
    getrawtransaction output (both mempool and database index), and the
    getblockdeltas RPC.
    """

    def setup_chain(self):
        """Create a fresh 4-node regtest chain in the temp directory."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        """Start four nodes (1-3 with -spentindex, node 3 also with
        -txindex) and connect them in a star around node 0."""
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-spentindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-spentindex"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-spentindex", "-txindex"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        """Exercise the spent-index RPCs end to end."""
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        # Spend a mature coinbase output to a fixed P2PKH address so the
        # spend is recorded in the spent index.
        print("Testing spent index...")
        feeSatoshis = 10000
        privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
        scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        # Amount in satoshis, minus the fixed fee.
        amount = int(unspent[0]["amount"] * 100000000 - feeSatoshis)
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        print("Testing getspentinfo method...")
        # Check that the spentinfo works standalone
        info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
        assert_equal(info["txid"], txid)
        assert_equal(info["index"], 0)
        assert_equal(info["height"], 106)
        print("Testing getrawtransaction method...")
        # Check that verbose raw transaction includes spent info
        txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
        # Check that verbose raw transaction includes input values
        txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
        assert_equal(float(txVerbose2["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
        assert_equal(txVerbose2["vin"][0]["valueSat"], amount + feeSatoshis)
        # Check that verbose raw transaction includes address values and input values
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
        amount = int(amount - feeSatoshis);
        tx2.vout = [CTxOut(amount, scriptPubKey2)]
        # NOTE(review): this looks like it should be tx2.rehash() — tx was
        # already rehashed above. Confirm before changing.
        tx.rehash()
        self.nodes[0].importprivkey(privkey)
        signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
        # Check the mempool index
        self.sync_all()
        txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
        assert_equal(txVerbose3["vin"][0]["address"], address2)
        assert_equal(txVerbose3["vin"][0]["valueSat"], amount + feeSatoshis)
        assert_equal(float(txVerbose3["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
        # Check the database index
        block_hash = self.nodes[0].generate(1)
        self.sync_all()
        txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
        assert_equal(txVerbose4["vin"][0]["address"], address2)
        assert_equal(txVerbose4["vin"][0]["valueSat"], amount + feeSatoshis)
        assert_equal(float(txVerbose4["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
        # Check block deltas
        print("Testing getblockdeltas...")
        block = self.nodes[3].getblockdeltas(block_hash[0])
        # Delta 0 is the coinbase: no tracked inputs or outputs.
        assert_equal(len(block["deltas"]), 2)
        assert_equal(block["deltas"][0]["index"], 0)
        assert_equal(len(block["deltas"][0]["inputs"]), 0)
        assert_equal(len(block["deltas"][0]["outputs"]), 0)
        assert_equal(block["deltas"][1]["index"], 1)
        assert_equal(block["deltas"][1]["txid"], txid2)
        assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
        assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
        # Inputs are reported as negative satoshi deltas.
        assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], (amount + feeSatoshis) * -1)
        assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
        assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
        assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
        assert_equal(block["deltas"][1]["outputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
        assert_equal(block["deltas"][1]["outputs"][0]["satoshis"], amount)
        print("Passed\n")
# Run the functional test directly when invoked as a script.
if __name__ == '__main__':
    SpentIndexTest().main()
| [
"vidaru@protonmail.com"
] | vidaru@protonmail.com |
34f4f7b2ce5b694d01a386ef1898e24a0a84e375 | a2a3bb37c3228b01681e019ad9781a01f0245195 | /blog/database.py | 5442b2f2d96af0a090ef3619a8e46773cc66481f | [] | no_license | prinudickson/fastapi_learning | 51e84423414d0cc8a6379464e81b6cc0ceebd3a7 | 284835b0cc94d564dc80a3b36e343a96d917ab49 | refs/heads/main | 2023-08-15T05:47:19.374600 | 2021-10-21T19:20:35 | 2021-10-21T19:20:35 | 398,273,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "sqlite:///./blog.db"
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close() | [
"prinu.dickson@nl.pwc.com"
] | prinu.dickson@nl.pwc.com |
8d6cf1588bdda74af37dd6269bec5931e71b5745 | cbeb1d7060dbc281c11c6b029a1d156e1ec7ebfd | /mountains/massif_amorican/cols.py | 962ec60e6f13af3674ab0bd98560b70c7b4298da | [] | no_license | paulkirkwood/py.parcoursdb | 28ceceaf4f44e03d9911892a9a916447cd7c7477 | df2745064e4c66dc0c2d522fc0381bf13a8e7859 | refs/heads/master | 2020-09-14T04:58:10.940799 | 2020-02-24T21:04:31 | 2020-02-24T21:04:31 | 223,024,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | import country
from col import Col
from ..util import french_col
def mur_de_bretagne():
    """Return the Col object for Mûr-de-Bretagne (293 m, category 2, 6.9 km)."""
    name = "Mûr-de-Bretagne"
    height, category, length = 293, 2, 6.9
    return french_col(name, height, category, length)
| [
"paul@paulandsue.plus.com"
] | paul@paulandsue.plus.com |
33528cd85e5325910f82fa63bb57f9f679aeff7b | 284e9633e979ef51f6b1cf4525a90f2b3d9d1889 | /wasm/tests/test_exec_mode.py | 1914f102e50984a82c10b7d3464413d03322193d | [
"MIT"
] | permissive | JesterOrNot/RustPython | 01778140e2e7beaf2a8e2c3da2ce604d6eb116ba | bbe728e4ce203d6b328087b553acc8f81842f970 | refs/heads/master | 2020-12-14T19:26:42.785389 | 2020-02-04T21:10:09 | 2020-02-04T21:10:09 | 234,846,323 | 0 | 0 | MIT | 2020-01-19T05:36:26 | 2020-01-19T05:36:24 | null | UTF-8 | Python | false | false | 1,279 | py | import time
import sys
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import pytest
def print_stack(driver):
    """Dump RustPython's last error message and JS stack from the page to stderr."""
    js = "return window.__RUSTPYTHON_ERROR_MSG + '\\n' + window.__RUSTPYTHON_ERROR_STACK"
    trace = driver.execute_script(js)
    print(f"RustPython error stack:\n{trace}", file=sys.stderr)
@pytest.fixture(scope="module")
def driver(request):
    """Module-scoped headless Firefox session pointed at the local dev server."""
    opts = Options()
    opts.add_argument('-headless')
    browser = webdriver.Firefox(options=opts)
    try:
        browser.get("http://localhost:8080")
    except Exception:
        # Surface the page-side RustPython error before propagating.
        print_stack(browser)
        raise
    # Give the WASM interpreter time to initialize before tests run.
    time.sleep(5)
    yield browser
    browser.close()
def test_eval_mode(driver):
assert driver.execute_script("return window.rp.pyEval('1+1')") == 2
def test_exec_mode(driver):
assert driver.execute_script("return window.rp.pyExec('1+1')") is None
def test_exec_single_mode(driver):
assert driver.execute_script("return window.rp.pyExecSingle('1+1')") == 2
assert driver.execute_script(
"""
var output = [];
save_output = function(text) {{
output.push(text)
}};
window.rp.pyExecSingle('1+1\\n2+2',{stdout: save_output});
return output;
""") == ['2\n', '4\n']
| [
"yanganto@gmail.com"
] | yanganto@gmail.com |
297467e64e5b45612d4fe55253b3388b8442f79f | 770d4df866b9e66a333f3ffeacdd659b8553923a | /results/0193/config.py | fbbe800c6116da5429a209d219fc7846de53d1e2 | [] | no_license | leojo/ResultsOverview | b2062244cbd81bc06b99963ae9b1695fa9718f90 | a396abc7a5b4ab257150c0d37c40b646ebb13fcf | refs/heads/master | 2020-03-20T19:52:37.217926 | 2018-08-05T12:50:27 | 2018-08-05T12:50:27 | 137,656,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | import os
import numpy as np
import waveUtils
class config(object):
    """Experiment configuration for the audio source-separation autoencoder.

    Bundles cluster (bsub) submission settings, training hyper-parameters,
    network-shape constants, and helpers that load the source audio and
    assemble mixed training batches.
    """

    def __init__(self):
        # Eagerly load the audio so every instance is ready to batch.
        self.prepare_data()

    # Bsub arguments
    bsub_mainfile = "main.py"
    bsub_processors = 4
    bsub_timeout = "4:00"
    bsub_memory = 8000

    # Epoch and batch config
    batch_size = 128
    latent_dim = 100
    epochs = 100
    epoch_updates = 100

    # Network structure
    input_s = 16000
    n_ae = 5
    n_conv_layers = 3
    n_deconv_layers = 3
    first_size = input_s // (2 ** n_deconv_layers)
    final_decoder_filter_size = 3

    # Model
    load_model = False
    model_path = os.path.join("models", "0103", "model")  # only used if load_model=True

    # Miscellaneous constants
    sample_rate = 8000
    reconstruction_mult = 1
    learning_rate_min = 1e-3
    learning_rate_max = 1e-3
    # Controls the shape of the scaling curve from max to min learning rate.
    learning_rate_scaling_factor = 0
    learning_rate = 1e-3  # legacy
    kl_loss_mult = 1e-7
    kl_extra_mult = 2
    kl_extra_exponent = 2
    keep_prob = 1
    use_square = False
    data_sources = ["sax-baritone", "violin"]
    # Lazily populated by load_data(): one list of clips per data source.
    data = None

    # Functions
    def prepare_data(self):
        """Load all configured audio sources (idempotent via load_data)."""
        self.load_data()

    def load_and_prepare_audio(self, source):
        """Load one source's wav files, keep the loudest window of
        input_s samples, and resample it down to self.sample_rate.

        :param source: subdirectory name under "wav_files".
        :return: list of prepared clips (as produced by waveUtils).
        """
        duration = self.input_s / float(self.sample_rate)
        data_dir = os.path.join("wav_files", source)
        waves, original_sample_rate = waveUtils.loadAudioFiles(data_dir)
        cut_data = waveUtils.extractHighestMeanIntensities(
            waves, sample_rate=original_sample_rate, duration=duration)
        # Free the full-length recordings as soon as they are cut down.
        del waves
        data = waveUtils.reduceQuality(cut_data, self.sample_rate, duration)
        del cut_data
        return data

    def load_data(self):
        """Populate self.data once; subsequent calls are no-ops."""
        if self.data is None:
            self.data = [self.load_and_prepare_audio(source)
                         for source in self.data_sources]

    def get_training_batch(self):
        """Return (samples, originals) for one training batch.

        samples: batch_size mixtures, each the mean of one random clip
        per source.  originals: the unmixed clips for each mixture.
        """
        samples = []
        originals = []
        num_sources = len(self.data_sources)
        sample_shape = self.data[0][0].shape
        for _ in range(self.batch_size):
            waves = []
            sample = np.zeros(sample_shape)
            for s in range(num_sources):
                i = np.random.randint(len(self.data[s]))
                wave = self.data[s][i]
                waves.append(wave)
                sample += wave
            # Average so the mixture stays in the sources' amplitude range.
            sample = sample / num_sources
            samples.append(sample)
            originals.append(waves)
        return np.asarray(samples), np.asarray(originals)

    def normalize_batch(self, batch):
        """Scale batch to float32 in [-1, 1] by its absolute peak.

        Fix: an all-zero (silent) batch is returned unchanged instead of
        dividing by zero (which produced NaN/inf with a runtime warning).
        """
        x = batch.astype(np.float32)
        peak = np.max(np.abs(x))
        if peak == 0:
            return x
        return x / peak
| [
"leojohannsson91@gmail.com"
] | leojohannsson91@gmail.com |
196c8b2bad841f8d3e41a79fadadc487737449e2 | 9ba901bf45f3d555395988f480f0eb55a595c83a | /轻松学习Python 69个内置函数/装饰器、描述器/staticmethod().py | 2f2c07d4ac1d6dad54d44c890059a344df83e32a | [] | no_license | gkliya/zhouhuajian-course | 803c3c5b390a9204e07a54924a2df8c1246b72be | 616867150efe46e2d3f8720b859ee38f286e4dbd | refs/heads/master | 2023-01-04T01:04:56.224479 | 2020-10-26T10:22:42 | 2020-10-26T10:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | # @staticmethod
# 将方法转换为静态方法。
#
# 静态方法不会接收隐式的第一个参数。要声明一个静态方法,请使用此语法
#
# class C:
# @staticmethod
# def f(arg1, arg2, ...): ...
# @staticmethod 这样的形式称为函数的 decorator -- 详情参阅 函数定义。
class C:
    """Demo: @staticmethod turns f into a static method (no implicit first argument)."""

    @staticmethod
    def f(*args):
        # No implicit self/cls: args holds only what the caller passed.
        print(f'{ args = }')
# Static methods can be called on the class or on an instance alike.
C.f()
C().f()
# 静态方法的调用可以在类上进行 (例如 C.f()) 也可以在实例上进行 (例如 C().f())。
#
# Python中的静态方法与Java或C ++中的静态方法类似。另请参阅 classmethod() ,用于创建备用类构造函数的变体。
#
# 像所有装饰器一样,也可以像常规函数一样调用 staticmethod ,并对其结果执行某些操作。比如某些情况下需要从类主体引用函数并且您希望避免自动转换为实例方法。对于这些情况,请使用此语法:
#
# class C:
# builtin_open = staticmethod(open)
# 想了解更多有关静态方法的信息,请参阅 标准类型层级结构 。
# 像所有装饰器一样,也可以像常规函数一样调用 staticmethod ,并对其结果执行某些操作。比如某些情况下需要从类主体引用函数并且您希望避免自动转换为实例方法。
class C:
    """Demo: staticmethod() called as a plain function, pre-decorator style."""
    # @staticmethod
    def f(*args):
        print(f'{ args = }')
    # Equivalent to decorating f with @staticmethod above.
    f = staticmethod(f)
    # staticmethod() can also wrap an existing callable so referencing it
    # through the class does not bind it as an instance method.
    builtin_abs = staticmethod(abs)
# Visual separator, then exercise both wrapped callables.
print('-'*20)
C.f()
# builtin_abs dispatches straight to the built-in abs().
print(f'{ C.builtin_abs(-5) = }')
"noreply@github.com"
] | noreply@github.com |
f8b8ecc8c9afc0614b9a66d3e6d49402720bd1bf | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/boto/sts/connection.py | 8c0cf4b269ba1ac3926620ffdf9f697f9a4c88a2 | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 32,142 | py | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.connection import AWSQueryConnection
from boto.provider import Provider, NO_CREDENTIALS_PROVIDED
from boto.regioninfo import RegionInfo
from boto.sts.credentials import Credentials, FederationToken, AssumedRole
from boto.sts.credentials import DecodeAuthorizationMessage
import boto
import boto.utils
import datetime
import threading
# Module-level cache of STS session tokens, keyed by "<region>:<access_key>".
_session_token_cache = {}
class STSConnection(AWSQueryConnection):
"""
AWS Security Token Service
The AWS Security Token Service is a web service that enables you
to request temporary, limited-privilege credentials for AWS
Identity and Access Management (IAM) users or for users that you
authenticate (federated users). This guide provides descriptions
of the AWS Security Token Service API.
For more detailed information about using this service, go to
`Using Temporary Security Credentials`_.
For information about setting up signatures and authorization
through the API, go to `Signing AWS API Requests`_ in the AWS
General Reference . For general information about the Query API,
go to `Making Query Requests`_ in Using IAM . For information
about using security tokens with other AWS products, go to `Using
Temporary Security Credentials to Access AWS`_ in Using Temporary
Security Credentials .
If you're new to AWS and need additional technical information
about a specific AWS product, you can find the product's technical
documentation at `http://aws.amazon.com/documentation/`_.
We will refer to Amazon Identity and Access Management using the
abbreviated form IAM. All copyrights and legal protections still
apply.
"""
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sts.amazonaws.com'
APIVersion = '2011-06-15'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, debug=0,
             https_connection_factory=None, region=None, path='/',
             converter=None, validate_certs=True, anon=False,
             security_token=None, profile_name=None):
    """
    :type anon: boolean
    :param anon: If this parameter is True, the ``STSConnection`` object
        will make anonymous requests, and it will not use AWS
        Credentials or even search for AWS Credentials to make these
        requests.
    """
    # Default to the standard STS region/endpoint when none is given.
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint,
                            connection_cls=STSConnection)
    self.region = region
    self.anon = anon
    # Serializes writes to the module-level session-token cache.
    self._mutex = threading.Semaphore()
    provider = 'aws'
    # If an anonymous request is sent, do not try to look for credentials.
    # So we pass in dummy values for the access key id, secret access
    # key, and session token. It does not matter that they are
    # not actual values because the request is anonymous.
    if self.anon:
        provider = Provider('aws', NO_CREDENTIALS_PROVIDED,
                            NO_CREDENTIALS_PROVIDED,
                            NO_CREDENTIALS_PROVIDED)
    super(STSConnection, self).__init__(aws_access_key_id,
                                        aws_secret_access_key,
                                        is_secure, port, proxy, proxy_port,
                                        proxy_user, proxy_pass,
                                        self.region.endpoint, debug,
                                        https_connection_factory, path,
                                        validate_certs=validate_certs,
                                        security_token=security_token,
                                        profile_name=profile_name,
                                        provider=provider)
def _required_auth_capability(self):
    """Anonymous connections sign nothing; everything else uses SigV4 HMAC."""
    return ['sts-anon'] if self.anon else ['hmac-v4']
def _check_token_cache(self, token_key, duration=None, window_seconds=60):
    """Return the cached session token for token_key, or None.

    A token expiring within window_seconds is treated as already
    expired and not returned.
    """
    cached = _session_token_cache.get(token_key, None)
    if not cached:
        return cached
    remaining = (boto.utils.parse_ts(cached.expiration)
                 - datetime.datetime.utcnow())
    if remaining < datetime.timedelta(seconds=window_seconds):
        boto.log.debug('Cached session token %s is expired' % token_key)
        return None
    return cached
def _get_session_token(self, duration=None,
                       mfa_serial_number=None, mfa_token=None):
    """Issue a GetSessionToken request, forwarding only the arguments set."""
    params = {}
    for key, value in (('DurationSeconds', duration),
                       ('SerialNumber', mfa_serial_number),
                       ('TokenCode', mfa_token)):
        if value:
            params[key] = value
    return self.get_object('GetSessionToken', params,
                           Credentials, verb='POST')
def get_session_token(self, duration=None, force_new=False,
                      mfa_serial_number=None, mfa_token=None):
    """
    Return a valid session token. Because retrieving new tokens
    from the Secure Token Service is a fairly heavyweight operation
    this module caches previously retrieved tokens and returns
    them when appropriate. Each token is cached with a key
    consisting of the region name of the STS endpoint
    concatenated with the requesting user's access id. If there
    is a token in the cache meeting with this key, the session
    expiration is checked to make sure it is still valid and if
    so, the cached token is returned. Otherwise, a new session
    token is requested from STS and it is placed into the cache
    and returned.

    :type duration: int
    :param duration: The number of seconds the credentials should
        remain valid.

    :type force_new: bool
    :param force_new: If this parameter is True, a new session token
        will be retrieved from the Secure Token Service regardless
        of whether there is a valid cached token or not.

    :type mfa_serial_number: str
    :param mfa_serial_number: The serial number of an MFA device.
        If this is provided and if the mfa_passcode provided is
        valid, the temporary session token will be authorized with
        to perform operations requiring the MFA device authentication.

    :type mfa_token: str
    :param mfa_token: The 6 digit token associated with the
        MFA device.
    """
    # Cache key: STS endpoint region + requesting user's access key id.
    token_key = '%s:%s' % (self.region.name, self.provider.access_key)
    token = self._check_token_cache(token_key, duration)
    if force_new or not token:
        boto.log.debug('fetching a new token for %s' % token_key)
        try:
            # Semaphore ensures only one thread refreshes/writes the
            # cache entry at a time.
            self._mutex.acquire()
            token = self._get_session_token(duration,
                                            mfa_serial_number,
                                            mfa_token)
            _session_token_cache[token_key] = token
        finally:
            self._mutex.release()
    return token
def get_federation_token(self, name, duration=None, policy=None):
    """Return temporary security credentials for a federated user.

    Issues a GetFederationToken call, which must be made with the
    long-term credentials of an AWS account or IAM user. The returned
    credentials (access key id, secret key, session token) carry the
    intersection of the supplied policy and the calling entity's own
    permissions. Typical use is a server-side proxy obtaining
    credentials on behalf of distributed applications; do not use it
    from mobile or client-side apps (use AssumeRoleWithWebIdentity
    there instead).

    :type name: string
    :param name: Name of the federated user (e.g. `Bob`); used as an
        identifier for the temporary credentials and referenceable in
        resource-based policies.

    :type duration: integer
    :param duration: Lifetime of the session in seconds. Valid range
        for IAM-user callers is 900-129600 (default 43200); sessions
        for AWS account owners are capped at 3600.

    :type policy: string
    :param policy: Policy granting the federated user's permissions.
        Without it, federated users can only access resources whose
        resource policies explicitly allow them.
    """
    params = {'Name': name}
    optional = {'DurationSeconds': duration, 'Policy': policy}
    params.update({key: value for key, value in optional.items() if value})
    return self.get_object('GetFederationToken', params,
                           FederationToken, verb='POST')
def assume_role(self, role_arn, role_session_name, policy=None,
duration_seconds=None, external_id=None,
mfa_serial_number=None,
mfa_token=None):
"""
Returns a set of temporary security credentials (consisting of
an access key ID, a secret access key, and a security token)
that you can use to access AWS resources that you might not
normally have access to. Typically, you use `AssumeRole` for
cross-account access or federation.
For cross-account access, imagine that you own multiple
accounts and need to access resources in each account. You
could create long-term credentials in each account to access
those resources. However, managing all those credentials and
remembering which one can access which account can be time
consuming. Instead, you can create one set of long-term
credentials in one account and then use temporary security
credentials to access all the other accounts by assuming roles
in those accounts. For more information about roles, see
`Roles`_ in Using IAM .
For federation, you can, for example, grant single sign-on
access to the AWS Management Console. If you already have an
identity and authentication system in your corporate network,
you don't have to recreate user identities in AWS in order to
grant those user identities access to AWS. Instead, after a
user has been authenticated, you call `AssumeRole` (and
specify the role with the appropriate permissions) to get
temporary security credentials for that user. With those
temporary security credentials, you construct a sign-in URL
that users can use to access the console. For more
information, see `Scenarios for Granting Temporary Access`_ in
AWS Security Token Service .
The temporary security credentials are valid for the duration
that you specified when calling `AssumeRole`, which can be
from 900 seconds (15 minutes) to 3600 seconds (1 hour). The
default is 1 hour.
The temporary security credentials that are returned from the
`AssumeRoleWithWebIdentity` response have the permissions that
are associated with the access policy of the role being
assumed and any policies that are associated with the AWS
resource being accessed. You can further restrict the
permissions of the temporary security credentials by passing a
policy in the request. The resulting permissions are an
intersection of the role's access policy and the policy that
you passed. These policies and any applicable resource-based
policies are evaluated when calls to AWS service APIs are made
using the temporary security credentials.
To assume a role, your AWS account must be trusted by the
role. The trust relationship is defined in the role's trust
policy when the IAM role is created. You must also have a
policy that allows you to call `sts:AssumeRole`.
**Important:** You cannot call `Assumerole` by using AWS
account credentials; access will be denied. You must use IAM
user credentials to call `AssumeRole`.
:type role_arn: string
:param role_arn: The Amazon Resource Name (ARN) of the role that the
caller is assuming.
:type role_session_name: string
:param role_session_name: An identifier for the assumed role session.
The session name is included as part of the `AssumedRoleUser`.
:type policy: string
:param policy: A supplemental policy that is associated with the
temporary security credentials from the `AssumeRole` call. The
resulting permissions of the temporary security credentials are an
intersection of this policy and the access policy that is
associated with the role. Use this policy to further restrict the
permissions of the temporary security credentials.
:type duration_seconds: integer
:param duration_seconds: The duration, in seconds, of the role session.
The value can range from 900 seconds (15 minutes) to 3600 seconds
(1 hour). By default, the value is set to 3600 seconds.
:type external_id: string
:param external_id: A unique identifier that is used by third parties
to assume a role in their customers' accounts. For each role that
the third party can assume, they should instruct their customers to
create a role with the external ID that the third party generated.
Each time the third party assumes the role, they must pass the
customer's external ID. The external ID is useful in order to help
third parties bind a role to the customer who created it. For more
information about the external ID, see `About the External ID`_ in
Using Temporary Security Credentials .
:type mfa_serial_number: string
:param mfa_serial_number: The identification number of the MFA device that
is associated with the user who is making the AssumeRole call.
Specify this value if the trust policy of the role being assumed
includes a condition that requires MFA authentication. The value is
either the serial number for a hardware device (such as
GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
(such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9.
Maximum length of 256.
:type mfa_token: string
:param mfa_token: The value provided by the MFA device, if the trust
policy of the role being assumed requires MFA (that is, if the
policy includes a condition that tests for MFA). If the role being
assumed requires MFA and if the TokenCode value is missing or
expired, the AssumeRole call returns an "access denied" errror.
Minimum length of 6. Maximum length of 6.
"""
params = {
'RoleArn': role_arn,
'RoleSessionName': role_session_name
}
if policy is not None:
params['Policy'] = policy
if duration_seconds is not None:
params['DurationSeconds'] = duration_seconds
if external_id is not None:
params['ExternalId'] = external_id
if mfa_serial_number is not None:
params['SerialNumber'] = mfa_serial_number
if mfa_token is not None:
params['TokenCode'] = mfa_token
return self.get_object('AssumeRole', params, AssumedRole, verb='POST')
def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion,
policy=None, duration_seconds=None):
"""
Returns a set of temporary security credentials for users who
have been authenticated via a SAML authentication response.
This operation provides a mechanism for tying an enterprise
identity store or directory to role-based AWS access without
user-specific credentials or configuration.
The temporary security credentials returned by this operation
consist of an access key ID, a secret access key, and a
security token. Applications can use these temporary security
credentials to sign calls to AWS services. The credentials are
valid for the duration that you specified when calling
`AssumeRoleWithSAML`, which can be up to 3600 seconds (1 hour)
or until the time specified in the SAML authentication
response's `NotOnOrAfter` value, whichever is shorter.
The maximum duration for a session is 1 hour, and the minimum
duration is 15 minutes, even if values outside this range are
specified.
Optionally, you can pass an AWS IAM access policy to this
operation. The temporary security credentials that are
returned by the operation have the permissions that are
associated with the access policy of the role being assumed,
except for any permissions explicitly denied by the policy you
pass. This gives you a way to further restrict the permissions
for the federated user. These policies and any applicable
resource-based policies are evaluated when calls to AWS are
made using the temporary security credentials.
Before your application can call `AssumeRoleWithSAML`, you
must configure your SAML identity provider (IdP) to issue the
claims required by AWS. Additionally, you must use AWS
Identity and Access Management (AWS IAM) to create a SAML
provider entity in your AWS account that represents your
identity provider, and create an AWS IAM role that specifies
this SAML provider in its trust policy.
Calling `AssumeRoleWithSAML` does not require the use of AWS
security credentials. The identity of the caller is validated
by using keys in the metadata document that is uploaded for
the SAML provider entity for your identity provider.
For more information, see the following resources:
+ `Creating Temporary Security Credentials for SAML
Federation`_ in the Using Temporary Security Credentials
guide.
+ `SAML Providers`_ in the Using IAM guide.
+ `Configuring a Relying Party and Claims in the Using IAM
guide. `_
+ `Creating a Role for SAML-Based Federation`_ in the Using
IAM guide.
:type role_arn: string
:param role_arn: The Amazon Resource Name (ARN) of the role that the
caller is assuming.
:type principal_arn: string
:param principal_arn: The Amazon Resource Name (ARN) of the SAML
provider in AWS IAM that describes the IdP.
:type saml_assertion: string
:param saml_assertion: The base-64 encoded SAML authentication response
provided by the IdP.
For more information, see `Configuring a Relying Party and Adding
Claims`_ in the Using IAM guide.
:type policy: string
:param policy:
An AWS IAM policy in JSON format.
The temporary security credentials that are returned by this operation
have the permissions that are associated with the access policy of
the role being assumed, except for any permissions explicitly
denied by the policy you pass. These policies and any applicable
resource-based policies are evaluated when calls to AWS are made
using the temporary security credentials.
The policy must be 2048 bytes or shorter, and its packed size must be
less than 450 bytes.
:type duration_seconds: integer
:param duration_seconds:
The duration, in seconds, of the role session. The value can range from
900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the
value is set to 3600 seconds. An expiration can also be specified
in the SAML authentication response's `NotOnOrAfter` value. The
actual expiration time is whichever value is shorter.
The maximum duration for a session is 1 hour, and the minimum duration
is 15 minutes, even if values outside this range are specified.
"""
params = {
'RoleArn': role_arn,
'PrincipalArn': principal_arn,
'SAMLAssertion': saml_assertion,
}
if policy is not None:
params['Policy'] = policy
if duration_seconds is not None:
params['DurationSeconds'] = duration_seconds
return self.get_object('AssumeRoleWithSAML', params, AssumedRole,
verb='POST')
def assume_role_with_web_identity(self, role_arn, role_session_name,
web_identity_token, provider_id=None,
policy=None, duration_seconds=None):
"""
Returns a set of temporary security credentials for users who
have been authenticated in a mobile or web application with a
web identity provider, such as Login with Amazon, Facebook, or
Google. `AssumeRoleWithWebIdentity` is an API call that does
not require the use of AWS security credentials. Therefore,
you can distribute an application (for example, on mobile
devices) that requests temporary security credentials without
including long-term AWS credentials in the application or by
deploying server-based proxy services that use long-term AWS
credentials. For more information, see `Creating a Mobile
Application with Third-Party Sign-In`_ in AWS Security Token
Service .
The temporary security credentials consist of an access key
ID, a secret access key, and a security token. Applications
can use these temporary security credentials to sign calls to
AWS service APIs. The credentials are valid for the duration
that you specified when calling `AssumeRoleWithWebIdentity`,
which can be from 900 seconds (15 minutes) to 3600 seconds (1
hour). By default, the temporary security credentials are
valid for 1 hour.
The temporary security credentials that are returned from the
`AssumeRoleWithWebIdentity` response have the permissions that
are associated with the access policy of the role being
assumed. You can further restrict the permissions of the
temporary security credentials by passing a policy in the
request. The resulting permissions are an intersection of the
role's access policy and the policy that you passed. These
policies and any applicable resource-based policies are
evaluated when calls to AWS service APIs are made using the
temporary security credentials.
Before your application can call `AssumeRoleWithWebIdentity`,
you must have an identity token from a supported identity
provider and create a role that the application can assume.
The role that your application assumes must trust the identity
provider that is associated with the identity token. In other
words, the identity provider must be specified in the role's
trust policy. For more information, see ` Creating Temporary
Security Credentials for Mobile Apps Using Third-Party
Identity Providers`_.
:type role_arn: string
:param role_arn: The Amazon Resource Name (ARN) of the role that the
caller is assuming.
:type role_session_name: string
:param role_session_name: An identifier for the assumed role session.
Typically, you pass the name or identifier that is associated with
the user who is using your application. That way, the temporary
security credentials that your application will use are associated
with that user. This session name is included as part of the ARN
and assumed role ID in the `AssumedRoleUser` response element.
:type web_identity_token: string
:param web_identity_token: The OAuth 2.0 access token or OpenID Connect
ID token that is provided by the identity provider. Your
application must get this token by authenticating the user who is
using your application with a web identity provider before the
application makes an `AssumeRoleWithWebIdentity` call.
:type provider_id: string
:param provider_id: Specify this value only for OAuth access tokens. Do
not specify this value for OpenID Connect ID tokens, such as
`accounts.google.com`. This is the fully-qualified host component
of the domain name of the identity provider. Do not include URL
schemes and port numbers. Currently, `www.amazon.com` and
`graph.facebook.com` are supported.
:type policy: string
:param policy: A supplemental policy that is associated with the
temporary security credentials from the `AssumeRoleWithWebIdentity`
call. The resulting permissions of the temporary security
credentials are an intersection of this policy and the access
policy that is associated with the role. Use this policy to further
restrict the permissions of the temporary security credentials.
:type duration_seconds: integer
:param duration_seconds: The duration, in seconds, of the role session.
The value can range from 900 seconds (15 minutes) to 3600 seconds
(1 hour). By default, the value is set to 3600 seconds.
"""
params = {
'RoleArn': role_arn,
'RoleSessionName': role_session_name,
'WebIdentityToken': web_identity_token,
}
if provider_id is not None:
params['ProviderId'] = provider_id
if policy is not None:
params['Policy'] = policy
if duration_seconds is not None:
params['DurationSeconds'] = duration_seconds
return self.get_object(
'AssumeRoleWithWebIdentity',
params,
AssumedRole,
verb='POST'
)
def decode_authorization_message(self, encoded_message):
"""
Decodes additional information about the authorization status
of a request from an encoded message returned in response to
an AWS request.
For example, if a user is not authorized to perform an action
that he or she has requested, the request returns a
`Client.UnauthorizedOperation` response (an HTTP 403
response). Some AWS actions additionally return an encoded
message that can provide details about this authorization
failure.
Only certain AWS actions return an encoded authorization
message. The documentation for an individual action indicates
whether that action returns an encoded message in addition to
returning an HTTP code.
The message is encoded because the details of the
authorization status can constitute privileged information
that the user who requested the action should not see. To
decode an authorization status message, a user must be granted
permissions via an IAM policy to request the
`DecodeAuthorizationMessage` (
`sts:DecodeAuthorizationMessage`) action.
The decoded message includes the following type of
information:
+ Whether the request was denied due to an explicit deny or
due to the absence of an explicit allow. For more information,
see `Determining Whether a Request is Allowed or Denied`_ in
Using IAM .
+ The principal who made the request.
+ The requested action.
+ The requested resource.
+ The values of condition keys in the context of the user's
request.
:type encoded_message: string
:param encoded_message: The encoded message that was returned with the
response.
"""
params = {
'EncodedMessage': encoded_message,
}
return self.get_object(
'DecodeAuthorizationMessage',
params,
DecodeAuthorizationMessage,
verb='POST'
)
| [
"noreply@github.com"
] | noreply@github.com |
1e92de41ab21ce95eb3acba4eb3da6bff2bf176a | cfe872e89f657aa8a5f58c5efbab03b463575e16 | /CodeProject/wsgi.py | 89f82db22dfc65b9e954d5ad5b03cae1fbe490ce | [] | no_license | Akash-79/Code-Of-Thon | 91063c8c8aca08557f273a4b5c9c7889b12a1e66 | 921debaa136218b311f1b27d9aa96fe29224e11b | refs/heads/master | 2022-11-23T04:27:34.645409 | 2020-08-01T08:31:15 | 2020-08-01T08:31:15 | 284,213,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for CodeProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CodeProject.settings')
application = get_wsgi_application()
| [
"akashmalasane79@gmail.com"
] | akashmalasane79@gmail.com |
9c5cbb2dda577439b8fc5e973133aae546a8d80d | 51c47f22c24a1b3cd2bff3f1343657f8993dc2a9 | /ModuloChat/prueba.py | 247bf8899e9cb2acb87d96215ff635f969e95089 | [] | no_license | JesusHernandezJimenez/SistemasDistribuidos | b3951a91bddc993b174444a081d4f1b0a404b515 | 76f63dd5f17e1533075b8c7436a83237995b1a2b | refs/heads/main | 2023-06-06T14:32:46.250629 | 2021-06-22T19:22:03 | 2021-06-22T19:22:03 | 374,816,396 | 0 | 0 | null | 2021-06-22T19:22:04 | 2021-06-07T22:31:18 | Python | UTF-8 | Python | false | false | 352 | py | from tkinter import *
# Minimal tkinter demo: a window holding two nested PanedWindow containers.
root = Tk()
root.title("Prueha")
root.geometry("400x400")  # fixed 400x400 initial window size
# Outer paned container, filling the whole window.
panel_1 = PanedWindow(bd=4, relief='flat', bg='red')
panel_1.pack(fill=BOTH, expand=1)
# Inner, horizontally-oriented pane nested inside the outer one.
panel_2 = PanedWindow(panel_1, orient=HORIZONTAL, bd=4, relief='raised', bg='black')
panel_1.add(panel_2)
# Single label placed inside the inner pane.
top = Label(panel_2, text='top panel')
panel_2.add(top)
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
"jesushernandezjimenez1998@gmail.com"
] | jesushernandezjimenez1998@gmail.com |
59df0d23f042d14bf354f6eaf802191e9da2833e | 3976b7564bae6867fefeaeca8c2f600251c9d3f5 | /aquacrop/classes.py | 340cbfc869e5839549d58d5c5b10088f75cd79ce | [
"Apache-2.0"
] | permissive | LLatyki/aquacrop | 9391aadb44c847f0c0d363b410846e122399be62 | 0382df63c126bec2754ac7ee3e8b4ef2816d8c0d | refs/heads/master | 2023-03-20T01:14:20.275605 | 2021-03-02T10:43:45 | 2021-03-02T10:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,333 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_classes.ipynb (unless otherwise specified).
__all__ = ['ClockStructClass', 'OutputClass', 'ParamStructClass', 'SoilClass', 'CropClass', 'IrrMngtClass',
'IrrMngtStruct', 'spec', 'FieldMngtClass', 'FieldMngtStruct', 'spec', 'GwClass', 'InitWCClass', 'CropStruct',
'spec', 'InitCondClass', 'spec', 'WevapClass', 'spec', 'SoilProfileClass', 'spec', 'TAWClass', 'spec',
'DrClass', 'spec', 'thRZClass', 'spec', 'KswClass', 'spec', 'KstClass', 'spec', 'CO2Class', 'spec']
# Cell
import numpy as np
import pandas as pd
from numba.experimental import jitclass
from numba import float64, int64, boolean
# Cell
class ClockStructClass:
    '''
    Simulation clock: tracks the simulation period, the current timestep,
    and the planting/harvest (season) calendar.

    Atributes:\n
    `TimeStepCounter` : `int`: Keeps track of current timestep
    `ModelTermination` : `Bool`: False unless model has finished
    `SimulationStartDate` : `np.Datetime64`: Date of simulation start
    `SimulationEndDate` : `np.Datetime64`: Date of simulation end
    `TimeStep` : `int`: time step (evaluation needed)
    `nSteps` : `int`: total number of days of simulation
    `TimeSpan` : `np.array`: all dates (np.Datetime64) between the start and end dates of simulation
    `StepStartTime` : `np.Datetime64`: Date at start of timestep
    `StepEndTime` : `np.Datetime64`: Date at end of timestep
    `EvapTimeSteps` : `int`: Number of time-steps (per day) for soil evaporation calculation
    `SimOffSeason` : `str`: 'Y' if you want to simulate the off season, 'N' otherwise
    `PlantingDates` : `list-like`: list of planting dates in datetime format
    `HarvestDates` : `list-like`: list of harvest dates in datetime format
    `nSeasons` : `int`: Total number of seasons to be simulated
    `SeasonCounter` : `int`: counter to keep track of which season is being simulated

    '''

    def __init__(self):
        # --- timestep bookkeeping ---
        self.TimeStepCounter = 0
        self.ModelTermination = False
        # --- simulation period (populated once inputs are read) ---
        self.SimulationStartDate = 0
        self.SimulationEndDate = 0
        self.TimeStep = 0
        self.nSteps = 0
        self.TimeSpan = 0
        # --- current step window ---
        self.StepStartTime = 0
        self.StepEndTime = 0
        # soil evaporation is resolved on a finer, sub-daily grid
        self.EvapTimeSteps = 20
        # off-season (fallow period) simulation disabled by default
        self.SimOffSeason = 'N'
        # --- season calendar ---
        self.PlantingDates = []
        self.HarvestDates = []
        self.nSeasons = 0
        # starts at -1 so the first season increments it to index 0
        self.SeasonCounter = -1
# Cell
class OutputClass():
    '''
    Container for the simulation output tables.

    **Atributes**:\n

    `Water` : `pandas.DataFrame` : Water storage in soil

    `Flux` : `pandas.DataFrame` : Water flux

    `Growth` : `pandas.DataFrame` : crop growth

    `Final` : `pandas.DataFrame` : final stats

    '''

    def __init__(self):
        # each output starts as an empty list and is replaced during the run
        self.Water, self.Flux, self.Growth, self.Final = [], [], [], []
# Cell
class ParamStructClass:
    '''
    Holds the bulk of the model paramaters; in general these do not change
    over the course of the simulation.

    **Attributes**:\n

    `Soil` : `SoilClass` : Soil object containing soil data and paramaters

    `FallowFieldMngt` : `FieldMngtClass` : Field management variables for the off season (fallow periods)

    `NCrops` : `int` : Number of crop types to be simulated

    `SpecifiedPlantCalander` : `str` : Specified crop rotation calendar (Y or N)

    `CropChoices` : `list` : List of crop type names in each simulated season

    `CO2data` : `pd.Series` : CO2 data indexed by year

    `CO2` : `CO2Class` : object containing reference and current co2 concentration

    `WaterTable` : `int` : Water table present (1=yes, 0=no)

    `zGW` : `np.array` : WaterTable depth (mm) for each day of simulation

    `zGW_dates` : `np.array` : Corresponding dates to the zGW values

    `WTMethod` : `str` : 'Constant' or 'Variable'

    `CropList` : `list` : List of Crop Objects with paramaters for every crop used in simulations

    `python_crop_list` : `list` : List of Crop Objects, one for each season

    `python_fallow_crop` : `CropClass` : Crop object for off season

    `Seasonal_Crop_List` : `list` : List of CropStructs, one for each season (jit class objects)

    `crop_name_list` : `list` : List of crop names, one for each season

    `Fallow_Crop` : `CropStruct` : CropStruct object (jit class) for off season

    `Fallow_Crop_Name` : `str` : name of fallow crop

    '''

    def __init__(self):
        # soil object
        self.Soil = 0
        # field management during fallow (off-season) periods
        self.FallowFieldMngt = 0
        # values read from the crop mix file (cropmix.txt)
        self.NCrops = 0
        self.SpecifiedPlantCalander = ""
        self.RotationFilename = ""
        # CO2 concentration data and derived object
        self.CO2data = []
        self.CO2 = 0
        # groundwater table description
        self.WaterTable = 0
        self.zGW = []
        self.zGW_dates = []
        self.WTMethod = ""
        # crop parameter containers (python objects and jit-class structs)
        self.CropList = []
        self.python_crop_list = []
        self.python_fallow_crop = 0
        self.Seasonal_Crop_List = []
        self.crop_name_list = []
        self.Fallow_Crop = 0
        self.Fallow_Crop_Name = ""
# Cell
class SoilClass:
'''
The Soil Class contains Paramaters and variables of the soil used in the simulation
**Attributes**:\n
`profile` : `pandas.DataFrame` : holds soil profile information
`Profile` : `SoilProfileClass` : jit class object holdsing soil profile information
`Hydrology` : `pandas.DataFrame`: holds soil layer hydrology informaiton
`Comp` : `pandas.DataFrame` : holds soil compartment information
A number of float attributes specified in the initialisation of the class
'''
def __init__(self,soilType,dz=[0.1]*12,
AdjREW= 1,REW= 9.0,CalcCN=0,CN=61.0,zRes=-999,
EvapZsurf = 0.04, EvapZmin = 0.15, EvapZmax = 0.30,
Kex = 1.1, fevap = 4, fWrelExp = 0.4, fwcc = 50,
zCN = 0.3, zGerm = 0.3,AdjCN=1, fshape_cr = 16, zTop = 0.1,):
self.Name=soilType
self.zSoil= sum(dz) # Total thickness of soil profile (m)
self.nComp= len(dz) # Total number of soil compartments
self.nLayer= 0 # Total number of soil layers
self.AdjREW= AdjREW # Adjust default value for readily evaporable water (0 = No, 1 = Yes)
self.REW= REW # Readily evaporable water (mm) (only used if adjusting from default value)
self.CalcCN= CalcCN # adjust Curve number based on Ksat
self.CN= CN # Curve number (0 = No, 1 = Yes)
self.zRes= zRes # Depth of restrictive soil layer (set to negative value if not present)
# Assign default program properties (should not be changed without expert knowledge)
self.EvapZsurf = EvapZsurf # Thickness of soil surface skin evaporation layer (m)
self.EvapZmin = EvapZmin # Minimum thickness of full soil surface evaporation layer (m)
self.EvapZmax = EvapZmax # Maximum thickness of full soil surface evaporation layer (m)
self.Kex = Kex # Maximum soil evaporation coefficient
self.fevap = fevap # Shape factor describing reduction in soil evaporation in stage 2.
self.fWrelExp = fWrelExp # Proportional value of Wrel at which soil evaporation layer expands
self.fwcc = fwcc # Maximum coefficient for soil evaporation reduction due to sheltering effect of withered canopy
self.zCN = zCN # Thickness of soil surface (m) used to calculate water content to adjust curve number
self.zGerm = zGerm # Thickness of soil surface (m) used to calculate water content for germination
self.AdjCN = AdjCN # Adjust curve number for antecedent moisture content (0: No, 1: Yes)
self.fshape_cr = fshape_cr # Capillary rise shape factor
self.zTop = max(zTop,dz[0]) # Thickness of soil surface layer for water stress comparisons (m)
if soilType == 'custom':
self.create_df(dz)
elif soilType == 'Clay':
self.CN = 77
self.CalcCN = 0
self.REW = 14
self.create_df(dz)
self.add_layer(sum(dz), 0.39, 0.54, 0.55, 35, 100)
elif soilType == 'ClayLoam':
self.CN = 72
self.CalcCN = 0
self.REW = 11
self.create_df(dz)
self.add_layer(sum(dz), 0.23, 0.39, 0.5, 125, 100)
elif soilType == 'Loam':
self.CN = 61
self.CalcCN = 0
self.REW = 9
self.create_df(dz)
self.add_layer(sum(dz), 0.15, 0.31, 0.46, 500, 100)
elif soilType == 'LoamySand':
self.CN = 46
self.CalcCN = 0
self.REW = 5
self.create_df(dz)
self.add_layer(sum(dz), 0.08, 0.16, 0.38, 2200, 100)
elif soilType == 'Sand':
self.CN = 46
self.CalcCN = 0
self.REW = 4
self.create_df(dz)
self.add_layer(sum(dz), 0.06, 0.13, 0.36, 3000, 100)
elif soilType == 'SandyClay':
self.CN = 77
self.CalcCN = 0
self.REW = 10
self.create_df(dz)
self.add_layer(sum(dz), 0.27, 0.39, 0.5, 35, 100)
elif soilType == 'SandyClayLoam':
self.CN = 72
self.CalcCN = 0
self.REW = 9
self.create_df(dz)
self.add_layer(sum(dz), 0.20, 0.32, 0.47, 225, 100)
elif soilType == 'SandyLoam':
self.CN = 46
self.CalcCN = 0
self.REW = 7
self.create_df(dz)
self.add_layer(sum(dz), 0.10, 0.22, 0.41, 1200, 100)
elif soilType == 'Silt':
self.CN = 61
self.CalcCN = 0
self.REW = 11
self.create_df(dz)
self.add_layer(sum(dz), 0.09, 0.33, 0.43, 500, 100)
elif soilType == 'SiltClayLoam':
self.CN = 72
self.CalcCN = 0
self.REW = 13
self.create_df(dz)
self.add_layer(sum(dz), 0.23, 0.44, 0.52, 150, 100)
elif soilType == 'SiltLoam':
self.CN = 61
self.CalcCN = 0
self.REW = 11
self.create_df(dz)
self.add_layer(sum(dz), 0.13, 0.33, 0.46, 575, 100)
elif soilType == 'SiltClay':
self.CN = 72
self.CalcCN = 0
self.REW = 14
self.create_df(dz)
self.add_layer(sum(dz), 0.32, 0.50, 0.54, 100, 100)
elif soilType == 'Paddy':
self.CN = 77
self.CalcCN = 0
self.REW = 10
self.create_df(dz)
self.add_layer(0.5, 0.32, 0.50, 0.54, 15, 100)
self.add_layer(1.5, 0.39, 0.54, 0.55, 2, 100)
elif soilType == 'ac_TunisLocal':
self.CN = 46
self.CalcCN = 0
self.REW = 7
dz = [0.1]*6 + [0.15]*5 + [0.2]
self.create_df(dz)
self.add_layer(0.3, 0.24, 0.40, 0.50, 155, 100)
self.add_layer(1.7, 0.11, 0.33, 0.46, 500, 100)
else:
print('wrong soil type')
assert 1==2
def __repr__(self):
for key in self.__dict__:
if key != 'profile':
print(f"{key}: {getattr(self,key)}")
return ' '
def create_df(self,dz):
self.profile = pd.DataFrame(np.empty((len(dz),4)),columns=["Comp","Layer","dz","dzsum"])
self.profile.dz = dz
self.profile.dzsum = np.cumsum(self.profile.dz).round(2)
self.profile.Comp = np.arange(len(dz))
self.profile.Layer = np.nan
self.profile["zBot"] = self.profile.dzsum
self.profile["zTop"] = self.profile["zBot"]-self.profile.dz
self.profile["zMid"] = (self.profile["zTop"]+self.profile["zBot"])/2
    def calculate_soil_hydraulic_properties(self,Sand,Clay,OrgMat,DF=1):

        """
        Function to calculate soil hydraulic properties, given textural inputs.
        Calculations use pedotransfer function equations described in Saxton and Rawls (2006)

        *Arguments:*

        `Sand` : `float` : sand content as a fraction (0-1; callers divide percentages by 100)
        `Clay` : `float` : clay content as a fraction (0-1)
        `OrgMat` : `float` : organic matter content, passed through unscaled
            (presumably percent, per Saxton & Rawls -- TODO confirm)
        `DF` : `float` : density adjustment factor (default 1, i.e. no compaction adjustment)

        *Returns:*

        `(th_wp, th_fc, th_s, Ksat)` : water content at permanent wilting
        point, field capacity and saturation (m3/m3), and saturated
        hydraulic conductivity (mm/day).
        """

        # do calculations
        #Water content at permanent wilting point
        Pred_thWP = ( -(0.024*Sand) + (0.487*Clay) + (0.006*OrgMat)
                    +(0.005*Sand*OrgMat) - (0.013*Clay*OrgMat)
                    +(0.068*Sand*Clay) + 0.031 )

        th_wp = Pred_thWP+(0.14*Pred_thWP)-0.02

        # Water content at field capacity and saturation
        Pred_thFC = ( -(0.0251*Sand) + (0.195*Clay) + (0.011*OrgMat)
                    +(0.006*Sand*OrgMat) - (0.027*Clay*OrgMat)
                    +(0.452*Sand*Clay) + 0.299 )

        PredAdj_thFC = Pred_thFC+((1.283*(np.power(Pred_thFC,2)))-(0.374*Pred_thFC)-0.015)

        Pred_thS33 = ( (0.0278*Sand) + (0.034*Clay) + (0.022*OrgMat)
                    -(0.018*Sand*OrgMat) - (0.027*Clay*OrgMat)
                    +(0.584*Sand*Clay) + 0.078 )

        PredAdj_thS33 = Pred_thS33+((0.636*Pred_thS33)-0.107)
        Pred_thS = (PredAdj_thFC+PredAdj_thS33)+((-0.097*Sand)+0.043)

        # bulk density from porosity, using a particle density of 2.65 g/cm3
        pN = (1-Pred_thS)*2.65
        # density-factor-adjusted bulk density and resulting porosity changes
        pDF = pN*DF
        PorosComp = (1-(pDF/2.65))-(1-(pN/2.65))
        PorosCompOM = 1-(pDF/2.65)

        DensAdj_thFC = PredAdj_thFC+(0.2*PorosComp)
        DensAdj_thS = PorosCompOM

        th_fc = DensAdj_thFC
        th_s = DensAdj_thS

        # Saturated hydraulic conductivity (mm/day)
        lmbda = 1/((np.log(1500)-np.log(33))/(np.log(th_fc)-np.log(th_wp)))
        Ksat = (1930*(th_s-th_fc)**(3-lmbda))*24

        # Water content at air dry
        # NOTE: th_dry is computed but not returned; add_layer independently
        # derives it as th_wp/2 when populating the profile.
        th_dry = th_wp / 2

        #round values
        th_dry = round(10_000*th_dry)/10_000
        th_wp = round(1000*th_wp)/1000
        th_fc = round(1000*th_fc)/1000
        th_s = round(1000*th_s)/1000
        Ksat = round(10*Ksat)/10

        return th_wp,th_fc,th_s,Ksat
def add_layer_from_texture(self,thickness,Sand,Clay,OrgMat,penetrability):
th_wp,th_fc,th_s,Ksat=self.calculate_soil_hydraulic_properties(Sand/100,Clay/100,OrgMat)
self.add_layer(thickness, th_wp, th_fc, th_s, Ksat, penetrability)
    def add_layer(self,thickness, thWP, thFC, thS, Ksat, penetrability):
        """
        Append a soil layer of the given `thickness` (m) to the profile,
        assigning its hydraulic properties (wilting point, field capacity,
        saturation, Ksat in mm/day, penetrability) to every compartment
        the layer spans. Layers must be added top-down: compartments are
        claimed immediately below the previously added layer.
        """
        self.nLayer +=1

        # next layer number = number of layers already assigned + 1
        num_layers = len(self.profile.dropna().Layer.unique())
        new_layer = num_layers+1

        if new_layer==1:
            # first layer: claim all compartments within `thickness` of the surface
            self.profile.loc[(round(thickness,2)>=round(self.profile.dzsum,2)),"Layer"] = new_layer
        else:
            # subsequent layers: claim unassigned compartments down to
            # (bottom of previous layer + thickness)
            last = self.profile[self.profile.Layer==new_layer-1].dzsum.values[-1]
            self.profile.loc[(thickness+last>=self.profile.dzsum) & (self.profile.Layer.isna()),"Layer"] = new_layer

        # broadcast the layer's hydraulic properties onto its compartments
        # (th_dry is taken as half the wilting-point water content)
        self.profile.loc[self.profile.Layer==new_layer,"th_dry"] = self.profile.Layer.map({new_layer:thWP/2})
        self.profile.loc[self.profile.Layer==new_layer,"th_wp"] = self.profile.Layer.map({new_layer:thWP})
        self.profile.loc[self.profile.Layer==new_layer,"th_fc"] = self.profile.Layer.map({new_layer:thFC})
        self.profile.loc[self.profile.Layer==new_layer,"th_s"] = self.profile.Layer.map({new_layer:thS})
        self.profile.loc[self.profile.Layer==new_layer,"Ksat"] = self.profile.Layer.map({new_layer:Ksat})
        self.profile.loc[self.profile.Layer==new_layer,"penetrability"] = self.profile.Layer.map({new_layer:penetrability})

        # Calculate drainage characteristic (tau)
        # Calculations use equation given by Raes et al. 2012
        tau = round(0.0866*(Ksat**0.35),2)
        # clamp tau to the physically meaningful range [0, 1]
        if tau > 1:
            tau = 1
        elif tau < 0:
            tau = 0

        self.profile.loc[self.profile.Layer==new_layer,"tau"] = self.profile.Layer.map({new_layer:tau})
def fill_nan(self,):
    """Forward-fill layer properties into any compartments that were never
    assigned a layer, then refresh the derived profile attributes
    (rounded dz, cumulative depth, total soil depth, compartment count)."""
    prof = self.profile.fillna(method='ffill')
    prof.dz = prof.dz.round(2)
    prof.dzsum = prof.dz.cumsum().round(2)
    self.profile = prof
    self.zSoil = round(prof.dz.sum(), 2)
    self.nComp = len(prof)
    # Layer numbers were floats while NaNs were present; restore to int.
    prof.Layer = prof.Layer.astype(int)
def add_capillary_rise_params(self,):
    """Compute capillary-rise coefficients (aCR, bCR) for every soil layer.

    Each layer is classified as sandy / loamy / sandy-clayey / silty-clayey
    from its mean water-retention values, and the matching empirical
    regression on Ksat is applied (Ksat clamped to the regression's valid
    range). Results are written back to `self.profile`.
    """
    # Calculate capillary rise parameters for all soil layers
    # Only do calculation if water table is present. Calculations use equations
    # described in Raes et al. (2012)
    prof = self.profile
    # Layer-mean hydraulic properties; drop compartment geometry columns.
    hydf = prof.groupby('Layer').mean().drop(['dz','dzsum'],axis=1)
    hydf["aCR"] = 0
    hydf["bCR"] = 0
    for layer in hydf.index.unique():
        layer = int(layer)
        soil=hydf.loc[layer]
        thwp = soil.th_wp
        thfc = soil.th_fc
        ths = soil.th_s
        Ksat = soil.Ksat
        aCR = 0
        bCR = 0
        if (thwp >= 0.04) and (thwp <= 0.15) and (thfc >= 0.09) and \
            (thfc <= 0.28) and (ths >= 0.32) and (ths <= 0.51):
            # Sandy soil class
            if (Ksat >= 200) and (Ksat <= 2000):
                aCR = -0.3112-(Ksat*(1e-5))
                bCR = -1.4936+(0.2416*np.log(Ksat))
            elif Ksat < 200:
                # Below the regression's valid range: clamp Ksat to 200.
                aCR = -0.3112-(200*(1e-5));
                bCR = -1.4936+(0.2416*np.log(200))
            elif Ksat > 2000:
                # Above the valid range: clamp Ksat to 2000.
                aCR = -0.3112-(2000*(1e-5));
                bCR = -1.4936+(0.2416*np.log(2000));
        elif (thwp >= 0.06) and (thwp <= 0.20) and (thfc >= 0.23) and \
            (thfc <= 0.42) and (ths >= 0.42) and (ths <= 0.55):
            # Loamy soil class
            if (Ksat >= 100) and (Ksat <= 750):
                aCR = -0.4986+(9*(1e-5)*Ksat)
                bCR = -2.132+(0.4778*np.log(Ksat))
            elif Ksat < 100:
                aCR = -0.4986+(9*(1e-5)*100)
                bCR = -2.132+(0.4778*np.log(100))
            elif Ksat > 750:
                aCR = -0.4986+(9*(1e-5)*750)
                bCR = -2.132+(0.4778*np.log(750))
        elif (thwp >= 0.16) and (thwp <= 0.34) and (thfc >= 0.25) and \
            (thfc <= 0.45) and (ths >= 0.40) and (ths <= 0.53):
            # Sandy clayey soil class
            if (Ksat >= 5) and (Ksat <= 150):
                aCR = -0.5677-(4*(1e-5)*Ksat)
                bCR = -3.7189+(0.5922*np.log(Ksat))
            elif Ksat < 5:
                aCR = -0.5677-(4*(1e-5)*5)
                bCR = -3.7189+(0.5922*np.log(5))
            elif Ksat > 150:
                aCR = -0.5677-(4*(1e-5)*150)
                bCR = -3.7189+(0.5922*np.log(150))
        elif (thwp >= 0.20) and (thwp <= 0.42) and (thfc >= 0.40) and \
            (thfc <= 0.58) and (ths >= 0.49) and (ths <= 0.58):
            # Silty clayey soil class
            if (Ksat >= 1) and (Ksat <= 150):
                aCR = -0.6366+(8*(1e-4)*Ksat)
                bCR = -1.9165+(0.7063*np.log(Ksat))
            elif Ksat < 1:
                aCR = -0.6366+(8*(1e-4)*1)
                bCR = -1.9165+(0.7063*np.log(1))
            elif Ksat > 150:
                aCR = -0.6366+(8*(1e-4)*150)
                bCR = -1.9165+(0.7063*np.log(150))
        # Guard: the layer must have matched one of the four soil classes;
        # if no branch fired, aCR/bCR are still 0 and these asserts fail.
        assert aCR != 0
        assert bCR != 0
        prof.loc[prof.Layer==layer,"aCR"] = prof.Layer.map({layer:aCR})
        prof.loc[prof.Layer==layer,"bCR"] = prof.Layer.map({layer:bCR})
    self.profile=prof
# Cell
class CropClass:
    '''
    The Crop Class contains parameters and variables of the crop used in the simulation

    **Attributes**:\n

    `c_name`: `str`: crop name ('custom' or one of built in defaults e.g. 'Maize')
    `PlantingDate` : `str` : Planting Date (mm/dd)
    `HarvestDate` : `str` : Latest Harvest Date (mm/dd)
    `CropType` : `int` : Crop Type (1 = Leafy vegetable, 2 = Root/tuber, 3 = Fruit/grain)
    `PlantMethod` : `int` : Planting method (0 = Transplanted, 1 = Sown)
    `CalendarType` : `int` : Calendar Type (1 = Calendar days, 2 = Growing degree days)
    `SwitchGDD` : `int` : Convert calendar to GDD mode if inputs are given in calendar days (0 = No; 1 = Yes)
    `IrrMngt`: `dict` : dictionary containing irrigation management information
    `IrrSchd` : `pandas.DataFrame` : pandas DataFrame containing the Irrigation Schedule if predefined
    `FieldMngt` : `dict` : Dictionary containing field management variables for the growing season of the crop

    A number of default program properties of type float are also specified during initialisation.

    Raises:
        ValueError: if `c_name` is not 'custom' or one of the built-in crops.
    '''

    def __init__(self,c_name,PlantingDate,HarvestDate=None,**kwargs):
        self.Name = ''

        # Assign default program properties (should not be changed without expert knowledge)
        self.fshape_b = 13.8135 # Shape factor describing the reduction in biomass production for insufficient growing degree days
        self.PctZmin = 70 # Initial percentage of minimum effective rooting depth
        self.fshape_ex = -6 # Shape factor describing the effects of water stress on root expansion
        self.ETadj = 1 # Adjustment to water stress thresholds depending on daily ET0 (0 = No, 1 = Yes)
        self.Aer = 5 # Vol (%) below saturation at which stress begins to occur due to deficient aeration
        self.LagAer = 3 # Number of days lag before aeration stress affects crop growth
        self.beta = 12 # Reduction (%) to p_lo3 when early canopy senescence is triggered
        self.a_Tr = 1 # Exponent parameter for adjustment of Kcx once senescence is triggered
        self.GermThr = 0.2 # Proportion of total water storage needed for crop to germinate
        self.CCmin = 0.05 # Minimum canopy size below which yield formation cannot occur
        self.MaxFlowPct = 100/3 # Proportion of total flowering time (%) at which peak flowering occurs
        self.HIini = 0.01 # Initial harvest index
        self.bsted = 0.000138 # WP co2 adjustment parameter given by Steduto et al. 2007
        self.bface = 0.001165 # WP co2 adjustment parameter given by FACE experiments

        if c_name == 'Maize':
            self.Name = 'Maize'
            # added in Read_Model_Paramaters
            self.CropType= 3 # Crop Type (1 = Leafy vegetable, 2 = Root/tuber, 3 = Fruit/grain)
            self.PlantMethod= 1 # Planting method (0 = Transplanted, 1 = Sown)
            self.CalendarType= 2 # Calendar Type (1 = Calendar days, 2 = Growing degree days)
            self.SwitchGDD= 0 # Convert calendar to GDD mode if inputs are given in calendar days (0 = No; 1 = Yes)
            self.PlantingDate= PlantingDate # Planting Date (mm/dd)
            self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
            self.Emergence = 80 # Growing degree/Calendar days from sowing to emergence/transplant recovery
            self.MaxRooting = 1420 # Growing degree/Calendar days from sowing to maximum rooting
            self.Senescence = 1420 # Growing degree/Calendar days from sowing to senescence
            self.Maturity = 1670 # Growing degree/Calendar days from sowing to maturity
            self.HIstart = 850 # Growing degree/Calendar days from sowing to start of yield formation
            self.Flowering = 190 # Duration of flowering in growing degree/calendar days (-999 for non-fruit/grain crops)
            self.YldForm = 775 # Duration of yield formation in growing degree/calendar days
            self.GDDmethod = 2 # Growing degree day calculation method
            self.Tbase = 8 # Base temperature (degC) below which growth does not progress
            self.Tupp = 30 # Upper temperature (degC) above which crop development no longer increases
            self.PolHeatStress = 1 # Pollination affected by heat stress (0 = No, 1 = Yes)
            self.Tmax_up = 40 # Maximum air temperature (degC) above which pollination begins to fail
            self.Tmax_lo = 45 # Maximum air temperature (degC) at which pollination completely fails
            self.PolColdStress = 1 # Pollination affected by cold stress (0 = No, 1 = Yes)
            self.Tmin_up = 10 # Minimum air temperature (degC) below which pollination begins to fail
            self.Tmin_lo = 5 # Minimum air temperature (degC) at which pollination completely fails
            self.TrColdStress = 1 # Transpiration affected by cold temperature stress (0 = No, 1 = Yes)
            self.GDD_up = 12 # Minimum growing degree days (degC/day) required for full crop transpiration potential
            self.GDD_lo = 0 # Growing degree days (degC/day) at which no crop transpiration occurs
            self.Zmin = 0.3 # Minimum effective rooting depth (m)
            self.Zmax = 1.7 # Maximum rooting depth (m)
            self.fshape_r = 1.3 # Shape factor describing root expansion
            self.SxTopQ = 0.0480 # Maximum root water extraction at top of the root zone (m3/m3/day)
            self.SxBotQ = 0.0117 # Maximum root water extraction at the bottom of the root zone (m3/m3/day)
            self.SeedSize = 6.5 # Soil surface area (cm2) covered by an individual seedling at 90% emergence
            self.PlantPop = 75_000 # Number of plants per hectare
            self.CCx = 0.96 # Maximum canopy cover (fraction of soil cover)
            self.CDC = 0.01 # Canopy decline coefficient (fraction per GDD/calendar day)
            self.CGC = 0.0125 # Canopy growth coefficient (fraction per GDD)
            self.Kcb = 1.05 # Crop coefficient when canopy growth is complete but prior to senescence
            self.fage = 0.3 # Decline of crop coefficient due to ageing (%/day)
            self.WP = 33.7 # Water productivity normalized for ET0 and C02 (g/m2)
            self.WPy = 100 # Adjustment of water productivity in yield formation stage (% of WP)
            self.fsink = 0.5 # Crop performance under elevated atmospheric CO2 concentration (%/100)
            self.HI0 = 0.48 # Reference harvest index
            self.dHI_pre = 0 # Possible increase of harvest index due to water stress before flowering (%)
            self.a_HI = 7 # Coefficient describing positive impact on harvest index of restricted vegetative growth during yield formation
            self.b_HI = 3 # Coefficient describing negative impact on harvest index of stomatal closure during yield formation
            self.dHI0 = 15 # Maximum allowable increase of harvest index above reference value
            self.Determinant = 1 # Crop Determinancy (0 = Indeterminant, 1 = Determinant)
            self.exc = 50 # Excess of potential fruits
            self.p_up1 = 0.14 # Upper soil water depletion threshold for water stress effects on affect canopy expansion
            self.p_up2 = 0.69 # Upper soil water depletion threshold for water stress effects on canopy stomatal control
            self.p_up3 = 0.69 # Upper soil water depletion threshold for water stress effects on canopy senescence
            self.p_up4 = 0.8 # Upper soil water depletion threshold for water stress effects on canopy pollination
            self.p_lo1 = 0.72 # Lower soil water depletion threshold for water stress effects on canopy expansion
            self.p_lo2 = 1 # Lower soil water depletion threshold for water stress effects on canopy stomatal control
            self.p_lo3 = 1 # Lower soil water depletion threshold for water stress effects on canopy senescence
            self.p_lo4 = 1 # Lower soil water depletion threshold for water stress effects on canopy pollination
            self.fshape_w1 = 2.9 # Shape factor describing water stress effects on canopy expansion
            self.fshape_w2 = 6 # Shape factor describing water stress effects on stomatal control
            self.fshape_w3 = 2.7 # Shape factor describing water stress effects on canopy senescence
            self.fshape_w4 = 1 # Shape factor describing water stress effects on pollination

        elif c_name == 'Wheat':
            self.Name = 'Wheat'
            # Typical season: planting 10/15, harvest 05/30.
            self.CropType= 3; self.PlantMethod= 1; self.CalendarType= 2
            self.SwitchGDD= 0;
            self.PlantingDate= PlantingDate # Planting Date (mm/dd)
            self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
            self.Emergence = 150; self.MaxRooting = 864; self.Senescence = 1700
            self.Maturity = 2400; self.HIstart = 1250; self.Flowering = 200
            self.YldForm = 1100; self.GDDmethod = 3; self.Tbase = 0
            self.Tupp = 26; self.PolHeatStress = 1; self.Tmax_up = 35
            self.Tmax_lo = 40; self.PolColdStress = 1; self.Tmin_up = 5
            self.Tmin_lo = 0; self.TrColdStress = 1; self.GDD_up = 14
            self.GDD_lo = 0; self.Zmin = 0.3; self.Zmax = 1.5
            self.fshape_r = 1.5; self.SxTopQ = 0.0480; self.SxBotQ = 0.012
            self.SeedSize = 1.5; self.PlantPop = 4_500_000; self.CCx = 0.96
            self.CDC = 0.004; self.CGC = 0.005001; self.Kcb = 1.1
            self.fage = 0.15; self.WP = 15; self.WPy = 100
            self.fsink = 0.5; self.HI0 = 0.48; self.dHI_pre = 5
            self.a_HI = 10; self.b_HI = 7; self.dHI0 = 15
            self.Determinant = 1; self.exc = 100; self.p_up1 = 0.2
            self.p_up2 = 0.65; self.p_up3 = 0.7; self.p_up4 = 0.85
            self.p_lo1 = 0.65; self.p_lo2 = 1; self.p_lo3 = 1
            self.p_lo4 = 1; self.fshape_w1 = 5.; self.fshape_w2 = 2.5
            self.fshape_w3 = 2.5; self.fshape_w4 = 1.

        elif c_name == 'Potato':
            self.Name = 'Potato'
            # Typical season: planting 04/25, harvest 08/30.
            # Calendar-day crop (CalendarType=1); transplanted (PlantMethod=0).
            self.CropType= 2; self.PlantMethod= 0; self.CalendarType= 1
            self.SwitchGDD= 0;
            self.PlantingDate= PlantingDate # Planting Date (mm/dd)
            self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
            self.Emergence = 15; self.MaxRooting = 50; self.Senescence = 105
            self.Maturity = 125; self.HIstart = 46; self.Flowering = -999
            self.YldForm = 77; self.GDDmethod = 3; self.Tbase = 2
            self.Tupp = 26; self.PolHeatStress = 0; self.Tmax_up = -999
            self.Tmax_lo = -999; self.PolColdStress = 0; self.Tmin_up = -999
            self.Tmin_lo = -999; self.TrColdStress = 1; self.GDD_up = 7
            self.GDD_lo = 0; self.Zmin = 0.3; self.Zmax = 0.6
            self.fshape_r = 1.5; self.SxTopQ = 0.0480; self.SxBotQ = 0.012
            self.SeedSize = 15; self.PlantPop = 40_000; self.CCx = 0.92
            self.CDC = 0.01884; self.CGC = 0.126; self.Kcb = 1.1
            self.fage = 0.15; self.WP = 18; self.WPy = 100
            self.fsink = 0.5; self.HI0 = 0.85; self.dHI_pre = 2
            self.a_HI = 0; self.b_HI = 10; self.dHI0 = 5
            self.Determinant = 0; self.exc = 0; self.p_up1 = 0.2
            self.p_up2 = 0.6; self.p_up3 = 0.7; self.p_up4 = 0.8
            self.p_lo1 = 0.6; self.p_lo2 = 1; self.p_lo3 = 1
            self.p_lo4 = 1; self.fshape_w1 = 3.; self.fshape_w2 = 3
            self.fshape_w3 = 3; self.fshape_w4 = 0

        elif c_name == 'Rice':
            self.Name = 'Rice'
            # Typical season: planting 08/01, harvest 11/30.
            self.CropType= 3; self.PlantMethod= 0; self.CalendarType= 2
            self.SwitchGDD= 0;
            self.PlantingDate= PlantingDate # Planting Date (mm/dd)
            self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
            self.Emergence = 102; self.MaxRooting = 381; self.Senescence = 1450
            self.Maturity = 1707; self.HIstart = 1088; self.Flowering = 318
            self.YldForm = 577; self.GDDmethod = 3; self.Tbase = 8
            self.Tupp = 30; self.PolHeatStress = 1; self.Tmax_up = 35
            self.Tmax_lo = 40; self.PolColdStress = 1; self.Tmin_up = 8
            self.Tmin_lo = 3; self.TrColdStress = 1; self.GDD_up = 10
            self.GDD_lo = 0; self.Zmin = 0.3; self.Zmax = 0.5
            self.fshape_r = 2.5; self.SxTopQ = 0.0480; self.SxBotQ = 0.012
            self.SeedSize = 6; self.PlantPop = 1_000_000; self.CCx = 0.95
            self.CDC = 0.006172; self.CGC = 0.006163; self.Kcb = 1.1
            self.fage = 0.15; self.WP = 19; self.WPy = 100
            self.fsink = 0.5; self.HI0 = 0.43; self.dHI_pre = 0
            self.a_HI = 10; self.b_HI = 7; self.dHI0 = 15
            self.Determinant = 1; self.exc = 100; self.p_up1 = 0
            self.p_up2 = 0.5; self.p_up3 = 0.55; self.p_up4 = 0.75
            self.p_lo1 = 0.4; self.p_lo2 = 1; self.p_lo3 = 1
            self.p_lo4 = 1; self.fshape_w1 = 3.; self.fshape_w2 = 3
            self.fshape_w3 = 3; self.fshape_w4 = 2.7
            # no aeration stress for rice
            self.Aer = -1e10; self.LagAer = 1e10

        elif c_name == 'custom':
            # temporary solution for new crops
            # if using this ensure that all parameters in 'allowed_keys'
            # are passed in as arguments at initialization
            self.PlantingDate= PlantingDate # Planting Date (mm/dd)
            self.HarvestDate= HarvestDate # Latest Harvest Date (mm/dd)
            self.Name = 'custom'

        else:
            # Fail loudly on unknown crop names. A `raise` (unlike `assert`)
            # survives `python -O` and gives callers a catchable error type.
            raise ValueError(f"Unknown crop name: '{c_name}'")

        # Override any parameters explicitly specified by the user;
        # unknown kwargs are silently ignored.
        allowed_keys = {'fshape_b','PctZmin','fshape_ex','ETadj','Aer','LagAer',
                        'beta','a_Tr','GermThr','CCmin','MaxFlowPct','HIini',
                        'bsted','bface','CropType','PlantMethod','CalendarType','SwitchGDD','PlantingDate',
                        'HarvestDate','Emergence','MaxRooting','Senescence','Maturity',
                        'HIstart','Flowering','YldForm','GDDmethod','Tbase','Tupp',
                        'PolHeatStress','Tmax_up','Tmax_lo','PolColdStress','Tmin_up',
                        'Tmin_lo','TrColdStress','GDD_up','GDD_lo','Zmin','Zmax',
                        'fshape_r','SxTopQ','SxBotQ','SeedSize','PlantPop','CCx','CDC',
                        'CGC','Kcb','fage','WP','WPy','fsink','HI0','dHI_pre','a_HI','b_HI',
                        'dHI0','Determinant','exc','p_up1','p_up2','p_up3','p_up4',
                        'p_lo1','p_lo2','p_lo3','p_lo4','fshape_w1','fshape_w2','fshape_w3',
                        'fshape_w4'}

        self.__dict__.update((k, v) for k, v in kwargs.items() if k in allowed_keys)

        self.calculate_additional_params()

    def calculate_additional_params(self,):
        """Derive secondary crop parameters from the primary ones.

        Computes initial canopy cover (CC0), top/bottom root extraction
        terms (SxTop, SxBot), and packs the per-stage water stress
        thresholds into numpy arrays (p_up, p_lo, fshape_w).
        """
        # Fractional canopy cover size at emergence
        self.CC0 = self.PlantPop*self.SeedSize*1e-8
        # Root extraction terms
        SxTopQ = self.SxTopQ
        SxBotQ = self.SxBotQ
        S1 = self.SxTopQ
        S2 = self.SxBotQ
        if S1 == S2:
            SxTop = S1
            SxBot = S2
        else:
            # Ensure S1 >= S2 before applying the adjustment formula.
            if SxTopQ < SxBotQ:
                S1 = SxBotQ
                S2 = SxTopQ
            xx = 3*(S2/(S1-S2))
            if xx < 0.5:
                SS1 = (4/3.5)*S1
                SS2 = 0
            else:
                SS1 = (xx+3.5)*(S1/(xx+3))
                SS2 = (xx-0.5)*(S2/xx)
            # Map adjusted values back to top/bottom according to the
            # original ordering of SxTopQ and SxBotQ.
            if SxTopQ > SxBotQ:
                SxTop = SS1
                SxBot = SS2
            else:
                SxTop = SS2
                SxBot = SS1
        self.SxTop = SxTop
        self.SxBot = SxBot

        # Water stress thresholds (one entry per growth process:
        # expansion, stomatal control, senescence, pollination)
        self.p_up = np.array([self.p_up1,self.p_up2,self.p_up3,self.p_up4])
        self.p_lo = np.array([self.p_lo1,self.p_lo2,self.p_lo3,self.p_lo4])
        self.fshape_w = np.array([self.fshape_w1,self.fshape_w2,self.fshape_w3,self.fshape_w4])
# Cell
class IrrMngtClass:
    """
    Farmer Class defines irrigation strategy

    **Attributes:**\n

    `Name` : `str` : name of the irrigation strategy
    `IrrMethod` : `int` : Irrigation method {0: rainfed, 1: soil moisture targets, 2: set time interval,
                                            3: predefined schedule, 4: net irrigation, 5: constant depth }
    `WetSurf` : `int` : Soil surface wetted by irrigation (%)
    `AppEff` : `int` : Irrigation application efficiency (%)
    `MaxIrr` : `float` : Maximum depth (mm) that can be applied each day
    `SMT` : `list` : Soil moisture targets (%TAW) to maintain in each growth stage (only used if irrigation method is equal to 1)
    `IrrInterval` : `int` : Irrigation interval in days (only used if irrigation method is equal to 2)
    `Schedule` : `pandas.DataFrame` : DataFrame containing dates and depths
    `NetIrrSMT` : `float` : Net irrigation threshold moisture level (% of TAW that will be maintained, for IrrMethod=4)
    `Depth` : `float` : constant depth to apply on each day
    """

    def __init__(self,IrrMethod,**kwargs):
        self.IrrMethod=IrrMethod

        # Default to an empty strategy name; fixed inconsistency where the
        # docstring advertised `Name` but no default was ever assigned.
        self.Name = ''
        self.WetSurf = 100.
        self.AppEff = 100.
        self.MaxIrr = 25.
        self.MaxIrrSeason = 10_000.
        self.SMT=np.zeros(4)
        self.IrrInterval = 0
        self.Schedule=[]
        self.NetIrrSMT = 80.
        self.depth = 0.

        if IrrMethod == 1:
            # Soil moisture targets: default to 100% TAW in all four stages.
            self.SMT=[100]*4

        if IrrMethod == 2:
            self.IrrInterval = 3

        if IrrMethod == 3:
            # Predefined schedule: expects a pandas DataFrame with Date
            # (pd.Datetime) and Depth (float) columns, e.g.:
            """
            dates = pd.DatetimeIndex(['20/10/1979','20/11/1979','20/12/1979'])
            depths = [25,25,25]
            irr=pd.DataFrame([dates,depths]).T
            irr.columns=['Date','Depth']
            """
            self.Schedule = pd.DataFrame(columns=['Date','Depth'])

        if IrrMethod == 4:
            self.NetIrrSMT = 80

        if IrrMethod == 5:
            self.depth = 0

        # Override defaults with any recognized keyword arguments.
        # Both 'Name' and legacy lowercase 'name' are accepted; 'name' is
        # mapped onto the documented `Name` attribute for consistency.
        allowed_keys = {'Name','name','WetSurf','AppEff','MaxIrr','MaxIrrSeason','SMT','IrrInterval','NetIrrSMT','Schedule'}

        self.__dict__.update((k, v) for k, v in kwargs.items() if k in allowed_keys)
        if 'name' in kwargs:
            self.Name = kwargs['name']
# Cell
# Numba type specification for IrrMngtStruct: (field name, numba dtype).
spec = [
    ('IrrMethod', int64),
    ('WetSurf', float64),
    ('AppEff', float64),
    ('MaxIrr', float64),
    ('MaxIrrSeason', float64),
    ('SMT', float64[:]),
    ('IrrInterval', int64),
    ('Schedule', float64[:]),
    ('NetIrrSMT', float64),
    ('depth', float64),
]

@jitclass(spec)
class IrrMngtStruct:
    """
    Numba-compiled irrigation-management container.

    Fields mirror the defaults of IrrMngtClass, except that `Schedule`
    is a flat float array of length `sim_len` (one depth per simulation
    day) rather than a DataFrame.
    """

    def __init__(self,sim_len):
        self.IrrMethod=0

        self.WetSurf = 100.
        self.AppEff = 100.
        self.MaxIrr = 25.
        self.MaxIrrSeason = 10_000
        self.SMT=np.zeros(4)
        self.IrrInterval = 0
        # One scheduled irrigation depth per simulation day.
        self.Schedule=np.zeros(sim_len)
        self.NetIrrSMT = 80.
        self.depth = 0.
# Cell
class FieldMngtClass:
    '''
    Field Management Class

    **Attributes:**\n

    `Mulches` : `bool` : Soil surface covered by mulches (Y or N)
    `Bunds` : `bool` : Surface bunds present (Y or N)
    `CNadj` : `bool` : Field conditions affect curve number (Y or N)
    `SRinhb` : `bool` : Management practices fully inhibit surface runoff (Y or N)
    `MulchPct` : `float` : Area of soil surface covered by mulches (%)
    `fMulch` : `float` : Soil evaporation adjustment factor due to effect of mulches
    `zBund` : `float` : Bund height (m)
    `BundWater` : `float` : Initial water height in surface bunds (mm)
    `CNadjPct` : `float` : Percentage change in curve number (positive or negative)
    '''

    def __init__(self, Mulches=False, Bunds=False, CNadj=False, SRinhb=False,
                 MulchPct=50, fMulch=0.5, zBund=0, BundWater=0,
                 CNadjPct=0):
        # Every constructor argument is stored verbatim on the instance.
        self.__dict__.update(
            Mulches=Mulches,        # soil surface covered by mulches
            Bunds=Bunds,            # surface bunds present
            CNadj=CNadj,            # field conditions affect curve number
            SRinhb=SRinhb,          # practices fully inhibit surface runoff
            MulchPct=MulchPct,      # area of soil surface covered by mulches (%)
            fMulch=fMulch,          # soil evaporation adjustment factor (mulch)
            zBund=zBund,            # bund height (m)
            BundWater=BundWater,    # initial water height in surface bunds (mm)
            CNadjPct=CNadjPct,      # percentage change in curve number
        )
# Cell
# Numba type specification for FieldMngtStruct: (field name, numba dtype).
spec = [
    ('Mulches', boolean),
    ('Bunds', boolean),
    ('CNadj', boolean),
    ('SRinhb', boolean),
    ('MulchPct', float64),
    ('fMulch', float64),
    ('zBund', float64),
    ('BundWater', float64),
    ('CNadjPct', float64),
]

@jitclass(spec)
class FieldMngtStruct:
    """
    Numba-compiled field-management container.

    Fields mirror FieldMngtClass; note the numeric defaults here are all
    zero (MulchPct/fMulch differ from FieldMngtClass's 50/0.5 defaults).
    """

    def __init__(self):
        self.Mulches=False
        self.Bunds=False
        self.CNadj=False
        self.SRinhb=False

        self.MulchPct = 0.
        self.fMulch = 0.
        self.zBund = 0.
        self.BundWater = 0.
        self.CNadjPct = 0.
# Cell
class GwClass:
    '''
    Ground Water Class stores information on water table params

    **Attributes:**\n

    `WaterTable` : `str` : Water table considered (Y or N)
    `Method` : `str` : Water table input data ('Constant' or 'Variable')
    `dates` : `list` : water table observation dates
    `values` : `list` : water table observation depths
    '''

    def __init__(self,WaterTable='N',Method='Constant',dates=None,values=None):
        self.WaterTable = WaterTable
        self.Method = Method
        # None sentinels instead of mutable [] defaults: a shared default
        # list would be silently mutated across all GwClass instances.
        self.dates = [] if dates is None else dates
        self.values = [] if values is None else values
# Cell
class InitWCClass:
    '''
    Initial water content Class defines water content at start of sim

    **Attributes:**\n

    `wc_type` : `str` : Type of value ('Prop' = 'WP'/'FC'/'SAT'; 'Num' = XXX m3/m3; 'Pct' = % TAW))
    `Method` : `str` : Method ('Depth' = Interpolate depth points; 'Layer' = Constant value for each soil layer)
    `depth_layer` : `list` : location in soil profile (soil layer or depth)
    `value` : `list` : value at that location

    Raises:
        ValueError: if `depth_layer` and `value` differ in length.
    '''

    def __init__(self,wc_type='Prop',Method='Layer',
                 depth_layer=None,value=None):
        # None sentinels instead of mutable list defaults: shared default
        # lists would be mutated across all InitWCClass instances.
        depth_layer = [1] if depth_layer is None else depth_layer
        value = ['FC'] if value is None else value

        # Each location needs exactly one value. Raise (not assert) so the
        # check survives `python -O` and callers get a catchable error.
        if len(depth_layer) != len(value):
            raise ValueError('depth_layer and value must have the same length')

        self.wc_type = wc_type
        self.Method = Method
        self.depth_layer = depth_layer
        self.value = value
# Cell
# Numba type specification for CropStruct: (field name, numba dtype).
# Scalar crop parameters are float64/int64; the per-stage water-stress
# thresholds (p_up, p_lo, fshape_w) are 1-D float arrays.
spec=[
    ('fshape_b',float64),
    ('PctZmin',float64),
    ('fshape_ex',float64),
    ('ETadj',float64),
    ('Aer',float64),
    ('LagAer',int64),
    ('beta',float64),
    ('a_Tr',float64),
    ('GermThr',float64),
    ('CCmin',float64),
    ('MaxFlowPct',float64),
    ('HIini',float64),
    ('bsted',float64),
    ('bface',float64),
    ('CropType',int64),
    ('PlantMethod',int64),
    ('CalendarType',int64),
    ('SwitchGDD',int64),
    # Calendar-day (CD) counterparts of the growth-stage parameters.
    ('EmergenceCD', int64),
    ('Canopy10PctCD', int64),
    ('MaxRootingCD', int64),
    ('SenescenceCD', int64),
    ('MaturityCD', int64),
    ('MaxCanopyCD', int64),
    ('CanopyDevEndCD', int64),
    ('HIstartCD', int64),
    ('HIendCD', int64),
    ('YldFormCD', int64),
    ('Emergence',float64),
    ('MaxRooting',float64),
    ('Senescence',float64),
    ('Maturity',float64),
    ('HIstart',float64),
    ('Flowering',float64),
    ('YldForm',float64),
    ('HIend',float64),
    ('CanopyDevEnd',float64),
    ('MaxCanopy',float64),
    ('GDDmethod',int64),
    ('Tbase',float64),
    ('Tupp',float64),
    ('PolHeatStress',int64),
    ('Tmax_up',float64),
    ('Tmax_lo',float64),
    ('PolColdStress',int64),
    ('Tmin_up',float64),
    ('Tmin_lo',float64),
    ('TrColdStress',int64),
    ('GDD_up',float64),
    ('GDD_lo',float64),
    ('Zmin',float64),
    ('Zmax',float64),
    ('fshape_r',float64),
    ('SxTopQ',float64),
    ('SxBotQ',float64),
    ('SxTop',float64),
    ('SxBot',float64),
    ('SeedSize',float64),
    ('PlantPop',int64),
    ('CCx',float64),
    ('CDC',float64),
    ('CGC',float64),
    ('Kcb',float64),
    ('fage',float64),
    ('WP',float64),
    ('WPy',float64),
    ('fsink',float64),
    ('HI0',float64),
    ('dHI_pre',float64),
    ('a_HI',float64),
    ('b_HI',float64),
    ('dHI0',float64),
    ('Determinant',int64),
    ('exc',float64),
    # Per-stage water-stress threshold arrays (length 4).
    ('p_up',float64[:]),
    ('p_lo',float64[:]),
    ('fshape_w',float64[:]),
    ('Canopy10Pct',int64),
    ('CC0',float64),
    ('HIGC',float64),
    ('tLinSwitch',int64),
    ('dHILinear',float64),
    ('fCO2',float64),
    ('FloweringCD',int64),
    ('FloweringEnd',float64),
]
@jitclass(spec)
class CropStruct(object):
    '''
    Numba-compiled crop container used inside the simulation kernel.

    Initialised with Maize-like default parameter values; derived fields
    (the *CD calendar-day counterparts, SxTop/SxBot, CC0, HIGC, etc.) are
    zero-initialised here and presumably filled in by later parameter
    preprocessing (not shown in this file chunk).
    '''

    def __init__(self,):

        # Assign default program properties (should not be changed without expert knowledge)
        self.fshape_b = 13.8135 # Shape factor describing the reduction in biomass production for insufficient growing degree days
        self.PctZmin = 70 # Initial percentage of minimum effective rooting depth
        self.fshape_ex = -6 # Shape factor describing the effects of water stress on root expansion
        self.ETadj = 1 # Adjustment to water stress thresholds depending on daily ET0 (0 = No, 1 = Yes)
        self.Aer = 5 # Vol (%) below saturation at which stress begins to occur due to deficient aeration
        self.LagAer = 3 # Number of days lag before aeration stress affects crop growth
        self.beta = 12 # Reduction (%) to p_lo3 when early canopy senescence is triggered
        self.a_Tr = 1 # Exponent parameter for adjustment of Kcx once senescence is triggered
        self.GermThr = 0.2 # Proportion of total water storage needed for crop to germinate
        self.CCmin = 0.05 # Minimum canopy size below which yield formation cannot occur
        self.MaxFlowPct = 100/3 # Proportion of total flowering time (%) at which peak flowering occurs
        self.HIini = 0.01 # Initial harvest index
        self.bsted = 0.000138 # WP co2 adjustment parameter given by Steduto et al. 2007
        self.bface = 0.001165 # WP co2 adjustment parameter given by FACE experiments

        # added in Read_Model_Paramaters
        self.CropType= 3 # Crop Type (1 = Leafy vegetable, 2 = Root/tuber, 3 = Fruit/grain)
        self.PlantMethod= 1 # Planting method (0 = Transplanted, 1 = Sown)
        self.CalendarType= 2 # Calendar Type (1 = Calendar days, 2 = Growing degree days)
        self.SwitchGDD= 0 # Convert calendar to GDD mode if inputs are given in calendar days (0 = No; 1 = Yes)

        # Calendar-day (CD) growth-stage counterparts; zero until computed.
        self.EmergenceCD = 0
        self.Canopy10PctCD = 0
        self.MaxRootingCD = 0
        self.SenescenceCD = 0
        self.MaturityCD = 0
        self.MaxCanopyCD = 0
        self.CanopyDevEndCD = 0
        self.HIstartCD = 0
        self.HIendCD = 0
        self.YldFormCD = 0

        self.Emergence = 80 # Growing degree/Calendar days from sowing to emergence/transplant recovery
        self.MaxRooting = 1420 # Growing degree/Calendar days from sowing to maximum rooting
        self.Senescence = 1420 # Growing degree/Calendar days from sowing to senescence
        self.Maturity = 1670 # Growing degree/Calendar days from sowing to maturity
        self.HIstart = 850 # Growing degree/Calendar days from sowing to start of yield formation
        self.Flowering = 190 # Duration of flowering in growing degree/calendar days (-999 for non-fruit/grain crops)
        self.YldForm = 775 # Duration of yield formation in growing degree/calendar days
        # Derived stage markers; zero until computed.
        self.HIend = 0
        self.MaxCanopy = 0
        self.CanopyDevEnd = 0
        self.Canopy10Pct = 0

        self.GDDmethod = 2 # Growing degree day calculation method
        self.Tbase = 8 # Base temperature (degC) below which growth does not progress
        self.Tupp = 30 # Upper temperature (degC) above which crop development no longer increases
        self.PolHeatStress = 1 # Pollination affected by heat stress (0 = No, 1 = Yes)
        self.Tmax_up = 40 # Maximum air temperature (degC) above which pollination begins to fail
        self.Tmax_lo = 45 # Maximum air temperature (degC) at which pollination completely fails
        self.PolColdStress = 1 # Pollination affected by cold stress (0 = No, 1 = Yes)
        self.Tmin_up = 10 # Minimum air temperature (degC) below which pollination begins to fail
        self.Tmin_lo = 5 # Minimum air temperature (degC) at which pollination completely fails
        self.TrColdStress = 1 # Transpiration affected by cold temperature stress (0 = No, 1 = Yes)
        self.GDD_up = 12 # Minimum growing degree days (degC/day) required for full crop transpiration potential
        self.GDD_lo = 0 # Growing degree days (degC/day) at which no crop transpiration occurs
        self.Zmin = 0.3 # Minimum effective rooting depth (m)
        self.Zmax = 1.7 # Maximum rooting depth (m)
        self.fshape_r = 1.3 # Shape factor describing root expansion
        self.SxTopQ = 0.0480 # Maximum root water extraction at top of the root zone (m3/m3/day)
        self.SxBotQ = 0.0117 # Maximum root water extraction at the bottom of the root zone (m3/m3/day)

        # Adjusted top/bottom extraction terms; zero until computed.
        self.SxTop = 0.
        self.SxBot = 0.

        self.SeedSize = 6.5 # Soil surface area (cm2) covered by an individual seedling at 90% emergence
        self.PlantPop = 75_000 # Number of plants per hectare
        self.CCx = 0.96 # Maximum canopy cover (fraction of soil cover)
        self.CDC = 0.01 # Canopy decline coefficient (fraction per GDD/calendar day)
        self.CGC = 0.0125 # Canopy growth coefficient (fraction per GDD)
        self.Kcb = 1.05 # Crop coefficient when canopy growth is complete but prior to senescence
        self.fage = 0.3 # Decline of crop coefficient due to ageing (%/day)
        self.WP = 33.7 # Water productivity normalized for ET0 and C02 (g/m2)
        self.WPy = 100 # Adjustment of water productivity in yield formation stage (% of WP)
        self.fsink = 0.5 # Crop performance under elevated atmospheric CO2 concentration (%/100)
        self.HI0 = 0.48 # Reference harvest index
        self.dHI_pre = 0 # Possible increase of harvest index due to water stress before flowering (%)
        self.a_HI = 7 # Coefficient describing positive impact on harvest index of restricted vegetative growth during yield formation
        self.b_HI = 3 # Coefficient describing negative impact on harvest index of stomatal closure during yield formation
        self.dHI0 = 15 # Maximum allowable increase of harvest index above reference value
        self.Determinant = 1 # Crop Determinancy (0 = Indeterminant, 1 = Determinant)
        self.exc = 50 # Excess of potential fruits
        self.p_up = np.zeros(4) # Upper soil water depletion threshold for water stress effects on affect canopy expansion
        self.p_lo = np.zeros(4) # Lower soil water depletion threshold for water stress effects on canopy expansion
        self.fshape_w = np.ones(4) # Shape factor describing water stress effects on canopy expansion

        # Derived canopy/harvest-index parameters; zero until computed.
        self.CC0 = 0.
        self.HIGC = 0.
        self.tLinSwitch = 0
        self.dHILinear = 0.
        self.fCO2 = 0.
        self.FloweringCD = 0
        self.FloweringEnd=0.
# Cell
# Numba type specification for InitCondClass: (field name, numba dtype).
# Per-compartment quantities (AerDaysComp, th_fc_Adj, th, thini) are 1-D
# float arrays sized by the number of soil compartments.
spec=[
    ('AgeDays', float64),
    ('AgeDays_NS', float64),
    ('AerDays', float64),
    ('AerDaysComp', float64[:]),
    ('IrrCum', float64),
    ('DelayedGDDs', float64),
    ('DelayedCDs', float64),
    ('PctLagPhase', float64),
    ('tEarlySen', float64),
    ('GDDcum', float64),
    ('DaySubmerged', float64),
    ('IrrNetCum', float64),
    ('DAP', int64),
    ('Epot', float64),
    ('Tpot', float64),
    # Boolean state flags.
    ('PreAdj', boolean),
    ('CropMature', boolean),
    ('CropDead', boolean),
    ('Germination', boolean),
    ('PrematSenes', boolean),
    ('HarvestFlag', boolean),
    ('GrowingSeason', boolean),
    ('YieldForm', boolean),
    ('Stage2', boolean),
    ('WTinSoil', boolean),
    # Harvest-index bookkeeping.
    ('Stage', float64),
    ('Fpre', float64),
    ('Fpost', float64),
    ('fpost_dwn', float64),
    ('fpost_upp', float64),
    ('HIcor_Asum', float64),
    ('HIcor_Bsum', float64),
    ('Fpol', float64),
    ('sCor1', float64),
    ('sCor2', float64),
    ('HIref', float64),
    ('GrowthStage', float64),
    ('TrRatio', float64),
    ('rCor', float64),
    # Canopy cover and biomass state.
    ('CC', float64),
    ('CCadj', float64),
    ('CC_NS', float64),
    ('CCadj_NS', float64),
    ('B', float64),
    ('B_NS', float64),
    ('HI', float64),
    ('HIadj', float64),
    ('CCxAct', float64),
    ('CCxAct_NS', float64),
    ('CCxW', float64),
    ('CCxW_NS', float64),
    ('CCxEarlySen', float64),
    ('CCprev', float64),
    ('ProtectedSeed', int64),
    ('Y', float64),
    ('Zroot', float64),
    ('CC0adj', float64),
    ('SurfaceStorage', float64),
    ('zGW', float64),
    # Per-compartment soil water state.
    ('th_fc_Adj', float64[:]),
    ('th', float64[:]),
    ('thini', float64[:]),
    ('TimeStepCounter', int64),
    # Daily weather inputs and evaporation state.
    ('P', float64),
    ('Tmax', float64),
    ('Tmin', float64),
    ('Et0', float64),
    ('GDD', float64),
    ('Wsurf', float64),
    ('EvapZ', float64),
    ('Wstage2', float64),
    ('Depletion', float64),
    ('TAW', float64),
]
@jitclass(spec)
class InitCondClass:
    '''
    The InitCond Class contains all parameters and variables used in the simulation,
    updated each timestep with the name NewCond.

    Parameters:
        num_comp: number of soil compartments; sizes the per-compartment
            arrays (AerDaysComp, th_fc_Adj, th, thini).
    '''

    def __init__(self,num_comp):
        # counters
        self.AgeDays = 0
        self.AgeDays_NS = 0
        self.AerDays = 0
        self.AerDaysComp = np.zeros(num_comp)
        self.IrrCum = 0
        self.DelayedGDDs = 0
        self.DelayedCDs = 0
        self.PctLagPhase = 0
        self.tEarlySen = 0
        self.GDDcum = 0
        self.DaySubmerged = 0
        self.IrrNetCum = 0
        self.DAP = 0
        self.Epot = 0
        self.Tpot = 0

        # States
        self.PreAdj = False
        self.CropMature = False
        self.CropDead = False
        self.Germination = False
        self.PrematSenes = False
        self.HarvestFlag = False
        self.GrowingSeason = False
        self.YieldForm = False
        self.Stage2 = False

        self.WTinSoil = False

        # HI
        self.Stage = 1
        self.Fpre = 1
        self.Fpost = 1
        self.fpost_dwn = 1
        self.fpost_upp = 1

        self.HIcor_Asum = 0
        self.HIcor_Bsum = 0
        self.Fpol = 0
        self.sCor1 = 0
        self.sCor2 = 0
        self.HIref = 0.

        # GS
        self.GrowthStage = 0

        # Transpiration
        self.TrRatio = 1

        # crop growth
        self.rCor = 1

        self.CC = 0
        self.CCadj = 0
        self.CC_NS = 0
        self.CCadj_NS = 0
        self.B = 0
        self.B_NS = 0
        self.HI = 0
        self.HIadj = 0
        self.CCxAct = 0
        self.CCxAct_NS = 0
        self.CCxW = 0
        self.CCxW_NS = 0
        self.CCxEarlySen = 0
        self.CCprev = 0
        self.ProtectedSeed = 0
        self.Y = 0

        self.Zroot = 0
        self.CC0adj = 0
        self.SurfaceStorage = 0
        # -999 sentinel: groundwater depth not (yet) known.
        self.zGW = -999

        # Per-compartment soil water state.
        self.th_fc_Adj = np.zeros(num_comp)
        self.th = np.zeros(num_comp)
        self.thini = np.zeros(num_comp)

        self.TimeStepCounter=0

        # Daily weather inputs and evaporation-layer state.
        self.P=0
        self.Tmax=0
        self.Tmin=0
        self.Et0=0
        self.GDD=0

        self.Wsurf=0
        self.EvapZ=0
        self.Wstage2=0

        self.Depletion=0
        self.TAW=0
# Cell
# numba type specification for WevapClass (all scalar storages, mm)
spec = [
    ('Act', float64),
    ('Sat', float64),
    ('Fc', float64),
    ('Wp', float64),
    ('Dry', float64),
]
@jitclass(spec)
class WevapClass(object):
    """
    Stores soil water contents in the surface evaporation layer.

    **Attributes:**\n
    `Sat` : `float` : Water storage in evaporation layer at saturation (mm)
    `Fc` : `float` : Water storage in evaporation layer at Field Capacity (mm)
    `Wp` : `float` : Water storage in evaporation layer at Wilting Point (mm)
    `Dry` : `float` : Water storage in evaporation layer at air dry (mm)
    `Act` : `float` : Actual water storage in evaporation layer (mm)
    """
    def __init__(self):
        # All storages start at zero; they are filled in by the evaporation routine.
        self.Sat = 0.
        self.Fc = 0.
        self.Wp = 0.
        self.Dry = 0.
        self.Act = 0.
# Cell
# numba type specification for SoilProfileClass: every field is an array
# with one entry per soil compartment
spec = [
    ('Comp', int64[:]),
    ('dz', float64[:]),
    ('Layer', int64[:]),
    ('dzsum', float64[:]),
    ('th_fc', float64[:]),
    ('Layer_dz', float64[:]),
    ('th_s', float64[:]),
    ('th_wp', float64[:]),
    ('Ksat', float64[:]),
    ('Penetrability', float64[:]),
    ('th_dry', float64[:]),
    ('tau', float64[:]),
    ('zBot', float64[:]),
    ('zTop', float64[:]),
    ('zMid', float64[:]),
    ('th_fc_Adj', float64[:]),
    ('aCR', float64[:]),
    ('bCR', float64[:]),
]
@jitclass(spec)
class SoilProfileClass:
    """
    Vectorised description of the discretised soil profile; every attribute
    holds one value per soil compartment.

    **Attributes:**\n
    `Comp` : `list` : compartment index
    `Layer` : `list` : soil-layer index each compartment belongs to
    `dz` : `list` : compartment thickness
    `dzsum` : `list` : cumulative thickness down to the compartment bottom
    `zBot` : `list` : depth of the compartment bottom
    `zTop` : `list` : depth of the compartment top
    `zMid` : `list` : depth of the compartment mid-point
    `th_fc`/`th_s`/`th_wp`/`th_dry` : water contents at field capacity,
    saturation, wilting point and air dry (units presumably m3/m3 -- confirm)
    NOTE(review): `aCR`/`bCR` look like capillary-rise coefficients and
    `Ksat` like saturated conductivity -- inferred from the names, confirm
    against the routines that fill these arrays.
    """
    def __init__(self,length):
        # Allocate every per-compartment array up front; values are filled
        # in later by the soil set-up code.
        self.Comp = np.zeros(length,dtype=int64)
        self.dz = np.zeros(length,dtype=float64)
        self.Layer = np.zeros(length,dtype=int64)
        self.dzsum = np.zeros(length,dtype=float64)
        self.th_fc = np.zeros(length,dtype=float64)
        self.Layer_dz = np.zeros(length,dtype=float64)
        self.th_s = np.zeros(length,dtype=float64)
        self.th_wp = np.zeros(length,dtype=float64)
        self.Ksat = np.zeros(length,dtype=float64)
        self.Penetrability = np.zeros(length,dtype=float64)
        self.th_dry = np.zeros(length,dtype=float64)
        self.tau = np.zeros(length,dtype=float64)
        self.zBot = np.zeros(length,dtype=float64)
        self.zTop = np.zeros(length,dtype=float64)
        self.zMid = np.zeros(length,dtype=float64)
        self.th_fc_Adj = np.zeros(length,dtype=float64)
        self.aCR = np.zeros(length,dtype=float64)
        self.bCR = np.zeros(length,dtype=float64)
# Cell
# numba type specification for TAWClass
spec = [
    ('Rz', float64),
    ('Zt', float64),
]
@jitclass(spec)
class TAWClass:
    """
    Total available water (TAW) holder.

    **Attributes:**\n
    `Rz` : `float` : TAW over the root zone (NOTE(review): inferred from
        the attribute name -- confirm meaning/units against callers)
    `Zt` : `float` : TAW over the top soil (NOTE(review): inferred -- confirm)
    """
    def __init__(self):
        self.Rz = 0.
        self.Zt = 0.
# Cell
# numba type specification for DrClass
# NOTE(review): structurally identical to TAWClass; kept separate because
# numba jitclasses do not support shared bases/inheritance.
spec = [
    ('Rz', float64),
    ('Zt', float64),
]
@jitclass(spec)
class DrClass:
    """
    Soil-water depletion holder.

    **Attributes:**\n
    `Rz` : `float` : depletion over the root zone (NOTE(review): "Dr" reads
        as depletion -- inferred from the name, confirm against callers)
    `Zt` : `float` : depletion over the top soil (NOTE(review): inferred -- confirm)
    """
    def __init__(self):
        self.Rz = 0.
        self.Zt = 0.
# Cell
# numba type specification for thRZClass (scalar water contents)
spec = [
    ('Act', float64),
    ('S', float64),
    ('FC', float64),
    ('WP', float64),
    ('Dry', float64),
    ('Aer', float64),
]
@jitclass(spec)
class thRZClass(object):
    """
    Root zone water content.

    **Attributes:**\n
    `Act` : `float` : actual root-zone water content
    `S` : `float` : water content at saturation
    `FC` : `float` : water content at field capacity
    `WP` : `float` : water content at wilting point
    `Dry` : `float` : water content at air dry
    `Aer` : `float` : water content at the aeration limit
        (NOTE(review): inferred from the name -- confirm)
    """
    def __init__(self):
        self.Act = 0.
        self.S = 0.
        self.FC = 0.
        self.WP = 0.
        self.Dry = 0.
        self.Aer = 0.
# Cell
# numba type specification for KswClass (scalar stress coefficients)
spec = [
    ('Exp', float64),
    ('Sto', float64),
    ('Sen', float64),
    ('Pol', float64),
    ('StoLin', float64),
]
@jitclass(spec)
class KswClass(object):
    """
    Water stress coefficients (1 = no stress; smaller = more stress).

    **Attributes:**\n
    `Exp` : `float` : stress on canopy expansion
    `Sto` : `float` : stomatal stress
    `Sen` : `float` : early-senescence stress
    `Pol` : `float` : pollination stress
    `StoLin` : `float` : linearised stomatal stress
    NOTE(review): per-coefficient meanings inferred from AquaCrop naming
    conventions -- confirm against the water-stress routine.
    """
    def __init__(self):
        # All coefficients start at 1, i.e. fully unstressed.
        self.Exp = 1.
        self.Sto = 1.
        self.Sen = 1.
        self.Pol = 1.
        self.StoLin = 1.
# Cell
# numba type specification for KstClass (scalar stress coefficients)
spec = [
    ('PolH', float64),
    ('PolC', float64),
]
@jitclass(spec)
class KstClass(object):
    """
    Temperature stress coefficients (1 = no stress).

    **Attributes:**\n
    `PolH` : `float` : heat stress (on pollination)
    `PolC` : `float` : cold stress (on pollination)
    """
    def __init__(self):
        # Start fully unstressed.
        self.PolH = 1.
        self.PolC = 1.
# Cell
spec = [
('RefConc', float64),
('CurrentConc', float64),
]
@jitclass(spec)
class CO2Class(object):
"""
**Attributes:**\n
`RefConc` : `float` : reference CO2 concentration
`CurrentConc` : `float` : current CO2 concentration
"""
def __init__(self):
self.RefConc = 369.41
self.CurrentConc = 0. | [
"tomk10tk@gmail.com"
] | tomk10tk@gmail.com |
d4c07aa542fd2df9f7066b893a929bbebdacca97 | 0eb3cb7493b6cc604a1aea9afc7af02e89b38602 | /Chapter10. Files/file.py | 16b8206030bce27b3aa6d69377aa5c469ab2a262 | [] | no_license | ec4sug4/i | 8b7c2d21ff3e7c763464f3a77ea009683eb17d51 | 1dbd58bb12729749c220b9f1f92f63389e7a886c | refs/heads/master | 2023-05-10T17:08:57.966542 | 2020-07-02T09:33:01 | 2020-07-02T09:33:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | fileref = open("olympics.txt","r")
# Read the whole file into a list of line strings (trailing newlines kept).
line = fileref.readlines()
# Show only the first four lines.
for i in line[:4]:
    print(i)
# Release the file handle opened above.
fileref.close()
"subham.kumar032@gmail.com"
] | subham.kumar032@gmail.com |
91bfa4b69dc8175e14f2c85dffe644cc6f7a0d71 | fe9e6580e954ed62c4e8fd6b860000bb553150a6 | /ecommerce/forms.py | bffb01b5ed4507bffcb530dd54713c62b71512fe | [] | no_license | Brucehaha/ecommerce | 037fb25608e848f5c0fd4ed78f42028d21872e39 | bea5e5a13ad1e958912b0ac99cfc556a593f91f3 | refs/heads/workplace | 2023-01-03T19:35:13.894572 | 2018-06-20T07:22:19 | 2018-06-20T07:22:19 | 124,492,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django import forms
class ContactForm(forms.Form):
    """Site contact form: sender name, email address and message body.

    All widgets carry the Bootstrap ``form-control`` class so the form
    renders consistently with the rest of the site.
    """
    fullname = forms.CharField(
        widget=forms.TextInput(
            attrs={
                "class": "form-control",
                "placeholder": "Your fullname"
            }
        )
    )
    email = forms.EmailField(
        widget=forms.EmailInput(
            attrs={
                "class": "form-control",
                "placeholder": "Your Email"
            }
        )
    )
    content = forms.CharField(
        widget=forms.Textarea(
            attrs={
                "class": "form-control",
                # BUG FIX: placeholder read "Year message" (typo for "Your message").
                "placeholder": "Your message"
            }
        )
    )

    def clean_email(self):
        """Reject any address that is not a Gmail address.

        Returns:
            str: the validated email address.

        Raises:
            forms.ValidationError: when "gmail.com" does not occur in the address.
        """
        email = self.cleaned_data.get("email")
        # NOTE(review): a plain substring match also accepts addresses such as
        # "user@gmail.com.evil.org"; use email.endswith("@gmail.com") if the
        # check is meant to be strict.
        if "gmail.com" not in email:
            raise forms.ValidationError("Email has to be gmail.com")
        return email
| [
"henninglee2013@gmail.com"
] | henninglee2013@gmail.com |
52b202fd47aace9a4d7ef4788898606a49878af1 | 23f78b8e4547443ba3285440dd40ca1684109b9e | /model.py | 084e26aaebba616ae3aeccf6deeb77f93ac212b1 | [] | no_license | suneelc12/Rasa_NLU | 3f12b0bff32978b76cc492e7358c5907b1610778 | 9001bfc926a0826ad2615a8395597d7cbc2448d9 | refs/heads/master | 2020-04-11T03:41:41.668784 | 2019-03-07T21:06:19 | 2019-03-07T21:06:19 | 161,486,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import os
import datetime
import sys
#import shutil
#modified_time=datetime.datetime.fromtimestamp(os.path.getmtime('C:/Users/ramad/Downloads/chatbot-node-rasa-master/HRbot/HR_Bot.json'))
#print(modified_time)
# Folder that holds one timestamped sub-directory per trained Rasa model.
directory = 'C:/Users/ramad/Downloads/chatbot-node-rasa-master/models/default/'

def all_subdirs_of(b=directory):
    """Return the joined paths of every immediate sub-directory of *b*.

    Args:
        b: directory to scan; defaults to the models folder above.

    Returns:
        list[str]: one entry per sub-directory (plain files are skipped).
        Order follows ``os.listdir`` and is therefore arbitrary.
    """
    # Comprehension form of the original append loop; behaviour unchanged.
    return [os.path.join(b, d) for d in os.listdir(b)
            if os.path.isdir(os.path.join(b, d))]
# The most recently modified sub-directory is the latest trained model.
latest_subdir = max(all_subdirs_of(directory), key=os.path.getmtime)
print(latest_subdir )
# Flush stdout so a parent process reading this script's output (presumably
# the Node.js chatbot server -- confirm) sees the path immediately.
sys.stdout.flush()
#import os
#import time
#import operator
#alist={}
#directory= 'C:/Users/ramad/Downloads/chatbot-node-rasa-master/models/default/'
#os.chdir(directory)
#for file in os.listdir("."):
# if os.path.isdir(file):
# timestamp = os.path.getmtime( file )
# # get timestamp and directory name and store to dictionary
# alist[os.path.join(os.getcwd(),file)]=timestamp
## sort the timestamp
#for i in sorted(alist.items(), key=operator.itemgetter(1)):
# latest="%s" % ( i[0])
#print ("newest directory is ", latest) | [
"noreply@github.com"
] | noreply@github.com |
1e6d34cd5428851cdf59a0a8cbcabbedc98ffb63 | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/04. List/052.py | b71c6b6b072e84b96609243c216c08fb45331666 | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #리스트에 원소 추가 _ append()
# Current movie ranking (Doctor Strange, Split, Luck-Key).
movie_rank = ["닥터 스트레인지", "스플릿", "럭키"]
# list.append() adds the new title ("Batman") to the end of the list in place.
movie_rank.append("배트맨")
print(movie_rank)
"skfls2618@naver.com"
] | skfls2618@naver.com |
8c8b11b281a3a8c90dc800644e35e30ea14afc61 | 3a7ef35a51aabaf762dca13f2197548380121ad8 | /beer-song/beer_song_test.py | 761f09f04a2044f0a3c224faaa93715505645455 | [
"Unlicense"
] | permissive | ikostan/Exercism_Python_Track | ff0be0386cf3fb1b62db54f72b8db15161928af7 | a6d52ad74e36db1d2bf82ed15362c1e4341d741d | refs/heads/master | 2023-08-09T16:56:17.615800 | 2020-10-16T00:46:55 | 2020-10-16T00:46:55 | 191,260,562 | 0 | 0 | Unlicense | 2023-09-04T01:17:29 | 2019-06-10T23:41:50 | Python | UTF-8 | Python | false | false | 19,079 | py | import unittest
from beer_song import recite
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
class BeerSongTest(unittest.TestCase):
def test_first_generic_verse(self):
expected = [
"99 bottles of beer on the wall, 99 bottles of beer.",
"Take one down and pass it around, 98 bottles of beer on the wall.",
]
self.assertEqual(recite(start=99), expected)
def test_last_generic_verse(self):
expected = [
"3 bottles of beer on the wall, 3 bottles of beer.",
"Take one down and pass it around, 2 bottles of beer on the wall.",
]
self.assertEqual(recite(start=3), expected)
def test_verse_with_2_bottles(self):
expected = [
"2 bottles of beer on the wall, 2 bottles of beer.",
"Take one down and pass it around, 1 bottle of beer on the wall.",
]
self.assertEqual(recite(start=2), expected)
def test_verse_with_1_bottle(self):
expected = [
"1 bottle of beer on the wall, 1 bottle of beer.",
"Take it down and pass it around, no more bottles of beer on the wall.",
]
self.assertEqual(recite(start=1), expected)
def test_verse_with_0_bottles(self):
expected = [
"No more bottles of beer on the wall, no more bottles of beer.",
"Go to the store and buy some more, 99 bottles of beer on the wall.",
]
self.assertEqual(recite(start=0), expected)
def test_first_two_verses(self):
expected = [
"99 bottles of beer on the wall, 99 bottles of beer.",
"Take one down and pass it around, 98 bottles of beer on the wall.",
"",
"98 bottles of beer on the wall, 98 bottles of beer.",
"Take one down and pass it around, 97 bottles of beer on the wall.",
]
self.assertEqual(recite(start=99, take=2), expected)
def test_last_three_verses(self):
expected = [
"2 bottles of beer on the wall, 2 bottles of beer.",
"Take one down and pass it around, 1 bottle of beer on the wall.",
"",
"1 bottle of beer on the wall, 1 bottle of beer.",
"Take it down and pass it around, no more bottles of beer on the wall.",
"",
"No more bottles of beer on the wall, no more bottles of beer.",
"Go to the store and buy some more, 99 bottles of beer on the wall.",
]
self.assertEqual(recite(start=2, take=3), expected)
def test_all_verses(self):
expected = [
"99 bottles of beer on the wall, 99 bottles of beer.",
"Take one down and pass it around, 98 bottles of beer on the wall.",
"",
"98 bottles of beer on the wall, 98 bottles of beer.",
"Take one down and pass it around, 97 bottles of beer on the wall.",
"",
"97 bottles of beer on the wall, 97 bottles of beer.",
"Take one down and pass it around, 96 bottles of beer on the wall.",
"",
"96 bottles of beer on the wall, 96 bottles of beer.",
"Take one down and pass it around, 95 bottles of beer on the wall.",
"",
"95 bottles of beer on the wall, 95 bottles of beer.",
"Take one down and pass it around, 94 bottles of beer on the wall.",
"",
"94 bottles of beer on the wall, 94 bottles of beer.",
"Take one down and pass it around, 93 bottles of beer on the wall.",
"",
"93 bottles of beer on the wall, 93 bottles of beer.",
"Take one down and pass it around, 92 bottles of beer on the wall.",
"",
"92 bottles of beer on the wall, 92 bottles of beer.",
"Take one down and pass it around, 91 bottles of beer on the wall.",
"",
"91 bottles of beer on the wall, 91 bottles of beer.",
"Take one down and pass it around, 90 bottles of beer on the wall.",
"",
"90 bottles of beer on the wall, 90 bottles of beer.",
"Take one down and pass it around, 89 bottles of beer on the wall.",
"",
"89 bottles of beer on the wall, 89 bottles of beer.",
"Take one down and pass it around, 88 bottles of beer on the wall.",
"",
"88 bottles of beer on the wall, 88 bottles of beer.",
"Take one down and pass it around, 87 bottles of beer on the wall.",
"",
"87 bottles of beer on the wall, 87 bottles of beer.",
"Take one down and pass it around, 86 bottles of beer on the wall.",
"",
"86 bottles of beer on the wall, 86 bottles of beer.",
"Take one down and pass it around, 85 bottles of beer on the wall.",
"",
"85 bottles of beer on the wall, 85 bottles of beer.",
"Take one down and pass it around, 84 bottles of beer on the wall.",
"",
"84 bottles of beer on the wall, 84 bottles of beer.",
"Take one down and pass it around, 83 bottles of beer on the wall.",
"",
"83 bottles of beer on the wall, 83 bottles of beer.",
"Take one down and pass it around, 82 bottles of beer on the wall.",
"",
"82 bottles of beer on the wall, 82 bottles of beer.",
"Take one down and pass it around, 81 bottles of beer on the wall.",
"",
"81 bottles of beer on the wall, 81 bottles of beer.",
"Take one down and pass it around, 80 bottles of beer on the wall.",
"",
"80 bottles of beer on the wall, 80 bottles of beer.",
"Take one down and pass it around, 79 bottles of beer on the wall.",
"",
"79 bottles of beer on the wall, 79 bottles of beer.",
"Take one down and pass it around, 78 bottles of beer on the wall.",
"",
"78 bottles of beer on the wall, 78 bottles of beer.",
"Take one down and pass it around, 77 bottles of beer on the wall.",
"",
"77 bottles of beer on the wall, 77 bottles of beer.",
"Take one down and pass it around, 76 bottles of beer on the wall.",
"",
"76 bottles of beer on the wall, 76 bottles of beer.",
"Take one down and pass it around, 75 bottles of beer on the wall.",
"",
"75 bottles of beer on the wall, 75 bottles of beer.",
"Take one down and pass it around, 74 bottles of beer on the wall.",
"",
"74 bottles of beer on the wall, 74 bottles of beer.",
"Take one down and pass it around, 73 bottles of beer on the wall.",
"",
"73 bottles of beer on the wall, 73 bottles of beer.",
"Take one down and pass it around, 72 bottles of beer on the wall.",
"",
"72 bottles of beer on the wall, 72 bottles of beer.",
"Take one down and pass it around, 71 bottles of beer on the wall.",
"",
"71 bottles of beer on the wall, 71 bottles of beer.",
"Take one down and pass it around, 70 bottles of beer on the wall.",
"",
"70 bottles of beer on the wall, 70 bottles of beer.",
"Take one down and pass it around, 69 bottles of beer on the wall.",
"",
"69 bottles of beer on the wall, 69 bottles of beer.",
"Take one down and pass it around, 68 bottles of beer on the wall.",
"",
"68 bottles of beer on the wall, 68 bottles of beer.",
"Take one down and pass it around, 67 bottles of beer on the wall.",
"",
"67 bottles of beer on the wall, 67 bottles of beer.",
"Take one down and pass it around, 66 bottles of beer on the wall.",
"",
"66 bottles of beer on the wall, 66 bottles of beer.",
"Take one down and pass it around, 65 bottles of beer on the wall.",
"",
"65 bottles of beer on the wall, 65 bottles of beer.",
"Take one down and pass it around, 64 bottles of beer on the wall.",
"",
"64 bottles of beer on the wall, 64 bottles of beer.",
"Take one down and pass it around, 63 bottles of beer on the wall.",
"",
"63 bottles of beer on the wall, 63 bottles of beer.",
"Take one down and pass it around, 62 bottles of beer on the wall.",
"",
"62 bottles of beer on the wall, 62 bottles of beer.",
"Take one down and pass it around, 61 bottles of beer on the wall.",
"",
"61 bottles of beer on the wall, 61 bottles of beer.",
"Take one down and pass it around, 60 bottles of beer on the wall.",
"",
"60 bottles of beer on the wall, 60 bottles of beer.",
"Take one down and pass it around, 59 bottles of beer on the wall.",
"",
"59 bottles of beer on the wall, 59 bottles of beer.",
"Take one down and pass it around, 58 bottles of beer on the wall.",
"",
"58 bottles of beer on the wall, 58 bottles of beer.",
"Take one down and pass it around, 57 bottles of beer on the wall.",
"",
"57 bottles of beer on the wall, 57 bottles of beer.",
"Take one down and pass it around, 56 bottles of beer on the wall.",
"",
"56 bottles of beer on the wall, 56 bottles of beer.",
"Take one down and pass it around, 55 bottles of beer on the wall.",
"",
"55 bottles of beer on the wall, 55 bottles of beer.",
"Take one down and pass it around, 54 bottles of beer on the wall.",
"",
"54 bottles of beer on the wall, 54 bottles of beer.",
"Take one down and pass it around, 53 bottles of beer on the wall.",
"",
"53 bottles of beer on the wall, 53 bottles of beer.",
"Take one down and pass it around, 52 bottles of beer on the wall.",
"",
"52 bottles of beer on the wall, 52 bottles of beer.",
"Take one down and pass it around, 51 bottles of beer on the wall.",
"",
"51 bottles of beer on the wall, 51 bottles of beer.",
"Take one down and pass it around, 50 bottles of beer on the wall.",
"",
"50 bottles of beer on the wall, 50 bottles of beer.",
"Take one down and pass it around, 49 bottles of beer on the wall.",
"",
"49 bottles of beer on the wall, 49 bottles of beer.",
"Take one down and pass it around, 48 bottles of beer on the wall.",
"",
"48 bottles of beer on the wall, 48 bottles of beer.",
"Take one down and pass it around, 47 bottles of beer on the wall.",
"",
"47 bottles of beer on the wall, 47 bottles of beer.",
"Take one down and pass it around, 46 bottles of beer on the wall.",
"",
"46 bottles of beer on the wall, 46 bottles of beer.",
"Take one down and pass it around, 45 bottles of beer on the wall.",
"",
"45 bottles of beer on the wall, 45 bottles of beer.",
"Take one down and pass it around, 44 bottles of beer on the wall.",
"",
"44 bottles of beer on the wall, 44 bottles of beer.",
"Take one down and pass it around, 43 bottles of beer on the wall.",
"",
"43 bottles of beer on the wall, 43 bottles of beer.",
"Take one down and pass it around, 42 bottles of beer on the wall.",
"",
"42 bottles of beer on the wall, 42 bottles of beer.",
"Take one down and pass it around, 41 bottles of beer on the wall.",
"",
"41 bottles of beer on the wall, 41 bottles of beer.",
"Take one down and pass it around, 40 bottles of beer on the wall.",
"",
"40 bottles of beer on the wall, 40 bottles of beer.",
"Take one down and pass it around, 39 bottles of beer on the wall.",
"",
"39 bottles of beer on the wall, 39 bottles of beer.",
"Take one down and pass it around, 38 bottles of beer on the wall.",
"",
"38 bottles of beer on the wall, 38 bottles of beer.",
"Take one down and pass it around, 37 bottles of beer on the wall.",
"",
"37 bottles of beer on the wall, 37 bottles of beer.",
"Take one down and pass it around, 36 bottles of beer on the wall.",
"",
"36 bottles of beer on the wall, 36 bottles of beer.",
"Take one down and pass it around, 35 bottles of beer on the wall.",
"",
"35 bottles of beer on the wall, 35 bottles of beer.",
"Take one down and pass it around, 34 bottles of beer on the wall.",
"",
"34 bottles of beer on the wall, 34 bottles of beer.",
"Take one down and pass it around, 33 bottles of beer on the wall.",
"",
"33 bottles of beer on the wall, 33 bottles of beer.",
"Take one down and pass it around, 32 bottles of beer on the wall.",
"",
"32 bottles of beer on the wall, 32 bottles of beer.",
"Take one down and pass it around, 31 bottles of beer on the wall.",
"",
"31 bottles of beer on the wall, 31 bottles of beer.",
"Take one down and pass it around, 30 bottles of beer on the wall.",
"",
"30 bottles of beer on the wall, 30 bottles of beer.",
"Take one down and pass it around, 29 bottles of beer on the wall.",
"",
"29 bottles of beer on the wall, 29 bottles of beer.",
"Take one down and pass it around, 28 bottles of beer on the wall.",
"",
"28 bottles of beer on the wall, 28 bottles of beer.",
"Take one down and pass it around, 27 bottles of beer on the wall.",
"",
"27 bottles of beer on the wall, 27 bottles of beer.",
"Take one down and pass it around, 26 bottles of beer on the wall.",
"",
"26 bottles of beer on the wall, 26 bottles of beer.",
"Take one down and pass it around, 25 bottles of beer on the wall.",
"",
"25 bottles of beer on the wall, 25 bottles of beer.",
"Take one down and pass it around, 24 bottles of beer on the wall.",
"",
"24 bottles of beer on the wall, 24 bottles of beer.",
"Take one down and pass it around, 23 bottles of beer on the wall.",
"",
"23 bottles of beer on the wall, 23 bottles of beer.",
"Take one down and pass it around, 22 bottles of beer on the wall.",
"",
"22 bottles of beer on the wall, 22 bottles of beer.",
"Take one down and pass it around, 21 bottles of beer on the wall.",
"",
"21 bottles of beer on the wall, 21 bottles of beer.",
"Take one down and pass it around, 20 bottles of beer on the wall.",
"",
"20 bottles of beer on the wall, 20 bottles of beer.",
"Take one down and pass it around, 19 bottles of beer on the wall.",
"",
"19 bottles of beer on the wall, 19 bottles of beer.",
"Take one down and pass it around, 18 bottles of beer on the wall.",
"",
"18 bottles of beer on the wall, 18 bottles of beer.",
"Take one down and pass it around, 17 bottles of beer on the wall.",
"",
"17 bottles of beer on the wall, 17 bottles of beer.",
"Take one down and pass it around, 16 bottles of beer on the wall.",
"",
"16 bottles of beer on the wall, 16 bottles of beer.",
"Take one down and pass it around, 15 bottles of beer on the wall.",
"",
"15 bottles of beer on the wall, 15 bottles of beer.",
"Take one down and pass it around, 14 bottles of beer on the wall.",
"",
"14 bottles of beer on the wall, 14 bottles of beer.",
"Take one down and pass it around, 13 bottles of beer on the wall.",
"",
"13 bottles of beer on the wall, 13 bottles of beer.",
"Take one down and pass it around, 12 bottles of beer on the wall.",
"",
"12 bottles of beer on the wall, 12 bottles of beer.",
"Take one down and pass it around, 11 bottles of beer on the wall.",
"",
"11 bottles of beer on the wall, 11 bottles of beer.",
"Take one down and pass it around, 10 bottles of beer on the wall.",
"",
"10 bottles of beer on the wall, 10 bottles of beer.",
"Take one down and pass it around, 9 bottles of beer on the wall.",
"",
"9 bottles of beer on the wall, 9 bottles of beer.",
"Take one down and pass it around, 8 bottles of beer on the wall.",
"",
"8 bottles of beer on the wall, 8 bottles of beer.",
"Take one down and pass it around, 7 bottles of beer on the wall.",
"",
"7 bottles of beer on the wall, 7 bottles of beer.",
"Take one down and pass it around, 6 bottles of beer on the wall.",
"",
"6 bottles of beer on the wall, 6 bottles of beer.",
"Take one down and pass it around, 5 bottles of beer on the wall.",
"",
"5 bottles of beer on the wall, 5 bottles of beer.",
"Take one down and pass it around, 4 bottles of beer on the wall.",
"",
"4 bottles of beer on the wall, 4 bottles of beer.",
"Take one down and pass it around, 3 bottles of beer on the wall.",
"",
"3 bottles of beer on the wall, 3 bottles of beer.",
"Take one down and pass it around, 2 bottles of beer on the wall.",
"",
"2 bottles of beer on the wall, 2 bottles of beer.",
"Take one down and pass it around, 1 bottle of beer on the wall.",
"",
"1 bottle of beer on the wall, 1 bottle of beer.",
"Take it down and pass it around, no more bottles of beer on the wall.",
"",
"No more bottles of beer on the wall, no more bottles of beer.",
"Go to the store and buy some more, 99 bottles of beer on the wall.",
]
self.assertEqual(recite(start=99, take=100), expected)
| [
"igorkostan@gmail.com"
] | igorkostan@gmail.com |
a3cac7cff2c5cbd19e783ea7573def0d2719f2c2 | 967cd25c52be494817f69003ebcff5400ab1f51b | /thingspeak/testing/playground.py | e4c6b30449f5b80f757fb7921886b3266ff57354 | [
"MIT"
] | permissive | jutako/raspi | efc107bbf4c83d56ac8f8582dff8b3f56b151772 | f69d15a48765c85960e7d7da175d4f96cb1dfee3 | refs/heads/master | 2021-01-12T13:43:07.055659 | 2018-03-24T20:49:06 | 2018-03-24T20:49:06 | 72,223,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py |
# Sample sensor readings to post to a ThingSpeak channel.
temp = 10
hum = 20
# ThingSpeak channel write API key.
# NOTE(review): a real-looking API key is committed here; rotate it and load
# it from the environment if this channel is still in use.
key = 'VEHKJKJXTZBYLMVC'
import urllib
# Map the readings onto the channel's update parameters (field1/field2).
values = {'api_key' : key, 'field1' : temp, 'field2' : hum}
# Python 2 API: on Python 3 this would be urllib.parse.urlencode.
postdata = urllib.urlencode(values)
| [
"jussitapiokorpela@gmail.com"
] | jussitapiokorpela@gmail.com |
9fc2d2257d8e3f9e9aa7e8d7bae0c0760336eeb8 | 919b8d06881302998d58cdc760d336c5d70b9055 | /firstdjangoproject/settings.py | c029eacca7b9b257c82ae2ce83c10c7c60487fd7 | [] | no_license | dharadhorda/django | 4f7a64b0fecbae245635755de28bd630f42126ae | 37ba50d002fc3ff2e1978de9f61b1ade3b040fdc | refs/heads/master | 2022-12-12T04:33:08.365851 | 2020-09-10T13:33:32 | 2020-09-10T13:33:32 | 294,416,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,214 | py | """
Django settings for firstdjangoproject project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Project root: two levels above this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = '&om63oy@2xt_rd#@c3=7(7l%catgjgc7zy1_fo*mvdt_1or%z1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG = True Django still permits localhost requests.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project-local application
    'myapp',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstdjangoproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # project-level template folder (note the singular directory name 'template')
        'DIRS': [os.path.join(BASE_DIR,'template')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'firstdjangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Extra locations the static-file finders search during development.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR,'static'),
]
"71008060+dharadhorda@users.noreply.github.com"
] | 71008060+dharadhorda@users.noreply.github.com |
5ce2a703f5302283b074de6d2a1fb30fb8b91aa4 | bc0938b96b86d1396cb6b403742a9f8dbdb28e4c | /aliyun-python-sdk-nas/aliyunsdknas/request/v20170626/DescribeTagsRequest.py | d76b528b9d21f049ae887b42b56847b5cd568288 | [
"Apache-2.0"
] | permissive | jia-jerry/aliyun-openapi-python-sdk | fb14d825eb0770b874bc123746c2e45efaf64a6d | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | refs/heads/master | 2022-11-16T05:20:03.515145 | 2020-07-10T08:45:41 | 2020-07-10T09:06:32 | 278,590,780 | 0 | 0 | NOASSERTION | 2020-07-10T09:15:19 | 2020-07-10T09:15:19 | null | UTF-8 | Python | false | false | 2,120 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknas.endpoint import endpoint_data
class DescribeTagsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'NAS', '2017-06-26', 'DescribeTags','nas')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
for i in range(len(Tags)):
if Tags[i].get('Value') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Value' , Tags[i].get('Value'))
if Tags[i].get('Key') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Key' , Tags[i].get('Key'))
def get_FileSystemId(self):
return self.get_query_params().get('FileSystemId')
def set_FileSystemId(self,FileSystemId):
self.add_query_param('FileSystemId',FileSystemId) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
4166e7f506510ccae3d18172138df9b4a82e5770 | 490fafa60041db0e4ba1cd8f0a147f26bcde8b81 | /Data-Structures/lists/finding.py | 9d600b83bb23370360836e7c079ac8cf1f49eac0 | [] | no_license | vahidsediqi/Python-basic-codes | 29bad84a700b91f7beb6f585634914e0e0523652 | ed1af51610d1d0c1d9f1cc1b032365b7f917686f | refs/heads/master | 2021-05-26T03:51:39.541880 | 2020-05-20T13:33:14 | 2020-05-20T13:33:14 | 254,041,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | letters = ['a','b','c','d','e']
# finding index of an item
# if the item is not in the list we get an error
# to solve it we have to use an if statement
# list.index() returns the position of a value and raises ValueError when
# the value is absent, so the second lookup is guarded.
print(letters.index('d'))
try:
    print(letters.index('f'))
except ValueError:
    print('The letter is not exist')
"vsediqi@live.com"
] | vsediqi@live.com |
3a6d883858b8888b951ebe9478f8c055c4023350 | b801a549da493431071ac13c13927d545e036a82 | /KNN.py | 448c8057381b79425c45e4537c49e0d9800c1703 | [] | no_license | minytie/Recommender-Systems-with-different-algorithms | eca2b416a4e57b4e2f2324bb232f26db8de080df | 3bd8c007608eb5a479bc03720a2ef3ccbb6515ba | refs/heads/master | 2022-12-15T13:46:51.901417 | 2020-09-06T00:58:10 | 2020-09-06T00:58:10 | 293,178,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | from surprise import KNNBasic
from surprise import Dataset
from surprise.model_selection import cross_validate
import json
from tqdm import tqdm
# Load the movielens-100k dataset (download it if needed).
data = Dataset.load_builtin('ml-100k')
for k in tqdm([5 * i for i in range(1,20)],desc= "running KNN : "):
# Use the famous SVD algorithm.
algo = KNNBasic(k = 5)
#algo.test()
# Run 5-fold cross-validation and print results.
performance = cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, n_jobs = -1 ,verbose=True)
for key in performance:
performance[key] = list(performance[key])
with open("evaluations/KNN_%d.json" % k,"w") as f:
f.write(json.dumps(performance))
| [
"victor@MacBook-Pro.local"
] | victor@MacBook-Pro.local |
7f150fe5a4b359dfe351f5c2d10a18def94f24ef | 38b5c22896452c7583073f0f719dcaaf98c0e7e2 | /client-GUI.py | 8b944cd71e788934413e2e9894eb3dc37af6b16b | [] | no_license | crcollver/group-messaging-app | 8be7565b62b45cec90cef197deffb5c68efbc5b6 | 89542c43ab6f566d457ed8cdec650e280b212193 | refs/heads/master | 2021-03-28T03:25:09.918567 | 2020-05-06T00:59:44 | 2020-05-06T00:59:44 | 247,832,270 | 0 | 0 | null | 2020-05-06T00:57:57 | 2020-03-16T22:55:50 | Python | UTF-8 | Python | false | false | 6,835 | py | #---------------------------------------------------
# Cameron Collver, Erik Shepard, & Rodolfo Rodriguez
# Anonymous Group Messaging - client-GUI.py
# Client Tkinter script for connecting to server.py
# Uses a default port of 12000 that is unchangeable for now
#
# SOURCES:
# https://www.youtube.com/watch?v=FKlmAkEb40s
# http://net-informations.com/python/net/thread.htm
# https://www.tutorialspoint.com/socket-programming-with-multi-threading-in-python
# https://github.com/effiongcharles/multi_user_chat_application_in_python
#---------------------------------------------------
from __future__ import unicode_literals
import socket
import threading
import tkinter
from tkinter import messagebox
from tkinter import simpledialog
host = socket.gethostbyname(socket.gethostname())
port = 12000
clientSocket = None
username = ""
window = tkinter.Tk()
window.title("Client")
def close_connection():
""" Handles an explicit closing of the connection """
if clientSocket is not None:
clientSocket.sendall("exit".encode("utf-8"))
clientSocket.close()
window.destroy()
window.protocol("WM_DELETE_WINDOW", close_connection)
# Top frame to connect
topFrame = tkinter.Frame(window)
lblHost = tkinter.Label(topFrame, text = "Host IP:").pack(side=tkinter.LEFT)
entHost = tkinter.Entry(topFrame)
entHost.pack(side=tkinter.LEFT, padx=(0, 3))
entHost.insert(tkinter.END, host)
btnConnect = tkinter.Button(topFrame, text="Connect", command=lambda : connect())
btnConnect.pack(side=tkinter.LEFT)
topFrame.pack(side=tkinter.TOP, pady=(5, 10))
# Display frame to show all messages
displayFrame = tkinter.Frame(window)
scrollBar = tkinter.Scrollbar(displayFrame)
scrollBar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
tkDisplay = tkinter.Text(displayFrame, height=20, width=60)
tkDisplay.pack(side=tkinter.LEFT, fill=tkinter.Y, padx=(5, 0))
tkDisplay.tag_config("tag_your_message", foreground="blue")
tkDisplay.tag_config("tag_direct_message", foreground="green")
scrollBar.config(command=tkDisplay.yview)
tkDisplay.config(yscrollcommand=scrollBar.set, background="#F4F6F7", highlightbackground="grey", state="disabled")
displayFrame.pack(side=tkinter.TOP)
# bottom frame for sending a message
bottomFrame = tkinter.Frame(window)
tkMessage = tkinter.Text(bottomFrame, height=2, width=60)
tkMessage.pack(side=tkinter.LEFT, padx=(5, 13), pady=(5, 10))
tkMessage.config(highlightbackground="grey", state="disabled")
tkMessage.bind("<Return>", (lambda event: get_msg(tkMessage.get("1.0", tkinter.END))))
bottomFrame.pack(side=tkinter.BOTTOM)
def connect():
""" Connect to specified server based on runtime arguments """
global clientSocket, host, port
if len(entHost.get()) > 1:
host = entHost.get() # change host to user specified host
try:
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((host, port))
select_username()
if clientSocket is not None and username: # as long as the socket still exists and there is a valid username
# Create the thread that will receive messages only after username is established
# This thread will be killed once the socket is closed
RECEIVE_THREAD = threading.Thread(target=receive_message)
RECEIVE_THREAD.daemon = True
RECEIVE_THREAD.start()
except Exception as e:
tkinter.messagebox.showerror(title="ERROR", message=f"Unable to connect to host: {host}:{port}. Server may be unavailable.")
def select_username():
""" Once a connection is established, make user select a username for this session """
global username, clientSocket
while True:
try:
username = simpledialog.askstring("Setup", "Enter a username...", parent=window)
if username is not None:
clientSocket.sendall(username.encode("utf-8"))
server_res = clientSocket.recv(1024) #sending potential username to server
# checking for byte message that server sent back should be fine for our app
tkDisplay.config(state=tkinter.NORMAL)
if (server_res.decode() == "username_avail"):
tkDisplay.insert(tkinter.END, f"Great this username is available!\n<@{username}> will be your username for this session.")
tkMessage.config(state=tkinter.NORMAL) # set the message box to an enabled state to capture username
entHost.config(state=tkinter.DISABLED) # Disable host input box once a connection has been made
btnConnect.config(state=tkinter.DISABLED) # Disable connect button once a connection has been made
break
tkDisplay.insert(tkinter.END, f"The username {username} seems to be taken, lets try again.\n")
tkDisplay.config(state=tkinter.DISABLED)
else:
clientSocket.close()
clientSocket = None
break # return to the main window and have user reconnect
except ConnectionAbortedError:
tkinter.messagebox.showerror(title="SERVER ERROR", message=f"Server on {host}:{port} has shutdown unexpectedly.")
break
def receive_message():
""" Handles the receiving of server messages, without blocking main thread """
global clientSocket
while True:
try:
server_msg = clientSocket.recv(1024)
if not server_msg:
break
tkDisplay.config(state=tkinter.NORMAL)
if server_msg.decode().startswith("From <@"): # if message is a direct message, color it green
tkDisplay.insert(tkinter.END, f"\n{server_msg.decode()}", "tag_direct_message")
else:
tkDisplay.insert(tkinter.END, f"\n{server_msg.decode()}")
tkDisplay.config(state=tkinter.DISABLED)
tkDisplay.see(tkinter.END)
# throws this error when server shuts down with clients still connected
except ConnectionResetError:
tkinter.messagebox.showerror(title="SERVER ERROR", message=f"Server on {host}:{port} has shutdown unexpectedly.")
break
# throws this error when user types exit, suppresses it
except ConnectionAbortedError:
break
clientSocket.close()
window.destroy()
def get_msg(msg):
""" Get the user message from the message text box """
msg = msg.replace('\n', '')
# if this is a regular message, print it to the window
# otherwise user is sending potential user name so we do not display
tkDisplay.config(state=tkinter.NORMAL) # cannot insert into a window that is disabled
tkDisplay.insert(tkinter.END, f"\n<@{username}>: {msg}", "tag_your_message")
tkDisplay.config(state=tkinter.DISABLED) # disable window once insert it performed
tkDisplay.see(tkinter.END) # scroll if not enough room in window
tkMessage.delete('1.0', tkinter.END) # remove text in message window
send_message(msg)
def send_message(msg):
""" Sends the message to server on the main thread """
clientSocket.sendall(msg.encode("utf-8"))
if msg == "exit":
close_connection()
window.mainloop()
| [
"crcollver@gmail.com"
] | crcollver@gmail.com |
f29c840f7b7123d115bd70933064581e49a94100 | 96faedaf3717abbb7f6ddf215b7152808b344301 | /build_model.py | 16e2f2d343909a2776c51d81254f7eb0c58b4a68 | [] | no_license | Anonymous-Alien/Greedy-Attack-and-Gumbel-Attack | 9c1b6e6d0ec334efbe11581c7a32f7b545932bfb | 021edaf7318850df4437c8de56c02321d2d4f552 | refs/heads/master | 2020-04-18T01:36:46.665448 | 2019-01-23T05:51:05 | 2019-01-23T05:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,423 | py | import tensorflow as tf
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Embedding, Conv1D, Input, GlobalMaxPooling1D, Multiply, Lambda, Permute,MaxPooling1D, Flatten, LSTM, Bidirectional, GRU, GlobalAveragePooling1D
from keras.datasets import imdb
from keras.objectives import binary_crossentropy
from keras.metrics import binary_accuracy as accuracy
from keras.optimizers import RMSprop
from keras import backend as K
from keras.preprocessing import sequence
from keras.callbacks import ModelCheckpoint
import os, itertools, math
def construct_original_network(emb, data,trainable=True):
if data == 'imdbcnn':
filters = 250
kernel_size = 3
hidden_dims = 250
net = Dropout(0.2, name = 'dropout_1')(emb)
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
net = Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1,
name = 'conv1d_1',trainable=trainable)(net)
# we use max pooling:
net = GlobalMaxPooling1D(name = 'global_max_pooling1d_1')(net)
# We add a vanilla hidden layer:
net = Dense(hidden_dims, name = 'dense_1',trainable=trainable)(net)
net = Dropout(0.2, name = 'dropout_2')(net)
net = Activation('relu', name = 'activation_2')(net)
# We project onto a single unit output layer, and squash it with a sigmoid:
net = Dense(2, name = 'dense_2',trainable=trainable)(net)
preds = Activation('softmax', name = 'activation_3')(net)
return preds
elif data == 'yahoolstm':
lstm_out = Bidirectional(LSTM(256,trainable=trainable), trainable = trainable)(emb)
net = Dropout(0.5)(lstm_out)
preds = Dense(10, activation='softmax',trainable=trainable)(net)
return preds
class TextModel():
def __init__(self, data, train = False):
self.data = data
print('Loading TextModel...')
if data == 'imdbcnn':
filters = 250
hidden_dims = 250
self.embedding_dims = 50
self.maxlen = 400
self.num_classes = 2
self.num_words = 20002
self.type = 'word'
if not train:
K.set_learning_phase(0)
X_ph = Input(shape=(self.maxlen,), dtype='int32')
emb_layer = Embedding(
self.num_words,
self.embedding_dims,
input_length=self.maxlen,
name = 'embedding_1'
)
emb_out = emb_layer(X_ph)
if train:
preds = construct_original_network(emb_out, data)
else:
emb_ph = Input(
shape=(self.maxlen, self.embedding_dims),
dtype='float32'
)
preds = construct_original_network(emb_ph, data)
if not train:
model1 = Model(X_ph, emb_out)
model2 = Model(emb_ph, preds)
pred_out = model2(model1(X_ph))
pred_model = Model(X_ph, pred_out)
pred_model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
self.pred_model = pred_model
grads = []
for c in range(self.num_classes):
grads.append(tf.gradients(preds[:,c], emb_ph))
grads = tf.concat(grads, axis = 0)
# [num_classes, batchsize, maxlen, embedding_dims]
approxs = grads * tf.expand_dims(emb_ph, 0)
# [num_classes, batchsize, maxlen, embedding_dims]
self.sess = K.get_session()
self.grads = grads
self.approxs = approxs
self.input_ph = X_ph
self.emb_out = emb_out
self.emb_ph = emb_ph
weights_name = 'original.h5'
model1.load_weights('{}/models/{}'.format(data, weights_name),
by_name=True)
model2.load_weights('{}/models/{}'.format(data, weights_name),
by_name=True)
self.pred_model.load_weights('{}/models/{}'.format(data, weights_name),
by_name=True)
print('Model constructed.', weights_name)
# For validating the data.
emb_weights = emb_layer.get_weights()
emb_weights[0][0] = np.zeros(50)
self.emb_weights = emb_weights[0]
emb_layer.set_weights(emb_weights)
else:
pred_model = Model(X_ph, preds)
pred_model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
self.pred_model = pred_model
from load_data import Data
dataset = Data(self.data, train = True)
self.train(dataset)
print('Training is done.')
elif data == 'agccnn':
from agccnn.data_helpers import create_vocab_set, construct_batch_generator, find_words_positions
filter_kernels = [7, 7, 3, 3, 3, 3]
dense_outputs = 1024
self.charlen = 1014
self.maxlen = 1014
nb_filter = 256
self.num_classes = 4
self.vocab, self.reverse_vocab, self.vocab_size, self.vocab_check = create_vocab_set()
self.embedding_dims = self.vocab_size
self.type = 'char'
K.set_learning_phase(1 if train else 0)
#Define what the input shape looks like
inputs = Input(shape=(self.charlen, self.vocab_size), name='input', dtype='float32')
conv = Conv1D(filters = nb_filter, kernel_size= filter_kernels[0], padding = 'valid', activation = 'relu', input_shape=(self.charlen, self.vocab_size))(inputs)
conv = MaxPooling1D(pool_size=3)(conv)
conv1 = Conv1D(filters = nb_filter, kernel_size= filter_kernels[1], padding = 'valid', activation = 'relu')(conv)
conv1 = MaxPooling1D(pool_size=3)(conv1)
conv2 = Conv1D(filters = nb_filter, kernel_size= filter_kernels[2], padding = 'valid', activation = 'relu')(conv1)
conv3 = Conv1D(filters = nb_filter, kernel_size= filter_kernels[3], padding = 'valid', activation = 'relu')(conv2)
conv4 = Conv1D(filters = nb_filter, kernel_size= filter_kernels[4], padding = 'valid', activation = 'relu')(conv3)
conv5 = Conv1D(filters = nb_filter, kernel_size= filter_kernels[5], padding = 'valid', activation = 'relu')(conv4)
conv5 = MaxPooling1D(pool_size=3)(conv5)
conv5 = Flatten()(conv5)
#Two dense layers with dropout of .5
z = Dropout(0.5)(Dense(dense_outputs, activation='relu')(conv5))
z = Dropout(0.5)(Dense(dense_outputs, activation='relu')(z))
#Output dense layer with softmax activation
pred = Dense(self.num_classes, activation='softmax', name='output')(z)
grads = []
for c in range(self.num_classes):
grads.append(tf.gradients(pred[:,c], inputs))
grads = tf.concat(grads, axis = 0)
# [num_classes, batchsize, self.charlen, embedding_dims]
approxs = grads * tf.expand_dims(inputs, 0)
# [num_classes, batchsize, self.charlen, embedding_dims]
model = Model(inputs, pred)
model.compile(
loss='categorical_crossentropy',
optimizer="sgd",
metrics=['accuracy']
)
model.load_weights(
'agccnn/params/crepe_model_weights-15.h5',
by_name=True
)
self.sess = K.get_session()
self.grads = grads
self.approxs = approxs
self.input_ph = inputs
self.model = model
from nltk.tokenize.moses import MosesDetokenizer
from nltk import word_tokenize
detokenizer = MosesDetokenizer()
self.tokenize = word_tokenize
self.detokenize = detokenizer.detokenize
self.construct_batch_generator = construct_batch_generator
self.find_words_positions = lambda sent: find_words_positions(
sent,
word_tokenize(sent),
self.charlen,
self.vocab,
self.vocab_size,
self.vocab_check
)
self.find_chars_positions = lambda sent: find_words_positions(
sent,
list(sent.lower().replace(' ', '')),
self.charlen,
self.vocab,
self.vocab_size,
self.vocab_check,
True
)
elif data == 'yahoolstm':
self.maxlen = 400
self.num_classes = 10
self.num_words = 20000
self.batch_size = 40
self.embedding_dims = 300
if not train:
K.set_learning_phase(0)
X_ph = Input(shape=(self.maxlen,), dtype='int32')
emb_layer = Embedding(
input_dim=self.num_words + 1,
output_dim= self.embedding_dims,
input_length=self.maxlen,
name = "embedding",
trainable=True)
emb = emb_layer(X_ph)
if train:
preds = construct_original_network(emb, data)
else:
emb_ph = Input(shape=(self.maxlen,self.embedding_dims), dtype='float32')
preds = construct_original_network(emb_ph, data)
if train:
model = Model(X_ph, preds)
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
else:
model1 = Model(X_ph, emb)
model2 = Model(emb_ph, preds)
pred_out = model2(model1(X_ph))
model = Model(X_ph, pred_out)
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
# Construct gradients.
grads = []
for c in range(self.num_classes):
grads.append(tf.gradients(preds[:,c], emb_ph))
grads = tf.concat(grads, axis = 0)
# [num_classes, batchsize, maxlen, embedding_dims]
approxs = grads * tf.expand_dims(emb_ph, 0)
# [num_classes, batchsize, maxlen, embedding_dims]
prev_epoch = 0; prev_itr = 7
model1.load_weights(
'yahoolstm/models/original-{}-{}.hdf5'.format(prev_epoch, prev_itr),
by_name = True
)
model2.load_weights(
'yahoolstm/models/original-{}-{}.hdf5'.format(prev_epoch, prev_itr),
by_name = True
)
emb_weights = emb_layer.get_weights()
self.emb_weights = emb_weights
self.emb_out = emb
self.emb_ph = emb_ph
self.sess = K.get_session()
self.grads = grads
self.approxs = approxs
self.input_ph = X_ph
self.pred_model = model
self.type = 'word'
if train:
from load_data import Data
print('Loading data...')
dataset = Data(data, train = True)
print('Training...')
self.train(dataset)
def train(self, dataset):
if self.data == 'imdbcnn':
epochs = 5
batch_size = 40
filepath = '{}/models/original.h5'.format(self.data)
checkpoint = ModelCheckpoint(
filepath,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
callbacks_list = [checkpoint]
self.pred_model.fit(dataset.x_train, dataset.y_train, validation_data=(dataset.x_val, dataset.y_val),callbacks = callbacks_list, epochs=epochs, batch_size=batch_size)
elif self.data == 'yahoolstm':
model = self.pred_model
if 'models' not in os.listdir(self.data):
os.mkdir('{}/models'.format(self.data))
num_iters = int(math.ceil(len(dataset.x_train) * 1.0 / self.batch_size))
num_val_iters = int(math.ceil(len(dataset.x_val) * 1.0 / self.batch_size))
save_freq = 20
save_interval = int(num_iters // save_freq)
val_interval = 20
np.random.seed(0)
epochs = 3
for e in range(epochs):
print("epoch %d" % e)
# random permutes the data.
idx = np.random.permutation(len(dataset.x_train))
x_train, y_train = dataset.x_train[idx], dataset.y_train[idx]
val_batch_itr = 0
for i in range(0, num_iters):
batch_x = x_train[i * self.batch_size: (i+1) * self.batch_size]
batch_y = y_train[i * self.batch_size: (i+1) * self.batch_size]
curr_loss, curr_acc = model.train_on_batch(batch_x, batch_y)
if i == 0:
training_loss, training_acc = curr_loss, curr_acc
else:
training_loss = (i * training_loss + 1 * curr_loss) / float(i+1)
training_acc = (i * training_acc + 1 * curr_acc) / float(i+1)
if (i+1) % save_interval == 0:
current_freq = (i+1) // save_interval
model.save_weights('{}/models/original-{}-{}.hdf5'.format(self.data, e,current_freq))
print('Model saved at Epoch {}, Step {}'.format(e, i))
if (i+1) % val_interval == 0:
current_itr = val_batch_itr % num_val_iters
batch_x = dataset.x_val[current_itr * self.batch_size:(current_itr+1) * self.batch_size]
batch_y = dataset.y_val[current_itr * self.batch_size:(current_itr+1) * self.batch_size]
current_loss, current_acc = model.test_on_batch(batch_x, batch_y)
if val_batch_itr == 0:
val_loss, val_acc = current_loss, current_acc
else:
val_loss = (val_batch_itr * val_loss + current_loss) / float(val_batch_itr+1)
val_acc = (val_batch_itr * val_acc + current_acc) / float(val_batch_itr+1)
val_batch_itr += 1
print('Epoch: {} Step: {}; train_loss {}; train_acc {}; val_loss {}; val_acc {}'.format(e, i, training_loss, training_acc,val_loss, val_acc))
model.save_weights('{}/models/original-{}.hdf5'.format(self.data, e))
entire_val_loss, entire_val_acc = model.evaluate(dataset.x_val, dataset.y_val, verbose=0)
print('Epoch: {}; loss {}; acc {}'.format(epoch, val_loss, val_acc))
print('Epoch: {}; entire loss {}; acc {}'.format(epoch, entire_val_loss, entire_val_acc))
print('Saving model at the end of the epoch...')
def train_augment(self, dataset, new_data, method, changing_way):
print('Training model on augmented data...')
if self.data == 'imdbcnn':
epochs = 8
batch_size = 40
filepath = '{}/models/augment_{}_{}.h5'.format(self.data, method, changing_way)
checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
x = np.vstack([dataset.x_train, new_data[0]])
y = np.vstack([dataset.y_train, new_data[1]])
idx = np.random.permutation(len(x))
x = np.array(x)[idx]; y = np.array(y)[idx]
self.pred_model.fit(
x,
y,
validation_data=(dataset.x_val, dataset.y_val),
callbacks = callbacks_list,
epochs=epochs,
batch_size=batch_size
)
def predict(self, x, verbose=0):
if self.data in ['imdbcnn','yahoolstm']:
if type(x) == list or x.shape[1] < self.maxlen:
x = np.array(sequence.pad_sequences(x, maxlen=self.maxlen))
return self.pred_model.predict(x, batch_size = 2500,
verbose = verbose)
elif self.data == 'agccnn':
# x should be a list of texts.
if isinstance(x[0], basestring):
generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize = 128)
predictions = []
for batch_data in generator:
predictions.append(self.model.predict(batch_data, verbose = verbose))
return np.concatenate(predictions, axis = 0)
return self.model.predict(x, verbose = verbose)
def compute_gradients(self, x):
if self.data in ['imdbcnn','yahoolstm']:
batchsize = 400
num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
grads_val = []
for i in range(num_iters):
batch_data = x[i * batchsize: (i+1) * batchsize]
batch_emb = self.sess.run(self.emb_out,
feed_dict = {self.input_ph: batch_data})
batch_grads = self.sess.run(self.grads, feed_dict = {self.emb_ph: batch_emb}) # [num_classes, batchsize, maxlen, embedding_dims]
grads_val.append(batch_grads)
grads_val = np.concatenate(grads_val, axis = 1)
# [num_classes, num_data, maxlen, embedding_dims]
pred_val = self.predict(x)
# [num_data, maxlen, embedding_dims]
gradients = grads_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
return gradients #np.sum(abs(class_specific_scores), axis = -1)
elif self.data == 'agccnn':
generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize = 128)
grads_val = []
for s, batch_data in enumerate(generator):
grads_val.append(self.sess.run(self.grads, feed_dict = {self.input_ph: batch_data}))
# [num_classes, num_data, charlen, embedding_dims]
grads_val = np.concatenate(grads_val, axis = 1)
pred_val = self.predict(x)
# [num_data, charlen, embedding_dims]
class_specific_grads = grads_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
return class_specific_grads
def compute_taylor_approximation(self, x):
if self.data in ['imdbcnn','yahoolstm']:
batchsize = 128
num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
approxs_val = []
for i in range(num_iters):
batch_data = x[i * batchsize: (i+1) * batchsize]
batch_emb = self.sess.run(self.emb_out,
feed_dict = {self.input_ph: batch_data})
batch_approxs = self.sess.run(self.approxs, feed_dict = {self.emb_ph: batch_emb}) # [num_classes, batchsize, maxlen, embedding_dims]
approxs_val.append(batch_approxs)
approxs_val = np.concatenate(approxs_val, axis = 1)
# [num_classes, num_data, length, embedding_dims]
pred_val = self.predict(x)
# [num_data, length, embedding_dims]
class_specific_scores = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
# [num_data, length]
return np.sum(class_specific_scores, axis = -1)
elif self.data == 'agccnn':
generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize = 128)
approxs_val = []
indices = []
for s, batch_data in enumerate(generator):
approxs_val.append(self.sess.run(self.approxs, feed_dict = {self.input_ph: batch_data}))
for sent in x[128 * s: 128 * (s+1)]:
indices.append(self.find_words_positions(sent))
# [num_classes, num_data, charlen, embedding_dims]
approxs_val = np.concatenate(approxs_val, axis = 1)
# print(np.sum(approxs_val[0] != 0, axis = -1))
pred_val = self.predict(x)
# [num_data, charlen, embedding_dims]
class_specific_approxs = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
approxs_score = []
for i, approxs_val in enumerate(class_specific_approxs):
approx_score = [np.sum(np.sum(approxs_val[start_idx:end_idx], axis = 0), axis = 0) for start_idx, end_idx in indices[i]] # [wordlen]
approxs_score.append(np.array(approx_score))
# print(np.array(approx_score).shape)
return approxs_score
def compute_integrated_gradients(self, x):
if self.data in ['imdbcnn','yahoolstm']:
batchsize = 20#128 if self.data == 'imdbcnn' else 40
steps = 10
approxs_val = []
emb_vals = []
num_iters1 = int(math.ceil(len(x) * 1.0 / batchsize))
for i in range(num_iters1):
batch_data = x[i * batchsize: (i+1) * batchsize]
batch_emb = self.sess.run(self.emb_out,
feed_dict = {self.input_ph: batch_data})
step_batch_emb = [batch_emb * float(s) / steps for s in range(1, steps+1)]
# [steps,batchsize, maxlen, embedding_dimension]
emb_vals.append(step_batch_emb)
emb_vals = np.concatenate(emb_vals, axis = 1)
# [steps, num_data, maxlen, embedding_dimension]
emb_vals = np.reshape(emb_vals, [-1, self.maxlen, self.embedding_dims])
num_iters = int(math.ceil(len(emb_vals) * 1.0 / batchsize))
for i in range(num_iters):
print(i)
batch_emb = emb_vals[i * batchsize: (i+1) * batchsize]
batch_approxs = self.sess.run(self.approxs, feed_dict = {self.emb_ph: batch_emb})
# [num_classes, batchsize, maxlen, embedding_dims]
approxs_val.append(batch_approxs)
approxs_val = np.concatenate(approxs_val, axis = 1)
# [num_classes, steps * num_data, length, embedding_dims]
approxs_val = np.reshape(approxs_val,
[self.num_classes, steps, len(x), self.maxlen, self.embedding_dims])
approxs_val = np.mean(approxs_val, axis = 1)
pred_val = self.predict(x)
# [num_data, length, embedding_dims]
class_specific_scores = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
# [num_data, length]
return np.sum(class_specific_scores, axis = -1)
elif self.data == 'agccnn':
batchsize = 128
generator = self.construct_batch_generator(x, self.vocab, self.vocab_size, self.vocab_check, self.charlen, batchsize = batchsize)
steps = 100
approxs_val = []
indices = []
for s, batch_data in enumerate(generator):
emb_vals = [batch_data * float(step) / steps for step in range(1, steps+1)]
batch_approxs = np.mean([self.sess.run(self.approxs, feed_dict = {self.input_ph: emb_val_s}) for emb_val_s in emb_vals], axis = 0)
# [num_classes, batchsize, maxlen, embedding_dims]
approxs_val.append(batch_approxs)
for sent in x[batchsize * s: batchsize * (s+1)]:
indices.append(self.find_words_positions(sent))
# [num_classes, num_data, charlen, embedding_dims]
approxs_val = np.concatenate(approxs_val, axis = 1)
pred_val = self.predict(x)
# [num_data, charlen, embedding_dims]
class_specific_approxs = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
approxs_score = []
for i, approxs_val in enumerate(class_specific_approxs):
approx_score = [np.sum(np.sum(approxs_val[start_idx:end_idx], axis = 0), axis = 0) for start_idx, end_idx in indices[i]] # [wordlen]
approxs_score.append(np.array(approx_score))
return approxs_score
| [
"noreply@github.com"
] | noreply@github.com |
8ac060b920fdbfb4883e5b8557a11dcfbd7bdef8 | 47b739ab1293f7c6244ac70b438bfdcff61fe6fb | /question_model.py | a7024d38bdba531547ddbfb9a8059e3f9b1b5547 | [] | no_license | TotaltEcplise/Quiz_oop | c542d0dc0681f178c3710215c44c0b8e9f4d78b8 | 6c4ee83d3847495242d04e223371a8e605b1587f | refs/heads/master | 2023-03-07T06:42:00.136781 | 2021-02-22T13:16:18 | 2021-02-22T13:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | class Question:
def __init__(self, q_text, q_answer):
self.text = q_text
self.answer = q_answer
| [
"77200688+huserdil@users.noreply.github.com"
] | 77200688+huserdil@users.noreply.github.com |
80e786872143779e1fca9b83e5ab6e2b6162d70c | 7c9a3e527c8e444c1be745a26c5803ded4977bf4 | /trades/migrations/0002_auto__add_trade__add_item.py | 714ab2836c1c559a46a5712e7c66496902cb44e8 | [] | no_license | sekl/esotrades | 5b536fb9632ebecdca3a479a5d255c513b8078c1 | 683f8f8e29f89eb0ef55ec56544a4d07a1944077 | refs/heads/master | 2016-09-09T19:52:38.898875 | 2014-03-08T12:26:49 | 2014-03-08T12:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Trade'
db.create_table(u'trades_trade', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('body', self.gf('django.db.models.fields.TextField')()),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'trades', ['Trade'])
# Adding model 'Item'
db.create_table(u'trades_item', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'trades', ['Item'])
def backwards(self, orm):
# Deleting model 'Trade'
db.delete_table(u'trades_trade')
# Deleting model 'Item'
db.delete_table(u'trades_item')
models = {
u'trades.item': {
'Meta': {'object_name': 'Item'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'trades.trade': {
'Meta': {'object_name': 'Trade'},
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['trades'] | [
"sebastian.klier@gmx.de"
] | sebastian.klier@gmx.de |
582ef42efdfd7e420d4b88d395d1bce8c2254139 | 55826466383423f170e4fe8999e60670edd53704 | /script/DRQN_hindsight/2d/DRQN_hindsight_2D_static.py | cb2b14f8814f460763481320ae10f2931a84ed39 | [] | no_license | siyuan2018/SNAC | c48dc7ced78f30bc6847025b8637337737bd3467 | 049c0566e2c154f93b5015a1a4607fdb8b4be117 | refs/heads/main | 2023-03-28T20:25:53.886811 | 2021-04-05T18:19:46 | 2021-04-05T18:19:46 | 362,850,937 | 1 | 0 | null | 2021-04-29T14:46:51 | 2021-04-29T14:46:50 | null | UTF-8 | Python | false | false | 12,543 | py | import sys
import torch
import torch.nn as nn
import numpy as np
import pickle
import random
import time
import os
from collections import deque
sys.path.append('../../Env/2D/')
from DMP_Env_2D_static import deep_mobile_printing_2d1r
from DMP_Env_2D_static_hindsight_replay import deep_mobile_printing_2d1r_hindsight
# plan_choose: 0 Dense circle, 1 Sparse circle
plan_choose = 0
log_path = "./log/DRQN_hindsight/2D/Static/plan_"+str(plan_choose)+"/"
if os.path.exists(log_path) == False:
os.makedirs(log_path)
print('2D_Static')
print('plan_choose:',plan_choose)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
env = deep_mobile_printing_2d1r(plan_choose=plan_choose)
env_hindsight = deep_mobile_printing_2d1r_hindsight(plan_choose=plan_choose)
print("device_using:", device)
######################
# hyper parameter
minibatch_size=64
Lr=0.00001
N_iteration=10000
N_iteration_test=10
alpha=0.9
Replay_memory_size=1000
Update_traget_period=200
Action_dim=env.action_dim
State_dim=env.state_dim
hidden_state_dim=256
Time_step=20
UPDATE_FREQ=5
INITIAL_EPSILON = 0.2
FINAL_EPSILON = 0.0
######################
use_hindsight=True
print("State_dim:",State_dim)
print("plan_width:", env.plan_width)
print("^^^^^^^^^^^^^^^^^^^^^^^^^^")
def iou(environment_memory,environment_plan,HALF_WINDOW_SIZE,plan_height,plan_width):
component1=environment_plan[HALF_WINDOW_SIZE:HALF_WINDOW_SIZE+plan_height,\
HALF_WINDOW_SIZE:HALF_WINDOW_SIZE+plan_width].astype(bool)
component2=environment_memory[HALF_WINDOW_SIZE:HALF_WINDOW_SIZE+plan_height,\
HALF_WINDOW_SIZE:HALF_WINDOW_SIZE+plan_width].astype(bool)
overlap = component1*component2 # Logical AND
union = component1 + component2 # Logical OR
IOU = overlap.sum()/float(union.sum())
return IOU
def get_and_init_FC_layer(din, dout):
li = nn.Linear(din, dout)
li.weight.data.normal_(0, 0.1)
return li
class Q_NET(nn.Module):
    """Dueling recurrent Q-network: 3 FC layers -> LSTM -> value/advantage heads."""
    def __init__(self,out_size,hidden_size):
        super(Q_NET, self).__init__()
        self.out_size = out_size          # number of discrete actions
        self.hidden_size = hidden_size    # LSTM hidden/cell state width
        self.fc_1 = get_and_init_FC_layer(State_dim, 64)
        self.fc_2 = get_and_init_FC_layer(64, 128)
        self.fc_3 = get_and_init_FC_layer(128, 128)
        self.rnn=nn.LSTM(128,hidden_size,num_layers=1,batch_first=True)
        self.adv = get_and_init_FC_layer(hidden_size, self.out_size)  # advantage head
        self.val = get_and_init_FC_layer(hidden_size, 1)              # state-value head
        self.relu = nn.ReLU()
    def forward(self,x,bsize,time_step,hidden_state,cell_state):
        """Return (Q-values for the last time step, (h_n, c_n)).

        `x` is reshaped to (bsize*time_step, State_dim) for the FC stack,
        then back to (bsize, time_step, 128) for the LSTM.
        """
        x=x.view(bsize*time_step,State_dim)
        x = self.fc_1(x)
        x = self.relu(x)
        x = self.fc_2(x)
        x = self.relu(x)
        x = self.fc_3(x)
        x = self.relu(x)
        x = x.view(bsize,time_step,128)
        lstm_out = self.rnn(x,(hidden_state,cell_state))
        # Only the LSTM output at the final time step feeds the Q heads.
        out = lstm_out[0][:,time_step-1,:]
        h_n = lstm_out[1][0]
        c_n = lstm_out[1][1]
        adv_out = self.adv(out)
        val_out = self.val(out)
        # Dueling aggregation: Q = V + (A - mean(A)).
        qout = val_out.expand(bsize,self.out_size) + (adv_out - adv_out.mean(dim=1).unsqueeze(dim=1).expand(bsize,self.out_size))
        return qout, (h_n,c_n)
    def init_hidden_states(self,bsize):
        """Zero-initialized (h, c) pair shaped (1, bsize, hidden_size) on `device`."""
        h = torch.zeros(1,bsize,self.hidden_size).float().to(device)
        c = torch.zeros(1,bsize,self.hidden_size).float().to(device)
        return h,c
class Memory():
    """Fixed-capacity episodic replay buffer for recurrent Q-learning."""

    def __init__(self, memsize):
        self.memsize = memsize
        # deque(maxlen=...) evicts the oldest episode automatically.
        self.memory = deque(maxlen=self.memsize)

    def add_episode(self, epsiode):
        """Store one full episode (a list of transition tuples)."""
        self.memory.append(epsiode)

    def get_batch(self, bsize, time_step):
        """Sample `bsize` episodes and cut a random contiguous window of
        length `time_step` out of each one."""
        chosen = random.sample(self.memory, bsize)
        windows = []
        for episode in chosen:
            start = np.random.randint(0, len(episode) + 1 - time_step)
            windows.append(episode[start:start + time_step])
        return windows
class DQN_AGNET():
    """DRQN agent: epsilon-greedy action selection plus Double-style
    eval/target network training over sampled episode windows."""
    def __init__(self,device):
        self.device=device
        self.Eval_net= Q_NET(Action_dim,hidden_size=hidden_state_dim).to(device)
        self.Target_net = Q_NET(Action_dim,hidden_size=hidden_state_dim).to(device)
        self.learn_step = 0 # counting the number of learning for update traget periodiclly
        # counting the transitions
        self.optimizer = torch.optim.Adam(self.Eval_net.parameters(), lr=Lr)
        self.loss = nn.SmoothL1Loss()
        self.loss_his=[]                  # per-step training loss history
        self.greedy_epsilon=0.2           # exploration rate (overwritten by caller)
        self.replaymemory=Memory(Replay_memory_size)
    def choose_action(self,s,hidden_state,cell_state):
        """Epsilon-greedy action for state `s`; always advances the LSTM
        state, even on random (exploratory) actions."""
        state=torch.from_numpy(s).float().to(self.device)
        choose=np.random.uniform()
        if choose<=self.greedy_epsilon:
            # Exploration: still run the network so (h, c) stay in sync.
            model_out = self.Eval_net.forward(state,bsize=1,time_step=1,hidden_state=hidden_state,cell_state=cell_state)
            action=np.random.randint(0, Action_dim)
            hidden_state = model_out[1][0]
            cell_state = model_out[1][1]
        else:
            model_out = self.Eval_net.forward(state,bsize=1,time_step=1,hidden_state=hidden_state,cell_state=cell_state)
            out = model_out[0]
            action = int(torch.argmax(out[0]))
            hidden_state = model_out[1][0]
            cell_state = model_out[1][1]
        return action, hidden_state, cell_state
    def learning_process(self):
        """One gradient step on a minibatch of episode windows.

        The TD target is computed only for the last step of each window;
        the earlier steps serve to warm up the LSTM state.
        """
        self.optimizer.zero_grad()
        self.Eval_net.train()
        # Periodically copy eval weights into the frozen target network.
        if self.learn_step% Update_traget_period == 0:
            self.Target_net.load_state_dict(self.Eval_net.state_dict())
        hidden_batch, cell_batch = self.Eval_net.init_hidden_states(bsize=minibatch_size)
        batch = self.replaymemory.get_batch(bsize=minibatch_size,time_step=Time_step)
        current_states = []
        acts = []
        rewards = []
        next_states = []
        # Unpack (state, action, reward, next_state) tuples per window.
        for b in batch:
            cs,ac,rw,ns,ep = [],[],[],[],[]
            for element in b:
                cs.append(element[0])
                ac.append(element[1])
                rw.append(element[2])
                ns.append(element[3])
            current_states.append(cs)
            acts.append(ac)
            rewards.append(rw)
            next_states.append(ns)
        current_states = np.array(current_states)
        acts = np.array(acts)
        rewards = np.array(rewards)
        next_states = np.array(next_states)
        torch_current_states = torch.from_numpy(current_states).float().to(self.device)
        torch_acts = torch.from_numpy(acts).long().to(self.device)
        torch_rewards = torch.from_numpy(rewards).float().to(self.device)
        torch_next_states = torch.from_numpy(next_states).float().to(self.device)
        Q_s, _ = self.Eval_net.forward(torch_current_states,bsize=minibatch_size,time_step=Time_step,hidden_state=hidden_batch,cell_state=cell_batch)
        # Q(s, a) for the action actually taken at the window's last step.
        Q_s_a = Q_s.gather(dim=1,index=torch_acts[:,Time_step-1].unsqueeze(dim=1)).squeeze(dim=1)
        Q_next,_ = self.Target_net.forward(torch_next_states,bsize=minibatch_size,time_step=Time_step,hidden_state=hidden_batch,cell_state=cell_batch)
        Q_next_max,__ = Q_next.detach().max(dim=1)
        # NOTE(review): no terminal mask here — the bootstrap term is added
        # even for final transitions; confirm whether that is intended.
        target_values = torch_rewards[:,Time_step-1] + (alpha * Q_next_max)
        loss = self.loss(Q_s_a, target_values)
        loss.backward()
        self.optimizer.step()
        self.learn_step+=1
        self.loss_his.append(loss.item())
#### initial fill the replaymemory
# device = torch.device("cpu")
agent=DQN_AGNET(device)
# Seed the replay buffer with random-policy episodes before training starts.
for i in range(0,Replay_memory_size):
    prev_state = env.reset()
    local_memory = []
    while True:
        action = np.random.randint(0,Action_dim)
        next_state,reward,done = env.step(action)
        local_memory.append((prev_state,action,reward,next_state))
        prev_state = next_state
        if done:
            break
    agent.replaymemory.add_episode(local_memory)
agent.greedy_epsilon=INITIAL_EPSILON
print("agent greedy_epsilon", agent.greedy_epsilon)
best_reward=-500
total_steps = 0
reward_history_train=[]
reward_history_test=[]
iou_history_train=[]
iou_history_test=[]
for episode in range(N_iteration):
    state = env.reset()
    # print("plan",env.one_hot)
    print("total_brick",env.total_brick)
    reward_train = 0
    step_size_memory=[]
    start_time = time.time()
    local_memory=[]
    hidden_state, cell_state = agent.Eval_net.init_hidden_states(bsize=1)
    # ---- collect one training episode, learning every UPDATE_FREQ steps ----
    while True:
        total_steps +=1
        action,hidden_state_next, cell_state_next = agent.choose_action(state,hidden_state, cell_state)
        state_next, r, done = env.step(action)
        step_size_memory.append(env.step_size)
        local_memory.append((state, action, r, state_next))
        reward_train += r
        if total_steps % UPDATE_FREQ == 0:
            agent.learning_process()
        if done:
            reward_history_train.append(reward_train)
            break
        state = state_next
        hidden_state, cell_state = hidden_state_next, cell_state_next
    agent.replaymemory.add_episode(local_memory)
    iou_train=iou(env.environment_memory,env.plan,env.HALF_WINDOW_SIZE,env.plan_height,env.plan_width)
    iou_history_train.append(iou_train)
    #### hindsight
    # Hindsight relabeling: replay the same actions against a plan equal to
    # what was actually built, and store the re-rewarded episode as well.
    if use_hindsight:
        local_memory_hindsight=[]
        _ = env_hindsight.reset()
        env_hindsight.plan[env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_height,\
        env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_width]=env.environment_memory[env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_height,\
        env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_width]
        env_hindsight.input_plan= env_hindsight.plan[env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_height,\
        env.HALF_WINDOW_SIZE:env.HALF_WINDOW_SIZE+env.plan_width]
        for i,element in enumerate(local_memory):
            _, r, _ = env_hindsight.step(element[1],step_size_memory[i])
            local_memory_hindsight.append((element[0],element[1],r,element[3]))
        agent.replaymemory.add_episode(local_memory_hindsight)
    ############ test agent
    # Evaluate (still with the current epsilon) over N_iteration_test episodes.
    iou_test=0
    reward_test_total=0
    start_time_test = time.time()
    for _ in range(N_iteration_test):
        state = env.reset()
        reward_test=0
        hidden_state, cell_state = agent.Eval_net.init_hidden_states(bsize=1)
        while True:
            action, hidden_state_next, cell_state_next = agent.choose_action(state, hidden_state, cell_state)
            state_next, r, done = env.step(action)
            reward_test += r
            if done:
                break
            state = state_next
            hidden_state, cell_state = hidden_state_next, cell_state_next
        reward_test_total += reward_test
        iou_test += iou(env.environment_memory, env.plan, env.HALF_WINDOW_SIZE, env.plan_height, env.plan_width)
    reward_test_total = reward_test_total / N_iteration_test
    secs = int(time.time() - start_time)
    mins = secs / 60
    secs = secs % 60
    print('Epodise: ', episode,
          '| Ep_reward_test:', reward_test_total, '| Ep_IOU_test: ', iou_test / N_iteration_test)
    print(" | time in %d minutes, %d seconds\n" % (mins, secs))
    reward_history_test.append(reward_test_total)
    iou_history_test.append(iou_test / N_iteration_test)
    # Linear epsilon decay across training.
    if agent.greedy_epsilon > FINAL_EPSILON:
        agent.greedy_epsilon -= (INITIAL_EPSILON - FINAL_EPSILON)/N_iteration
    # Checkpoint whenever the evaluation reward improves.
    if reward_test_total > best_reward:
        torch.save(agent.Eval_net.state_dict(), log_path+'Eval_net_episode_%d.pth' % (episode))
        torch.save(agent.Target_net.state_dict(), log_path+'Target_net_episode_%d.pth' % (episode))
        best_reward=reward_test_total
    # Persist all histories every episode so a crash loses nothing.
    with open(log_path+"reward_his_train.pickle", "wb") as fp:
        pickle.dump(reward_history_train, fp)
    with open(log_path+"reward_his_test.pickle", "wb") as fp:
        pickle.dump(reward_history_test, fp)
    with open(log_path+"loss.pickle", "wb") as fp:
        pickle.dump(agent.loss_his, fp)
    with open(log_path+"iou_train_history.pickle", "wb") as fp:
        pickle.dump(iou_history_train, fp)
    with open(log_path+"iou_test_history.pickle", "wb") as fp:
        pickle.dump(iou_history_test, fp)
| [
"55855538+WenyuHan-LiNa@users.noreply.github.com"
] | 55855538+WenyuHan-LiNa@users.noreply.github.com |
3ce8513dc49bbd7f39174d24d24c9ef059686e0d | 805fe894bbe1d4072a2b083c4d874d0566cd69d0 | /woocommerce.py | 36e1aeb5ac57b29aa5754c1182b1e8941922b6c0 | [] | no_license | Popss2701/Lite | dce6d354008d69c37f2fb20689315a5356f8b0a9 | 817a322bdcca80bc1f9ce0b25e3aa6dd97e70007 | refs/heads/master | 2023-03-27T11:46:55.768279 | 2021-03-23T18:48:37 | 2021-03-23T18:48:37 | 290,103,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271,230 | py | import copy
import csv
import re
from collections import defaultdict
from urllib.parse import unquote
import phpserialize
import chardet
from cartmigration.libs.utils import *
from cartmigration.models.cart.wordpress import LeCartWordpress
# tested with woocommerce335
class LeCartWoocommerce(LeCartWordpress):
WARNING_VARIANT_LIMIT = 100
    def __init__(self, data = None):
        """Initialize the WooCommerce cart adapter on top of the WordPress base.

        `product_types` caches post-type info per product; `is_variant_limit`
        flags when a product exceeds the variant warning threshold.
        """
        super().__init__(data)
        self.product_types = dict()
        self.is_variant_limit = False
    def display_config_source(self):
        """Probe the source WooCommerce database and populate
        self._notice['src'] with languages, order statuses, permalink bases
        and per-plugin feature-support flags. Returns response_success()."""
        parent = super().display_config_source()
        url_query = self.get_connector_url('query')
        self._notice['src']['language_default'] = 1
        self._notice['src']['category_root'] = 1
        storage_cat_data = dict()
        storage_cat_data[self._notice['src']['language_default']] = 0
        self._notice['src']['store_category'] = storage_cat_data
        # Default every optional feature to off; plugin detection below
        # switches individual flags on.
        self._notice['src']['support']['site_map'] = False
        self._notice['src']['support']['category_map'] = False
        self._notice['src']['support']['attribute_map'] = False
        self._notice['src']['support']['wpml'] = False
        self._notice['src']['support']['yoast_seo'] = False
        self._notice['src']['support']['manufacturers'] = False
        self._notice['src']['support']['product_bundle'] = False
        self._notice['src']['support']['customer_point_rewards'] = False
        self._notice['src']['support']['addons'] = False
        self._notice['src']['support']['plugin_pre_ord'] = False
        self._notice['src']['support']['plugin_order_status'] = False
        self._notice['src']['support']['custom_order_status'] = False
        # Read the serialized list of active plugins from wp_options.
        query_active_plugins = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
        }
        active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
        active_langs = list()
        if active_plugins and active_plugins['data']:
            active_plugin = active_plugins['data'][0]
            active_plugin_v = active_plugin['option_value']
            if active_plugin_v:
                active_plugin_v_data = php_unserialize(active_plugin_v)
                if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
                    active_plugin_v_data = list(active_plugin_v_data.values())
                if active_plugin_v_data:
                    # WPML multilingual support: read default + active languages.
                    if "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
                        self._notice['src']['support']['wpml'] = True
                        query_active_languages = {
                            'type': 'select',
                            'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
                        }
                        options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
                        if options_data and options_data['data']:
                            option_value = php_unserialize(options_data['data'][0]['option_value'])
                            if option_value and 'default_language' in option_value:
                                self._notice['src']['language_default'] = option_value['default_language']
                                active_langs = option_value['active_languages'].values()
                    else:
                        self._notice['src']['support']['wpml'] = False
                    # Any of the known brand plugins enables manufacturer migration.
                    if 'woocommerce-brand/main.php' in active_plugin_v_data or "wc-brand/woocommerce-brand.php" in active_plugin_v_data or 'woocommerce-brands/woocommerce-brands.php' in active_plugin_v_data or 'perfect-woocommerce-brands/perfect-woocommerce-brands.php' in active_plugin_v_data:
                        self._notice['src']['support']['manufacturers'] = True
                    if "wordpress-seo/wp-seo.php" in active_plugin_v_data:
                        self._notice['src']['support']['yoast_seo'] = True
                    if "woo-product-bundle-premium/index.php" in active_plugin_v_data or 'woo-product-bundle/index.php' in active_plugin_v_data:
                        self._notice['src']['support']['product_bundle'] = True
                    if "woocommerce-points-and-rewards/woocommerce-points-and-rewards.php" in active_plugin_v_data:
                        self._notice['src']['support']['customer_point_rewards'] = True
                    if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
                        self._notice['src']['support']['addons'] = True
                    # Sequential/custom order-number plugins.
                    if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers-pro.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers.php" in active_plugin_v_data)):
                        self._notice['src']['support']['plugin_pre_ord'] = True
                    if active_plugin_v_data and 'woocommerce-order-status-manager/woocommerce-order-status-manager.php' in active_plugin_v_data:
                        self._notice['src']['support']['plugin_order_status'] = True
                    if active_plugin_v_data and 'woocommerce-status-actions/woocommerce-status-actions.php' in active_plugin_v_data:
                        self._notice['src']['support']['custom_order_status'] = True
        # Batch the remaining config queries into one connector round-trip.
        queries_config = {
            'orders_status': {
                'type': 'select',
                # 'query': "SELECT * FROM `_DBPRF_term_taxonomy` AS term_taxonomy LEFT JOIN _DBPRF_terms AS terms ON term_taxonomy.term_id = terms.term_id WHERE term_taxonomy.taxonomy = 'shop_order_status'",
                'query': "SELECT DISTINCT(`post_status`) FROM `_DBPRF_posts` WHERE `post_type` = 'shop_order'",
            },
            'permalink_structure': {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE option_name = 'woocommerce_permalinks' OR option_name = 'category_base'",
            }
        }
        if self._notice['src']['support']['wpml']:
            queries_config['wpml'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_icl_languages` WHERE code IN " + self.list_to_in_condition(active_langs)
            }
            queries_config['default_lang'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` o LEFT JOIN _DBPRF_icl_languages il ON o.option_value = il.default_locale WHERE o.`option_name` = 'WPLANG'"
            }
        # Order-status plugins store statuses as posts instead of post_status values.
        if self._notice['src']['support']['plugin_order_status']:
            queries_config['orders_status']['query'] = "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_order_status'"
        if self._notice['src']['support']['custom_order_status']:
            queries_config['orders_status']['query'] = "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_custom_statuses' AND `post_status` = 'publish'"
        config = self.get_connector_data(url_query, {
            'serialize': True,
            'query': json.dumps(queries_config)
        })
        language_data = dict()
        order_status_data = dict()
        product_base = 'product'
        product_category_base = 'product-category'
        category_base = ''
        if config and config['result'] == 'success':
            if config['data']['orders_status']:
                for order_status_row in config['data']['orders_status']:
                    # order_status_id = 'wc-' + order_status_row['name'].lower()
                    # order_status_data[order_status_id] = order_status_row['name']
                    if self._notice['src']['support']['custom_order_status']:
                        order_status_id = 'wc-' + to_str(order_status_row['post_name'])
                        order_status_data[order_status_id] = order_status_row['post_title']
                    elif self._notice['src']['support']['plugin_order_status']:
                        order_status_id = order_status_row['post_name']
                        order_status_data[order_status_id] = order_status_row['post_title']
                    else:
                        order_status_id = order_status_row['post_status']
                        order_status_data[order_status_id] = self.get_order_status_label(order_status_row['post_status'])
            else:
                # No orders found: fall back to WooCommerce's default statuses.
                order_status_data = {
                    'wc-pending': 'Pending payment',
                    'wc-processing': 'Processing',
                    'wc-on-hold': 'On hold',
                    'wc-completed': 'Completed',
                    'wc-cancelled': 'Cancelled',
                    'wc-refunded': 'Refunded',
                    'wc-failed': 'Failed'
                }
            if self._notice['src']['support']['wpml']:
                if not self._notice['src']['language_default'] and 'default_lang' in config['data'] and config['data'][
                    'default_lang']:
                    for lang_default_row in config['data']['default_lang']:
                        if lang_default_row['code']:
                            self._notice['src']['language_default'] = lang_default_row['code']
                if 'wpml' in config['data']:
                    if config['data']['wpml']:
                        for lang_row in config['data']['wpml']:
                            lang_id = lang_row["code"]
                            language_data[lang_id] = lang_row['english_name']
                    else:
                        lang_id = 'en'
                        language_data[lang_id] = "Default language"
            else:
                lang_id = 1
                language_data[lang_id] = "Default language"
            # Resolve product / category permalink bases from serialized options.
            if config['data']['permalink_structure']:
                product_base_data = get_row_from_list_by_field(config['data']['permalink_structure'], 'option_name', 'woocommerce_permalinks')
                category_base_data = get_row_from_list_by_field(config['data']['permalink_structure'], 'option_name', 'category_base')
                if product_base_data:
                    option_value_data = php_unserialize(product_base_data['option_value'])
                    if option_value_data:
                        product_base = get_value_by_key_in_dict(option_value_data, 'product_base', 'product')
                        product_category_base = get_value_by_key_in_dict(option_value_data, 'category_base', 'product-category')
                if category_base_data:
                    category_base = category_base_data['option_value']
        self._notice['src']['config']['category_base'] = product_category_base
        self._notice['src']['config']['product_category_base'] = product_category_base
        self._notice['src']['config']['product_base'] = product_base
        self._notice['src']['support']['language_map'] = True
        self._notice['src']['languages'] = language_data
        self._notice['src']['order_status'] = order_status_data
        self._notice['src']['support']['order_status_map'] = True
        self._notice['src']['support']['country_map'] = False
        self._notice['src']['support']['add_new'] = True
        self._notice['src']['support']['site_map'] = False
        self._notice['src']['support']['customer_group_map'] = False
        self._notice['src']['support']['languages_select'] = True
        self._notice['src']['support']['order_state_map'] = True
        self._notice['src']['support']['seo'] = True
        # Woo-to-Woo migrations keep password hashes; otherwise passwords
        # need the helper plugin.
        if self.is_woo2woo():
            self._notice['src']['support']['cus_pass'] = False
        else:
            self._notice['src']['support']['cus_pass'] = True
        self._notice['src']['support']['coupons'] = True
        self._notice['src']['support']['pages'] = True
        self._notice['src']['support']['seo_301'] = True
        self._notice['src']['config']['seo_module'] = self.get_list_seo()
        return response_success()
    def display_config_target(self):
        """Probe the target WooCommerce database and populate
        self._notice['target'] with languages (WPML / Polylang), currencies,
        order statuses and plugin-support flags. Returns response_success()."""
        url_query = self.get_connector_url('query')
        self._notice['target']['language_default'] = 1
        self._notice['target']['category_root'] = 1
        storage_cat_data = dict()
        storage_cat_data[self._notice['target']['language_default']] = 0
        self._notice['target']['store_category'] = storage_cat_data
        # Default every optional feature to off; plugin detection below
        # switches individual flags on.
        self._notice['target']['support']['site_map'] = False
        self._notice['target']['support']['category_map'] = False
        self._notice['target']['support']['attribute_map'] = False
        self._notice['target']['support']['wpml'] = False
        self._notice['target']['support']['wpml_currency'] = False
        self._notice['target']['support']['product_bundle'] = False
        self._notice['target']['support']['yoast_seo'] = False
        self._notice['target']['support']['addons'] = False
        self._notice['target']['support']['customer_point_rewards'] = False
        self._notice['target']['support']['polylang'] = False
        self._notice['target']['support']['polylang_product'] = False
        self._notice['target']['support']['polylang_category'] = False
        self._notice['target']['support']['plugin_woo_admin'] = False
        self._notice['target']['support']['custom_order_status'] = False
        self._notice['target']['currency_map'] = dict()
        # Read the serialized list of active plugins from wp_options.
        query_active_plugins = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
        }
        active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
        active_langs = list()
        if active_plugins and active_plugins['data']:
            active_plugin = active_plugins['data'][0]
            active_plugin_v = active_plugin['option_value']
            if active_plugin_v:
                active_plugin_v_data = php_unserialize(active_plugin_v)
                if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
                    active_plugin_v_data = list(active_plugin_v_data.values())
                # WPML multilingual: languages plus optional multi-currency settings.
                if active_plugin_v_data and "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
                    self._notice['target']['support']['wpml'] = True
                    query_active_languages = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
                    }
                    options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
                    if options_data and options_data['data']:
                        option_value = php_unserialize(options_data['data'][0]['option_value'])
                        if option_value and 'default_language' in option_value:
                            self._notice['target']['language_default'] = option_value['default_language']
                            active_langs = option_value['active_languages'].values()
                    query_active_currency = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = '_wcml_settings'"
                    }
                    options_currency_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_currency)})
                    if options_currency_data and options_currency_data['data']:
                        currency_value = php_unserialize(options_currency_data['data'][0]['option_value'])
                        if currency_value and 'enable_multi_currency' in currency_value and to_int(currency_value['enable_multi_currency']) >= 2:
                            self._notice['target']['support']['wpml_currency'] = True
                            if 'default_currencies' in currency_value and currency_value['default_currencies']:
                                self._notice['target']['currency_map'] = currency_value['default_currencies']
                        else:
                            self._notice['target']['support']['wpml_currency'] = False
                # Known brand plugins and the taxonomy name each one uses.
                woo_brands = [
                    {'name': 'woocommerce-brand/main.php'},
                    {'name': 'wc-brand/woocommerce-brand.php'},
                    {'name': 'martfury-addons/martfury-addons.php', 'taxonomy': 'product_brand'},
                    {'name': 'woocommerce-brands/woocommerce-brands.php', 'taxonomy': 'product_brand'},
                    {'name': 'brands-for-woocommerce/woocommerce-brand.php', 'taxonomy': 'berocket_brand'},
                    {'name': 'perfect-woocommerce-brands/main.php', 'taxonomy': 'pwb-brand'},
                    {'name': 'perfect-woocommerce-brands/perfect-woocommerce-brands.php', 'taxonomy': 'pwb-brand'},
                ]
                self._notice['target']['config']['brand_taxonomy'] = 'product_brand'
                for brand in woo_brands:
                    if brand['name'] in active_plugin_v_data:
                        self._notice['target']['support']['plugin_manufacturers'] = True
                        if brand.get('taxonomy'):
                            self._notice['target']['config']['brand_taxonomy'] = brand['taxonomy']
                        break
                # if ('woocommerce-brand/main.php' in active_plugin_v_data) or ("wc-brand/woocommerce-brand.php" in active_plugin_v_data) or ('woocommerce-brands/woocommerce-brands.php' in active_plugin_v_data) or ('brands-for-woocommerce/woocommerce-brand.php' in active_plugin_v_data):
                # 	self._notice['target']['support']['manufacturers'] = True
                if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data) or ("woocommerce-sequential-order-numbers-pro/woocommerce-sequential-order-numbers-pro.php" in active_plugin_v_data)):
                    self._notice['target']['support']['plugin_pre_ord'] = True
                if active_plugin_v_data and "wordpress-seo/wp-seo.php" in active_plugin_v_data:
                    self._notice['target']['support']['yoast_seo'] = True
                if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
                    self._notice['target']['support']['addons'] = True
                if "leurlrewrite/leurlrewrite.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_seo'] = True
                    self._notice['target']['support']['plugin_seo_301'] = True
                if "leprespass/leprespass.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_cus_pass'] = True
                if "woocommerce-admin/woocommerce-admin.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_woo_admin'] = True
                # query_check_seo = {
                # 	'type': 'select',
                # 	'query': "SHOW TABLES LIKE '_DBPRF_lecm_rewrite';"
                # }
                # check_table_exit = self.select_data_connector(query_check_seo, 'seo')
                # if check_table_exit['result'] == 'success' and to_len(check_table_exit['data']) > 0:
                # 	self._notice['target']['support']['seo_301'] = True
                if "woo-product-bundle-premium/index.php" in active_plugin_v_data or 'woo-product-bundle/index.php' in active_plugin_v_data:
                    self._notice['target']['support']['product_bundle'] = True
                if "woocommerce-points-and-rewards/woocommerce-points-and-rewards.php" in active_plugin_v_data:
                    self._notice['target']['support']['customer_point_rewards'] = True
                # if 'polylang/polylang.php' in active_plugin_v_data and 'polylang-wc/polylang-wc.php' in active_plugin_v_data:
                if 'polylang/polylang.php' in active_plugin_v_data:
                    self._notice['target']['support']['polylang'] = True
                if 'woocommerce-status-actions/woocommerce-status-actions.php' in active_plugin_v_data:
                    self._notice['target']['support']['custom_order_status'] = True
        # Batch the remaining config queries into one connector round-trip.
        queries_config = {
            'orders_status': {
                'type': 'select',
                # 'query': "SELECT DISTINCT(`post_status`) FROM `_DBPRF_posts` WHERE `post_type` = 'shop_order'",
                'query': "SELECT * FROM `_DBPRF_term_taxonomy` AS term_taxonomy LEFT JOIN _DBPRF_terms AS terms ON term_taxonomy.term_id = terms.term_id WHERE term_taxonomy.taxonomy = 'shop_order_status'",
            },
        }
        if self._notice['target']['support']['wpml']:
            queries_config['wpml'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_icl_languages` WHERE code IN " + self.list_to_in_condition(active_langs)
            }
            queries_config['default_lang'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` o LEFT JOIN _DBPRF_icl_languages il ON o.option_value = il.default_locale WHERE o.`option_name` = 'WPLANG' and o.`option_value` != '' "
            }
        if self._notice['target']['support']['polylang']:
            queries_config['polylang'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_terms` as t LEFT JOIN `_DBPRF_term_taxonomy` as tx ON t.term_id = tx.term_id WHERE tx.taxonomy = 'language'"
            }
            queries_config['polylang_categories'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_terms` as t LEFT JOIN `_DBPRF_term_taxonomy` as tx ON t.term_id = tx.term_id WHERE tx.taxonomy = 'term_language'"
            }
        if self._notice['target']['support']['custom_order_status']:
            queries_config['custom_order_status'] = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_posts` WHERE `post_type` = 'wc_custom_statuses' AND `post_status` = 'publish'"
            }
        config = self.select_multiple_data_connector(queries_config)
        if 'polylang' in config['data'] and not config['data']['polylang']:
            self._notice['target']['support']['polylang'] = False
        language_data = dict()
        order_status_data = dict()
        polylang_products = dict()
        polylang_categories = dict()
        if config and config['result'] == 'success':
            # Prefer custom statuses, then taxonomy statuses, then Woo defaults.
            if self._notice['target']['support']['custom_order_status'] and config['data']['custom_order_status'] and to_len(config['data']['custom_order_status']) > 0:
                for order_status_row in config['data']['custom_order_status']:
                    order_status_id = 'wc-' + to_str(order_status_row['post_name'])
                    order_status_data[order_status_id] = order_status_row['post_title']
            elif config['data']['orders_status'] and to_len(config['data']['orders_status']) > 0:
                for order_status_row in config['data']['orders_status']:
                    order_status_id = 'wc-' + to_str(order_status_row['name']).lower()
                    order_status_data[order_status_id] = order_status_row['name']
                    # order_status_id = order_status_row['post_status']
                    # order_status_data[order_status_id] = self.get_order_status_label(order_status_row['post_status'])
            else:
                order_status_data = {
                    'wc-pending': 'Pending payment',
                    'wc-processing': 'Processing',
                    'wc-on-hold': 'On hold',
                    'wc-completed': 'Completed',
                    'wc-cancelled': 'Cancelled',
                    'wc-refunded': 'Refunded',
                    'wc-failed': 'Failed'
                }
            if self._notice['target']['support']['wpml']:
                if not self._notice['target']['language_default'] and 'default_lang' in config['data'] and config['data']['default_lang']:
                    for lang_default_row in config['data']['default_lang']:
                        if lang_default_row['code']:
                            self._notice['target']['language_default'] = lang_default_row['code']
                if 'wpml' in config['data']:
                    if config['data']['wpml']:
                        for lang_row in config['data']['wpml']:
                            lang_id = lang_row["code"]
                            language_data[lang_id] = lang_row['english_name']
                    else:
                        lang_id = 'en'
                        language_data[lang_id] = "Default language"
            elif self._notice['target']['support']['polylang']:
                # Polylang stores languages as terms; map slug -> term_taxonomy_id
                # for products and (pll_-prefixed) categories.
                if not self._notice['target']['language_default'] and 'default_lang' in config['data'] and config['data']['default_lang']:
                    for lang_default_row in config['data']['default_lang']:
                        if lang_default_row['code']:
                            self._notice['target']['language_default'] = lang_default_row['code']
                if 'polylang' in config['data']:
                    if config['data']['polylang']:
                        self._notice['target']['language_default'] = 'en'
                        for lang_row in config['data']['polylang']:
                            lang_id = lang_row['slug']
                            language_data[lang_id] = lang_row['name']
                            lang_product = lang_row['slug']
                            polylang_products[lang_product] = lang_row['term_taxonomy_id']
                        if config['data']['polylang_categories']:
                            for lang_row in config['data']['polylang_categories']:
                                lang_category = lang_row['slug'].replace('pll_', '')
                                polylang_categories[lang_category] = lang_row['term_taxonomy_id']
                    else:
                        lang_id = 'en'
                        language_data[lang_id] = "Default language"
            else:
                lang_id = 1
                language_data[lang_id] = "Default language"
        else:
            # Connector query failed entirely: fall back to defaults.
            order_status_data = {
                'wc-pending': 'Pending payment',
                'wc-processing': 'Processing',
                'wc-on-hold': 'On hold',
                'wc-completed': 'Completed',
                'wc-cancelled': 'Cancelled',
                'wc-refunded': 'Refunded',
                'wc-failed': 'Failed'
            }
            lang_id = 1
            language_data[lang_id] = "Default language"
        # Final capability flags exposed to the migration UI.
        self._notice['target']['support']['manufacturers'] = True
        self._notice['target']['support']['check_manufacturers'] = True
        # self._notice['target']['support']['yoast_seo'] = False
        self._notice['target']['support']['pre_ord'] = True
        self._notice['target']['support']['check_pre_ord'] = True
        self._notice['target']['support']['seo'] = True
        self._notice['target']['support']['check_seo'] = True
        self._notice['target']['support']['seo_301'] = True
        self._notice['target']['support']['check_seo_301'] = True
        self._notice['target']['support']['cus_pass'] = True
        self._notice['target']['support']['check_cus_pass'] = True
        self._notice['target']['support']['language_map'] = True
        self._notice['target']['languages'] = language_data
        self._notice['target']['order_status'] = order_status_data
        self._notice['target']['support']['order_status_map'] = True
        self._notice['target']['support']['country_map'] = False
        self._notice['target']['support']['add_new'] = True
        self._notice['target']['support']['coupons'] = True
        self._notice['target']['support']['blogs'] = True
        self._notice['target']['support']['pages'] = True
        self._notice['target']['support']['site_map'] = False
        self._notice['target']['support']['pre_prd'] = False
        self._notice['target']['support']['pre_cus'] = False
        self._notice['target']['support']['img_des'] = True
        self._notice['target']['support']['customer_group_map'] = False
        self._notice['target']['support']['languages_select'] = True
        self._notice['target']['support']['update_latest_data'] = True
        self._notice['target']['config']['entity_update']['products'] = True
        self._notice['target']['support']['polylang_product'] = polylang_products
        self._notice['target']['support']['polylang_category'] = polylang_categories
        return response_success()
def get_query_display_import_source(self, update = False):
compare_condition = ' > '
if update:
compare_condition = ' <= '
prefix = self._notice['src']['config']['table_prefix']
if self._notice['src']['config'].get('site_id'):
prefix = to_str(prefix).replace(to_str(self._notice['src']['config'].get('site_id')) + '_', '')
queries = {
# 'taxes': {
# 'type': 'select',
# 'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'",
# },
'manufacturers': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy WHERE (taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand') AND term_id " + compare_condition + to_str(self._notice['process']['manufacturers']['id_src']),
},
'categories': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy WHERE taxonomy = 'product_cat' AND term_id " + compare_condition + to_str(self._notice['process']['categories']['id_src']),
},
'products': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'product' AND post_status NOT IN ('inherit','auto-draft') AND ID " + compare_condition + to_str(
self._notice['process']['products']['id_src']),
},
'customers': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM " + prefix + "users u LEFT JOIN " + prefix + "usermeta um ON u.ID = um.user_id WHERE (um.meta_key = '_DBPRF_capabilities' AND um.meta_value LIKE '%customer%' OR um.meta_value LIKE '%subscriber%') AND u.ID " + compare_condition + to_str(
self._notice['process']['customers']['id_src']),
},
'orders': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'shop_order' AND post_status NOT IN ('inherit','auto-draft') AND ID " + compare_condition + to_str(
self._notice['process']['orders']['id_src']),
},
'reviews': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_comments AS cm,_DBPRF_posts AS p WHERE cm.comment_post_ID = p.ID AND p.post_type = 'product' AND cm.comment_ID " + compare_condition + to_str(
self._notice['process']['reviews']['id_src']),
},
'pages': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'page' AND ID " + compare_condition + to_str(self._notice['process']['pages']['id_src']),
},
'coupons': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'shop_coupon' AND ID " + compare_condition + to_str(self._notice['process']['coupons']['id_src']),
},
'blogs': {
'type': 'select',
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts WHERE post_type = 'post' AND ID " + compare_condition + to_str(self._notice['process']['blogs']['id_src']),
},
}
if self._notice['src']['support']['wpml']:
queries['categories'] = {
'type': 'select',
# 'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_icl_translations il ON tt.term_id = il.element_id "
# "WHERE tt.term_id and il.`source_language_code` is NULL and il.`element_type` = 'tax_product_cat' and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
# self._notice['process']['categories']['id_src']),
'query': "SELECT COUNT(1) AS count FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
"WHERE il.`element_type` = 'tax_product_cat' and il.`source_language_code` IS NULL and tt.taxonomy = 'product_cat' and tt.term_taxonomy_id " + compare_condition + to_str(self._notice['process']['categories']['id_src']),
}
queries['products'] = {
'type': 'select',
# 'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
# "WHERE p.`ID` and il.`source_language_code` is NULL and il.`element_type` = 'post_product' and p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') AND p.ID > " + to_str(
# self._notice['process']['products']['id_src']),
'query': "SELECT COUNT(1) AS count FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
"WHERE il.`source_language_code` is NULL and il.`element_type` = 'post_product' and p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') AND p.ID " + compare_condition + to_str(self._notice['process']['products']['id_src']),
}
return queries
def display_import_source(self):
if self._notice['config']['add_new']:
self.display_recent_data()
queries = self.get_query_display_import_source()
count = self.get_connector_data(self.get_connector_url('query'), {
'serialize': True,
'query': json.dumps(queries)
})
if (not count) or (count['result'] != 'success'):
return response_error()
real_totals = dict()
for key, row in count['data'].items():
total = 0
if key == 'taxes':
if row and to_len(row) > 0:
taxes = row[0]['option_value'].splitlines()
total = (to_len(taxes) + 1) if taxes else 1
else:
total = self.list_to_count_import(row, 'count')
real_totals[key] = total
for key, total in real_totals.items():
self._notice['process'][key]['total'] = total
return response_success()
def display_update_source(self):
queries = self.get_query_display_import_source(True)
count = self.select_multiple_data_connector(queries, 'count')
if (not count) or (count['result'] != 'success'):
return response_error()
real_totals = dict()
for key, row in count['data'].items():
total = 0
if key == 'taxes':
if row and to_len(row) > 0:
taxes = row[0]['option_value'].splitlines()
total = (to_len(taxes) + 1) if taxes else 1
else:
total = self.list_to_count_import(row, 'count')
real_totals[key] = total
for key, total in real_totals.items():
self._notice['process'][key]['total_update'] = total
return response_success()
def display_import_target(self):
return response_success()
    def prepare_import_target(self):
        """Detect target-store plugins and prepare target-side tables.

        Reads the WordPress ``active_plugins`` option, flips the matching
        support flags in ``self._notice['target']['support']``, creates the
        SEO rewrite table when SEO migration is enabled, and adds a ``lang``
        column to the map/rewrite tables for multilingual targets.
        """
        # Let the base class do its generic preparation first; bail out on failure.
        parent = super().prepare_import_target()
        if parent['result'] != 'success':
            return parent
        query_active_plugins = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'active_plugins'"
        }
        active_plugins = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_plugins)})
        if active_plugins and active_plugins['data']:
            active_plugin = active_plugins['data'][0]
            active_plugin_v = active_plugin['option_value']
            if active_plugin_v:
                # The option value is PHP-serialized; normalize to a flat list
                # of plugin paths.
                # NOTE(review): if php_unserialize returns None, the bare
                # membership tests further down (e.g. the brand loop and the
                # themedelights check) would raise — confirm unserialize can't
                # fail for a non-empty option value.
                active_plugin_v_data = php_unserialize(active_plugin_v)
                if active_plugin_v_data and isinstance(active_plugin_v_data, dict):
                    active_plugin_v_data = list(active_plugin_v_data.values())
                if active_plugin_v_data and "woocommerce-multilingual/wpml-woocommerce.php" in active_plugin_v_data:
                    self._notice['target']['support']['wpml'] = True
                    # WPML active: read its settings to learn the default language.
                    query_active_languages = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'icl_sitepress_settings'"
                    }
                    options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_active_languages)})
                    if options_data and options_data['data']:
                        option_value = php_unserialize(options_data['data'][0]['option_value'])
                        if option_value and 'default_language' in option_value:
                            self._notice['target']['language_default'] = option_value['default_language']
                # Known brand plugins; some define their own brand taxonomy name.
                woo_brands = [
                    {'name': 'woocommerce-brand/main.php'},
                    {'name': 'wc-brand/woocommerce-brand.php'},
                    {'name': 'woocommerce-brands/woocommerce-brands.php'},
                    {'name': 'brands-for-woocommerce/woocommerce-brand.php', 'taxonomy': 'berocket_brand'},
                    {'name': 'perfect-woocommerce-brands/main.php', 'taxonomy': 'pwb-brand'},
                ]
                for brand in woo_brands:
                    if brand['name'] in active_plugin_v_data:
                        # First matching brand plugin wins.
                        self._notice['target']['support']['plugin_manufacturers'] = False
                        if brand.get('taxonomy'):
                            self._notice['target']['config']['brand_taxonomy'] = brand['taxonomy']
                        break
                # Sequential/custom order number plugins.
                if active_plugin_v_data and (("woocommerce-sequential-order-numbers/woocommerce-sequential-order-numbers.php" in active_plugin_v_data) or ("custom-order-numbers-for-woocommerce/custom-order-numbers-for-woocommerce.php" in active_plugin_v_data) or ("sequential-order-numbers-for-woocommerce/sequential-order-numbers.php" in active_plugin_v_data)):
                    self._notice['target']['support']['plugin_pre_ord'] = True
                if active_plugin_v_data and "wordpress-seo/wp-seo.php" in active_plugin_v_data:
                    self._notice['target']['support']['yoast_seo'] = True
                if "themedelights-addons/themedelights-addons.php" in active_plugin_v_data or "woocommerce-product-addons/woocommerce-product-addons.php" in active_plugin_v_data:
                    self._notice['target']['support']['addons'] = True
                if "leurlrewrite/leurlrewrite.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_seo'] = True
                    self._notice['target']['support']['plugin_seo_301'] = True
                if "leprespass/leprespass.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_cus_pass'] = True
                if "woo-product-bundle-premium/index.php" in active_plugin_v_data:
                    self._notice['target']['support']['product_bundle'] = True
                if "woocommerce-admin/woocommerce-admin.php" in active_plugin_v_data:
                    self._notice['target']['support']['plugin_woo_admin'] = True
        # SEO URL migration needs the lecm_rewrite table on the target.
        if self._notice['config']['seo'] or self._notice['config']['seo_301']:
            query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
            self.query_data_connector({'type': 'query', 'query': query['query']})
        # Multilingual targets: track the language of each migrated entity.
        if self._notice['target']['support']['wpml'] or self._notice['target']['support'].get('polylang'):
            add_column = "ALTER TABLE " + self.get_table_name(TABLE_MAP) + " ADD `lang` VARCHAR(255)"
            self.query_raw(add_column)
            add_column = "ALTER TABLE _DBPRF_lecm_rewrite ADD `lang` VARCHAR(255)"
            self.query_data_connector({'type': 'query', 'query': add_column})
        return response_success()
def display_confirm_target(self):
self._notice['target']['clear']['function'] = 'clear_target_taxes'
self._notice['target']['clear_demo']['function'] = 'clear_target_products_demo'
return response_success()
# TODO clear demo
def clear_target_manufacturers_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_categories_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['manufacturers']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_MANUFACTURER
}
manufacturers = self.select_obj(TABLE_MAP, where)
manufacturer_ids = list()
if manufacturers['result'] == 'success':
manufacturer_ids = duplicate_field_value_from_list(manufacturers['data'], 'id_desc')
if not manufacturer_ids:
return next_clear
manufacturer_id_con = self.list_to_in_condition(manufacturer_ids)
taxonomy_meta_table = 'termmeta'
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' AND term_id IN " + manufacturer_id_con
}
manufacturers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(collections_query)})
if manufacturers['data']:
all_queries = list()
taxonomy_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_taxonomy_id')
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + manufacturer_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + manufacturer_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
if all_queries:
self.import_multiple_data_connector(all_queries, 'cleardemo')
return self._notice['target']['clear_demo']
def clear_target_categories_demo(self):
next_clear = {
'result': 'process',
'function': 'clear_target_products_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['categories']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_CATEGORY
}
categories = self.select_obj(TABLE_MAP, where)
category_ids = list()
if categories['result'] == 'success':
category_ids = duplicate_field_value_from_list(categories['data'], 'id_desc')
if not category_ids:
return next_clear
category_id_con = self.list_to_in_condition(category_ids)
taxonomy_meta_table = 'termmeta'
collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' AND term_id IN " + category_id_con
}
categories = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(collections_query)})
if categories['data']:
all_queries = list()
taxonomy_ids = duplicate_field_value_from_list(categories['data'], 'term_taxonomy_id')
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + category_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + category_id_con
})
all_queries.append({
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` "
"WHERE element_type = 'tax_product_cat' AND element_id IN " + category_id_con
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'category' and type_id IN " + category_id_con
})
})
if all_queries:
self.import_multiple_data_connector(all_queries, 'cleardemo')
return next_clear
    def clear_target_products_demo(self):
        """Delete demo products previously created by this migration, in
        batches of ``LIMIT_CLEAR_DEMO``.

        Removes the product posts (and their variation children), their post
        meta, migrated attribute taxonomies/terms, WPML translation links and
        SEO rewrites, then deletes the processed map rows. Re-entered until a
        batch comes back smaller than the limit, at which point it hands off
        to the order-clearing step.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_orders_demo',
        }
        if not self._notice['config']['products']:
            self._notice['target']['clear_demo'] = next_clear
            return next_clear
        # One batch of target-side product ids from the migration map.
        where = {
            'migration_id': self._migration_id,
            'type': self.TYPE_PRODUCT
        }
        products = self.select_page(TABLE_MAP, where, self.LIMIT_CLEAR_DEMO)
        product_ids = list()
        if products['result'] == 'success':
            product_ids = duplicate_field_value_from_list(products['data'], 'id_desc')
        if not product_ids:
            self._notice['target']['clear_demo'] = next_clear
            return next_clear
        product_id_con = self.list_to_in_condition(product_ids)
        # Select the products plus their children (variations have the product
        # as post_parent) so both get deleted together.
        collections_query = {
            'type': 'select',
            'query': "SELECT * FROM `_DBPRF_posts` "
                     "WHERE ID IN " + product_id_con + " OR post_parent IN " + product_id_con
        }
        products = self.get_connector_data(self.get_connector_url('query'),
                                           {'query': json.dumps(collections_query)})
        all_post_id = list()
        if products['data']:
            all_post_id = duplicate_field_value_from_list(products['data'], 'ID')
            all_collections_query = {
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_posts` "
                         "WHERE ID IN " + self.list_to_in_condition(all_post_id)
            }
            clear_table = self.get_connector_data(self.get_connector_url('query'),
                                                  {'query': json.dumps(all_collections_query)})
            # NOTE(review): table name '_DBPRF_post_meta' differs from the
            # '_DBPRF_postmeta' used in clear_target_products — confirm which
            # one matches the target schema.
            all_meta_query = {
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_post_meta`"
                         " WHERE post_id IN " + self.list_to_in_condition(all_post_id)
            }
            clear_table = self.get_connector_data(self.get_connector_url('query'),
                                                  {'query': json.dumps(all_meta_query)})
            # Migrated product attributes (mapped as TYPE_OPTION): drop the
            # attribute taxonomies and every term belonging to them.
            where = {
                'migration_id': self._migration_id,
                'type': self.TYPE_OPTION
            }
            attibutes = self.select_obj(TABLE_MAP, where)
            attibutes_ids = list()
            attibutes_codes = list()
            if attibutes['result'] == 'success':
                attibutes_ids = duplicate_field_value_from_list(attibutes['data'], 'id_desc')
                attibutes_codes = duplicate_field_value_from_list(attibutes['data'], 'value')
            if attibutes_ids:
                del_transient_attr_query = {
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_woocommerce_attribute_taxonomies` WHERE attribute_id IN " + self.list_to_in_condition(
                        attibutes_ids)
                }
                self.get_connector_data(self.get_connector_url('query'),
                                        {'query': json.dumps(del_transient_attr_query)})
                term_query = {
                    "type": "select",
                    "query": "SELECT * FROM `_DBPRF_term_taxonomy` tt LEFT JOIN `_DBPRF_terms` t ON tt.term_id = t.term_id "
                             "WHERE tt.taxonomy IN " + self.list_to_in_condition(attibutes_codes)
                }
                terms = self.get_connector_data(self.get_connector_url('query'),
                                                {'query': json.dumps(term_query)})
                if (terms['data']):
                    term_ids = duplicate_field_value_from_list(terms['data'], 'term_id')
                    taxonomy_ids = duplicate_field_value_from_list(terms['data'], 'term_taxonomy_id')
                    del_transient_attr_query = {
                        'type': 'query',
                        'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
                            taxonomy_ids)
                    }
                    self.get_connector_data(self.get_connector_url('query'),
                                            {'query': json.dumps(del_transient_attr_query)})
                    del_transient_attr_query = {
                        'type': 'query',
                        'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
                            term_ids)
                    }
                    self.get_connector_data(self.get_connector_url('query'),
                                            {'query': json.dumps(del_transient_attr_query)})
            if self._notice['target']['support']['wpml']:
                # Drop the WPML translation links for the deleted products.
                clear_table = self.get_connector_data(self.get_connector_url('query'), {
                    'query': json.dumps({
                        'type': 'query',
                        'query': "DELETE FROM `_DBPRF_icl_translations` "
                                 "WHERE element_type = 'post_product' AND element_id IN " + product_id_con
                    })
                })
            if self._notice['config']['seo'] or self._notice['config']['seo_301']:
                # Drop the SEO rewrite rows for the deleted products.
                clear_table = self.get_connector_data(self.get_connector_url('query'), {
                    'query': json.dumps({
                        'type': 'query',
                        'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'product' and type_id IN " + product_id_con
                    })
                })
        # Remove the processed map rows so the next batch picks up new ids.
        self.delete_map_demo(self.TYPE_PRODUCT, product_ids)
        # A short batch means we're done; otherwise re-enter this step.
        if product_ids and to_len(product_ids) < self.LIMIT_CLEAR_DEMO:
            self._notice['target']['clear_demo'] = next_clear
            return next_clear
        return self._notice['target']['clear_demo']
    def clear_target_customers_demo(self):
        """Delete demo customers previously created by this migration.

        Removes only the user ids recorded in the migration map, along with
        their user meta and (on newer WooCommerce) the customer-lookup rows,
        then hands off to the order-clearing step.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_orders_demo',
        }
        self._notice['target']['clear_demo'] = next_clear
        if not self._notice['config']['customers']:
            return next_clear
        # Target-side user ids recorded in the migration map.
        where = {
            'migration_id': self._migration_id,
            'type': self.TYPE_CUSTOMER
        }
        customers = self.select_obj(TABLE_MAP, where)
        customer_ids = list()
        if customers['result'] == 'success':
            customer_ids = duplicate_field_value_from_list(customers['data'], 'id_desc')
        if not customer_ids:
            return next_clear
        customer_id_con = self.list_to_in_condition(customer_ids)
        del_user_query = "DELETE FROM _DBPRF_users WHERE ID IN " + customer_id_con
        clear_table = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': del_user_query
            })
        })
        # Log-and-continue: a failed delete should not abort the clear pipeline.
        if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
            self.log("Clear data failed. Error: Could not empty customers ", 'clear')
        del_user_meta_query = "DELETE FROM _DBPRF_usermeta WHERE user_id IN " + customer_id_con
        clear_table = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': del_user_meta_query
            })
        })
        # WooCommerce >= 4.0 (or the woocommerce-admin plugin) also keeps a
        # customer lookup table that must be purged.
        if self._notice['target']['support'].get('plugin_woo_admin') or self.convert_version(self._notice['target']['config']['version'], 2) > 399:
            del_customer_lookup_query = "DELETE FROM _DBPRF_wc_customer_lookup WHERE user_id IN " + customer_id_con
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query', 'query': del_customer_lookup_query
                })
            })
        return next_clear
def clear_target_orders_demo(self):
next_clear = {
'result': 'success',
'function': 'clear_target_reviews_demo',
}
if not self._notice['config']['orders']:
self._notice['target']['clear_demo'] = next_clear
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_ORDER
}
orders = self.select_page(TABLE_MAP, where, self.LIMIT_CLEAR_DEMO)
order_ids = list()
if orders['result'] == 'success':
order_ids = duplicate_field_value_from_list(orders['data'], 'id_desc')
if not order_ids:
self._notice['target']['clear_demo'] = next_clear
return next_clear
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund') AND ID IN " + self.list_to_in_condition(
order_ids)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
# clear meta post(orders)
all_meta_query = {
'type': 'select',
'query': "DELETE FROM `_DBPRF_post_meta` WHERE post_id IN " + self.list_to_in_condition(order_ids)
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
self.delete_map_demo(self.TYPE_ORDER, order_ids)
if order_ids and to_len(order_ids) < self.LIMIT_CLEAR_DEMO:
self._notice['target']['clear_demo'] = next_clear
return next_clear
return self._notice['target']['clear_demo']
def clear_target_reviews_demo(self):
next_clear = {
'result': 'success',
'function': 'clear_target_pages_demo',
}
self._notice['target']['clear_demo'] = next_clear
if not self._notice['config']['reviews']:
return next_clear
where = {
'migration_id': self._migration_id,
'type': self.TYPE_REVIEW
}
reviews = self.select_obj(TABLE_MAP, where)
review_ids = list()
if reviews['result'] == 'success':
review_ids = duplicate_field_value_from_list(reviews['data'], 'id_desc')
if not review_ids:
return next_clear
tables = [
'commentmeta',
'comments'
]
for table in tables:
where = ''
if table == 'comments':
where = " WHERE comment_ID IN " + self.list_to_in_condition(review_ids)
if table == 'commentmeta':
where = " WHERE comment_id IN " + self.list_to_in_condition(review_ids)
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "`" + where
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
# TODO: clear
def clear_target_taxes(self):
next_clear = {
'result': 'process',
'function': 'clear_target_manufacturers',
'msg': ''
}
if not self._notice['config']['taxes']:
self._notice['target']['clear'] = next_clear
return next_clear
tables = [
'options',
'woocommerce_tax_rates',
'woocommerce_tax_rate_locations',
'wc_tax_rate_classes'
]
for table in tables:
if table == 'options':
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "UPDATE `_DBPRF_" + table + "` SET `option_value` = '' WHERE `option_name` = 'woocommerce_tax_classes'"
})
})
continue
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_" + table + "` WHERE 1"
})
})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
continue
self._notice['target']['clear'] = next_clear
return next_clear
    def clear_target_manufacturers(self):
        """Wipe all brand (manufacturer) terms on the target store, then hand
        off to the category-clearing step.

        Deletes every term belonging to the configured brand taxonomy from the
        term tables and, when Yoast SEO is active, empties that taxonomy's
        entry in the ``wpseo_taxonomy_meta`` option.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_categories',
            'msg': ''
        }
        if not self._notice['config']['manufacturers']:
            self._notice['target']['clear'] = next_clear
            return next_clear
        taxonomy_meta_table = 'termmeta'
        # Brand taxonomy name; 'berocket_brand' is the default, overridden by
        # whatever plugin prepare_import_target detected.
        taxonomy = 'berocket_brand'
        if self._notice['target']['config'].get('brand_taxonomy'):
            taxonomy = self._notice['target']['config']['brand_taxonomy']
        # all_collections_query = {
        #     'type': 'select',
        #     'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' LIMIT 200"
        # }
        # manufacturers = self.get_connector_data(self.get_connector_url('query'),
        #                                         {'query': json.dumps(all_collections_query)})
        # Delete from each term table, scoping by the brand taxonomy via a
        # subquery on term_taxonomy. NOTE(review): assumes self.escape()
        # returns a quoted SQL literal — confirm.
        tables = ['termmeta', 'terms', 'term_relationships', 'term_taxonomy']
        for table in tables:
            where = ''
            if table in ['termmeta', 'terms']:
                where = " term_id IN (SELECT term_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = " + self.escape(taxonomy) + " )"
            if table in ['term_relationships']:
                where = " term_taxonomy_id IN (SELECT term_taxonomy_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = " + self.escape(taxonomy) + " )"
            if table == 'term_taxonomy':
                where = " taxonomy = " + self.escape(taxonomy)
            query = "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
            clear_table = self.query_data_connector({'type': 'delete', 'query': query})
            # Log-and-continue: a failed delete should not abort the pipeline.
            if (not clear_table) or (clear_table['result'] != 'success'):
                self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
                continue
        # if manufacturers:
        # 	while manufacturers['data']:
        # 		if not manufacturers:
        # 			return next_clear
        # 		term_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_id')
        # 		all_queries = list()
        # 		taxonomy_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_taxonomy_id')
        # 		all_queries.append({
        # 			'type': 'query',
        # 			'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + self.list_to_in_condition(
        # 				term_ids)
        # 		})
        # 		all_queries.append({
        # 			'type': 'query',
        # 			'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
        # 				term_ids)
        # 		})
        # 		all_queries.append({
        # 			'type': 'query',
        # 			'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
        # 				taxonomy_ids)
        # 		})
        # 		if all_queries:
        # 			self.import_multiple_data_connector(all_queries, 'cleardemo')
        # 		all_collections_query = {
        # 			'type': 'select',
        # 			'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_brand' OR taxonomy = 'brand' OR taxonomy = 'pwb-brand' LIMIT 200"
        # 		}
        # 		manufacturers = self.get_connector_data(self.get_connector_url('query'),
        # 		                                        {'query': json.dumps(all_collections_query)})
        # Yoast SEO keeps per-taxonomy meta in a serialized option: clear just
        # the brand taxonomy's section.
        if self._notice['target']['support']['yoast_seo']:
            query_wpseo = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
            }
            options_data = self.get_connector_data(self.get_connector_url('query'),
                                                   {'query': json.dumps(query_wpseo)})
            if options_data and options_data['data']:
                option_value = php_unserialize(options_data['data'][0]['option_value'])
                if taxonomy in option_value:
                    option_value[taxonomy] = dict()
                    data_set = {
                        'option_value': php_serialize(option_value)
                    }
                    where = {
                        'option_id': options_data['data'][0]['option_id'],
                        'option_name': 'wpseo_taxonomy_meta'
                    }
                    update_query = self.create_update_query_connector('options', data_set, where)
                    wpseo_taxonomy_clear = self.import_data_connector(update_query, 'manufacturer')
        self._notice['target']['clear'] = next_clear
        return next_clear
    def clear_target_categories(self):
        """Wipe all product/post categories on the target store, then hand off
        to the product-clearing step.

        Deletes category terms in passes of 200 while any remain, plus their
        WPML translation rows, SEO rewrites and Yoast taxonomy meta when
        those features are enabled.
        """
        next_clear = {
            'result': 'process',
            'function': 'clear_target_products',
            'msg': ''
        }
        if not self._notice['config']['categories']:
            self._notice['target']['clear'] = next_clear
            return next_clear
        taxonomy_meta_table = 'termmeta'
        # Batched wipe: keep deleting 200 terms at a time while any remain.
        # NOTE(review): if the connector keeps failing, _check_categories_exists
        # stays True and this loop never terminates — confirm acceptable.
        while self._check_categories_exists():
            all_collections_query = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' LIMIT 200"
            }
            categories = self.get_connector_data(self.get_connector_url('query'),
                                                 {'query': json.dumps(all_collections_query)})
            if not categories:
                # Connector failure: bail out without marking this step done.
                return next_clear
            term_ids = duplicate_field_value_from_list(categories['data'], 'term_id')
            taxonomy_ids = duplicate_field_value_from_list(categories['data'], 'term_taxonomy_id')
            # Remove term meta, the terms themselves, and their taxonomy rows.
            taxnomy_query = {
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_" + taxonomy_meta_table + "` WHERE term_id IN " + self.list_to_in_condition(
                    term_ids)
            }
            self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(taxnomy_query)})
            self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps({
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
                    term_ids)
            })})
            self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps({
                'type': 'query',
                'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
                    taxonomy_ids)
            })
            })
        # end for
        if self._notice['target']['support']['wpml']:
            # Drop all WPML translation links for product categories.
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_icl_translations` where element_type = 'tax_product_cat'"
                })
            })
        if self._notice['config']['seo'] or self._notice['config']['seo_301']:
            # Drop all SEO rewrite rows for categories.
            clear_table = self.get_connector_data(self.get_connector_url('query'), {
                'query': json.dumps({
                    'type': 'query',
                    'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'category'"
                })
            })
        # Yoast SEO keeps per-taxonomy meta in a serialized option: clear just
        # the product_cat section.
        if self._notice['target']['support']['yoast_seo']:
            query_wpseo = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
            }
            options_data = self.get_connector_data(self.get_connector_url('query'),
                                                   {'query': json.dumps(query_wpseo)})
            if options_data and options_data['data']:
                option_value = php_unserialize(options_data['data'][0]['option_value'])
                if 'product_cat' in option_value:
                    option_value['product_cat'] = dict()
                    data_set = {
                        'option_value': php_serialize(option_value)
                    }
                    where = {
                        'option_id': options_data['data'][0]['option_id'],
                        'option_name': 'wpseo_taxonomy_meta'
                    }
                    update_query = self.create_update_query_connector('options', data_set, where)
                    wpseo_taxonomy_clear = self.import_data_connector(update_query, 'category')
        self._notice['target']['clear'] = next_clear
        return self._notice['target']['clear']
def _check_categories_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT term_taxonomy_id FROM `_DBPRF_term_taxonomy` WHERE taxonomy = 'product_cat' OR taxonomy = 'post_cat' LIMIT 1"
}
categories = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if categories['data'] else False
def _check_product_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT ID FROM `_DBPRF_posts` WHERE post_type IN ('product', 'product_variation') LIMIT 1"
}
# products = self.get_connector_data(self.get_connector_url('query'),
# {'query': json.dumps(all_collections_query)})
products = self.select_data_connector(all_collections_query, 'products')
return True if products['data'] else False
def _check_attributes_exists(self):
all_collections_query = {
'type': 'select',
'query': "SELECT * FROM `_DBPRF_woocommerce_attribute_taxonomies` ORDER BY attribute_id LIMIT 200"
}
products = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
return True if products['data'] else False
def clear_target_products(self):
next_clear = {
'result': 'process',
'function': 'clear_target_customers',
'msg': ''
}
if not self._notice['config']['products']:
self._notice['target']['clear'] = next_clear
return next_clear
while self._check_product_exists():
# clear posts(product)
# clear meta post(product)
all_collections_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN('product', 'product_variation')"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_collections_query)})
if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
all_meta_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_postmeta` WHERE post_id NOT IN (SELECT ID FROM _DBPRF_posts)"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(all_meta_query)})
if (not clear_table) or (clear_table['result'] != 'success'):
self.log("Clear data failed. Error: Could not empty products", 'clear')
continue
# clear attributes
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = '_transient_wc_attribute_taxonomies'"
}
clear_table = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
while self._check_attributes_exists():
product_attribute_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_woocommerce_attribute_taxonomies` ORDER BY attribute_id LIMIT 200"
}
attributes = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(product_attribute_query)})
if (attributes['data']):
attribute_ids = duplicate_field_value_from_list(attributes['data'], 'attribute_id')
attribute_names = duplicate_field_value_from_list(attributes['data'], 'attribute_name')
attribute_names_condition = "('pa_" + "','pa_".join(attribute_names) + "')"
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_woocommerce_attribute_taxonomies` WHERE attribute_id IN " + self.list_to_in_condition(
attribute_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
term_query = {
"type": "select",
"query": "SELECT * FROM `_DBPRF_term_taxonomy` tt LEFT JOIN `_DBPRF_terms` t ON tt.term_id = t.term_id "
"WHERE tt.taxonomy IN " + attribute_names_condition
}
terms = self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(term_query)})
if (terms['data']):
term_ids = duplicate_field_value_from_list(terms['data'], 'term_id')
taxonomy_ids = duplicate_field_value_from_list(terms['data'], 'term_taxonomy_id')
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_term_taxonomy` WHERE term_taxonomy_id IN " + self.list_to_in_condition(
taxonomy_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
del_transient_attr_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_terms` WHERE term_id IN " + self.list_to_in_condition(
term_ids)
}
self.get_connector_data(self.get_connector_url('query'),
{'query': json.dumps(del_transient_attr_query)})
if self._notice['target']['support']['wpml']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_icl_translations` where element_type IN ('post_product','post_product_variation'"
})
})
if self._notice['config']['seo'] or self._notice['config']['seo_301']:
clear_table = self.get_connector_data(self.get_connector_url('query'), {
'query': json.dumps({
'type': 'query',
'query': "DELETE FROM `_DBPRF_lecm_rewrite` where type = 'product'"
})
})
self._notice['target']['clear'] = next_clear
return self._notice['target']['clear']
def clear_target_customers(self):
    """Delete all customer accounts from the target store, then advance the
    clear pipeline to orders.

    Removes every wp_users row whose capabilities meta marks it as a plain
    'customer', prunes usermeta rows left without a user, and (on WooCommerce
    with the admin/analytics plugin or core >= 4.0) prunes orphaned rows from
    wc_customer_lookup.

    :return: the next clear-step descriptor (also stored in
        self._notice['target']['clear']).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_orders',
        'msg': ''
    }
    if not self._notice['config']['customers']:
        self._notice['target']['clear'] = next_clear
        return next_clear
    # The serialized value a:1:{s:8:"customer";b:1;} is WordPress's marker for
    # a user whose only role is "customer".
    # Fixed: the meta_key IN (...) list repeated the same literal twice; a
    # plain equality test is equivalent.
    del_user_query = "DELETE _DBPRF_users FROM _DBPRF_users " \
                     "LEFT JOIN _DBPRF_usermeta ON _DBPRF_users.ID = _DBPRF_usermeta.user_id " \
                     "WHERE _DBPRF_usermeta.meta_key = '_DBPRF_capabilities' " \
                     "AND _DBPRF_usermeta.meta_value = 'a:1:{s:8:\"customer\";b:1;}'"
    clear_table = self.get_connector_data(self.get_connector_url('query'), {
        'query': json.dumps({
            'type': 'query', 'query': del_user_query
        })
    })
    if (not clear_table) or (clear_table['result'] != 'success') or (not clear_table['data']):
        self.log("Clear data failed. Error: Could not empty customers ", 'clear')
    # Remove meta rows whose owning user no longer exists.
    del_user_meta_query = "DELETE _DBPRF_usermeta FROM _DBPRF_usermeta " \
                          "LEFT JOIN _DBPRF_users ON _DBPRF_usermeta.user_id = _DBPRF_users.ID WHERE _DBPRF_users.ID IS NULL"
    clear_table = self.get_connector_data(self.get_connector_url('query'), {
        'query': json.dumps({
            'type': 'query', 'query': del_user_meta_query
        })
    })
    # wc_customer_lookup only exists on newer WooCommerce installs.
    if self._notice['target']['support'].get('plugin_woo_admin') or self.convert_version(self._notice['target']['config']['version'], 2) > 399:
        del_customer_lookup_query = "DELETE _DBPRF_wc_customer_lookup FROM _DBPRF_wc_customer_lookup LEFT JOIN _DBPRF_users ON _DBPRF_wc_customer_lookup.user_id = _DBPRF_users.ID WHERE _DBPRF_users.ID IS NULL"
        clear_table = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': del_customer_lookup_query
            })
        })
    self._notice['target']['clear'] = next_clear
    return self._notice['target']['clear']
def _check_order_exists(self):
    """Return True while at least one order or refund post remains on the target."""
    probe_query = {
        'type': 'select',
        'query': "SELECT ID FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund') LIMIT 1"
    }
    probe = self.get_connector_data(self.get_connector_url('query'),
                                    {'query': json.dumps(probe_query)})
    return bool(probe['data'])
def clear_target_orders(self):
    """Delete all orders and order refunds (and their orphaned post meta)
    from the target store, then advance the clear pipeline to reviews.

    Loops until _check_order_exists() reports the posts table is free of
    order rows.

    :return: the next clear-step descriptor (also stored in
        self._notice['target']['clear']).
    """
    next_clear = {
        'result': 'process',
        'function': 'clear_target_reviews',
        'msg': ''
    }
    if not self._notice['config']['orders']:
        self._notice['target']['clear'] = next_clear
        return next_clear
    while self._check_order_exists():
        # Delete the order posts themselves.
        del_orders_query = {
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_posts` WHERE post_type IN ('shop_order', 'shop_order_refund')"
        }
        clear_table = self.get_connector_data(self.get_connector_url('query'),
                                              {'query': json.dumps(del_orders_query)})
        if (not clear_table) or (clear_table['result'] != 'success'):
            self.log("Clear data failed. Error: Could not empty orders", 'clear')
            continue
        # Delete meta rows whose parent post no longer exists.
        # Fixed: the original statement used type 'select' for a DELETE,
        # referenced a non-existent `_DBPRF_post_meta` table while deleting
        # the unaliased `_DBPRF_postmeta`, and joined on pm.meta_id instead
        # of pm.post_id.
        del_meta_query = {
            'type': 'query',
            'query': "DELETE pm FROM `_DBPRF_postmeta` pm LEFT JOIN `_DBPRF_posts` p ON p.ID = pm.post_id"
                     " WHERE p.ID IS NULL"
        }
        clear_table = self.get_connector_data(self.get_connector_url('query'),
                                              {'query': json.dumps(del_meta_query)})
        if (not clear_table) or (clear_table['result'] != 'success'):
            self.log("Clear data failed. Error: Could not empty order meta", 'clear')
            continue
    self._notice['target']['clear'] = next_clear
    return self._notice['target']['clear']
def clear_target_reviews(self):
    """Empty the comment tables (product reviews) on the target, then move the
    clear pipeline on to blogs."""
    next_clear = {
        'result': 'process',
        'function': 'clear_target_blogs',
        'msg': ''
    }
    if not self._notice['config']['reviews']:
        self._notice['target']['clear'] = next_clear
        return next_clear
    for table_name in ('commentmeta', 'comments'):
        # Stay on this step while tables are being truncated.
        self._notice['target']['clear']['result'] = 'process'
        self._notice['target']['clear']['function'] = 'clear_target_reviews'
        delete_result = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table_name + "`"
            })
        })
        if (not delete_result) or (delete_result['result'] != 'success'):
            self.log("Clear data failed. Error: Could not empty table " + table_name, 'clear')
            continue
    self._notice['target']['clear'] = next_clear
    return self._notice['target']['clear']
# def clear_target_blogs(self):
# next_clear = {
# 'result': 'process',
# 'function': 'clear_target_coupons',
# 'msg': ''
# }
# self._notice['target']['clear'] = next_clear
# if not self._notice['config'].get('blogs'):
# return next_clear
# all_queries = {
# 'term': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_terms WHERE term_id IN (SELECT term_id FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['category', 'post_tag']) + ')'
# },
# 'term_taxonomy': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['category', 'post_tag'])
# },
# 'term_relationship': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_term_relationships WHERE object_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "post")'
# },
# 'postmeta': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_postmeta WHERE post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "post")'
# },
# 'posts': {
# 'type': 'delete',
# 'query': 'DELETE FROM _DBPRF_posts WHERE post_type = "post"'
# },
# }
# delete = self.query_multiple_data_connector(all_queries, 'clear_blog')
# return next_clear
def clear_target_coupons(self):
    """Delete all coupon posts and their meta from the target, then advance
    the clear pipeline to pages."""
    next_clear = {
        'result': 'process',
        'function': 'clear_target_pages',
        'msg': ''
    }
    self._notice['target']['clear'] = next_clear
    if not self._notice['config']['coupons']:
        return next_clear
    for table_name in ('postmeta', 'posts'):
        # Meta rows are selected via their parent coupon post.
        if table_name == 'postmeta':
            where = ' post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "shop_coupon")'
        else:
            where = ' post_type = "shop_coupon"'
        delete_result = self.get_connector_data(self.get_connector_url('query'), {
            'query': json.dumps({
                'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table_name + "` WHERE " + where
            })
        })
        if (not delete_result) or (delete_result['result'] != 'success'):
            self.log("Clear data failed. Error: Could not empty table " + table_name, 'clear')
            continue
    return next_clear
# def clear_target_pages(self):
# next_clear = {
# 'result': 'process',
# 'function': '',
# 'msg': ''
# }
# self._notice['target']['clear'] = next_clear
# if not self._notice['config']['pages']:
# return next_clear
# tables = [
# 'postmeta',
# 'posts'
# ]
# for table in tables:
# where = ' post_type = "page"'
# if table == 'postmeta':
# where = ' post_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = "page")'
# clear_table = self.get_connector_data(self.get_connector_url('query'), {
# 'query': json.dumps({
# 'type': 'query', 'query': "DELETE FROM `_DBPRF_" + table + "` WHERE " + where
# })
# })
# if (not clear_table) or (clear_table['result'] != 'success'):
# self.log("Clear data failed. Error: Could not empty table " + table, 'clear')
# continue
# return next_clear
# TODO: TAX
def prepare_taxes_import(self):
    """Hook run before tax import; WooCommerce needs no preparation."""
    return self
def prepare_taxes_export(self):
    """Hook run before tax export; WooCommerce needs no preparation."""
    return self
def get_taxes_main_export(self):
    """Build the list of tax classes to export.

    WooCommerce stores extra tax classes as a newline-separated list in the
    'woocommerce_tax_classes' option; the built-in 'Standard' class is
    implicit. The result prefixes Standard with id 1 and numbers the option
    entries from 2.

    :return: response_success with 'data' = [{'id': int, 'name': str}, ...],
        or response_error when the option row cannot be read.
    """
    query = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'"
    }
    taxes = self.select_data_connector(query, 'taxes')
    if not taxes or taxes['result'] != 'success':
        return response_error('could not get taxes main to export')
    list_taxes = response_success()
    if taxes['data'] and to_len(taxes['data']) > 0:
        list_taxes['data'] = list()
        for tax in taxes['data']:
            class_names = tax['option_value'].splitlines()
            if class_names:
                tmp_taxes = [
                    {
                        'id': 1,
                        'name': 'Standard'
                    }
                ]
                # Extra classes are numbered from 2, after the implicit
                # Standard class (replaces the original manual counter).
                for index, tax_name in enumerate(class_names, start=2):
                    tmp_taxes.append({'id': index, 'name': tax_name})
                list_taxes['data'].extend(tmp_taxes)
    return list_taxes
def get_taxes_ext_export(self, taxes):
    """Fetch the tax rate rows and their location restrictions for export.

    Reads every row of woocommerce_tax_rates, then the matching
    woocommerce_tax_rate_locations rows, and merges the two result sets.

    :param taxes: response from get_taxes_main_export() (currently unused in
        the queries; all rates are fetched).
    :return: merged connector response with 'tax_rates' and
        'tax_rates_location' under 'data', or response_error on failure.
    """
    # Removed: a dead computation of slugified class names that was only
    # referenced from a commented-out WHERE clause.
    taxes_ext_queries = {
        'tax_rates': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_woocommerce_tax_rates WHERE 1"
        }
    }
    taxes_ext = self.select_multiple_data_connector(taxes_ext_queries, 'taxes')
    if not taxes_ext or taxes_ext['result'] != 'success':
        return response_error()
    tax_rate_ids = duplicate_field_value_from_list(taxes_ext['data']['tax_rates'], 'tax_rate_id')
    taxes_ext_rel_queries = {
        'tax_rates_location': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_woocommerce_tax_rate_locations WHERE tax_rate_id IN " + self.list_to_in_condition(
                tax_rate_ids),
        }
    }
    taxes_ext_rel = self.select_multiple_data_connector(taxes_ext_rel_queries, 'taxes')
    if not taxes_ext_rel or taxes_ext_rel['result'] != 'success':
        return response_error()
    taxes_ext = self.sync_connector_object(taxes_ext, taxes_ext_rel)
    return taxes_ext
def convert_tax_export(self, tax, taxes_ext):
    """Convert one WooCommerce tax class into the cart-neutral tax structure.

    :param tax: dict with 'id' and 'name' from get_taxes_main_export().
    :param taxes_ext: merged rate data ('tax_rates', 'tax_rates_location')
        from get_taxes_ext_export().
    :return: response_success(tax_data) carrying 'tax_zones' (one per
        matching rate row) and 'tax_products'.
    """
    tax_zones = list()
    # Rates reference their class by slugified name; the built-in
    # 'Standard' class is stored as an empty class string.
    tax_rate_class_1 = to_str(tax['name']).lower()
    tax_rate_class_1 = tax_rate_class_1.replace(' ', '-')
    if tax['name'] == 'Standard':
        tax_rate_class_1 = ''
    src_tax_rate = get_list_from_list_by_field(taxes_ext['data']['tax_rates'], 'tax_rate_class', tax_rate_class_1)
    if src_tax_rate and to_len(src_tax_rate) > 0:
        for tax_rate in src_tax_rate:
            tax_zone = self.construct_tax_zone()
            # tax_zone = self.addConstructDefault(tax_zone)
            tax_zone['id'] = tax_rate['tax_rate_id']
            tax_zone['name'] = tax_rate['tax_rate_name']
            tax_zone_country = self.construct_tax_zone_country()
            tax_zone_country['name'] = self.get_country_name_by_code(tax_rate['tax_rate_country']) if tax_rate['tax_rate_country'] else ''
            tax_zone_country['code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_country', '')
            tax_zone_country['country_code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_country', '')
            tax_zone['country'] = tax_zone_country
            tax_zone_state = self.construct_tax_zone_state()
            tax_zone_state['name'] = ''
            tax_zone_state['code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_state', '')
            tax_zone_state['state_code'] = get_value_by_key_in_dict(tax_rate, 'tax_rate_state', '')
            tax_zone['state'] = tax_zone_state
            tax_zone['rate'] = self.construct_tax_zone_rate()
            tax_zone['rate']['id'] = tax_rate['tax_rate_id']
            tax_zone['rate']['name'] = tax_rate['tax_rate_name']
            tax_zone['rate']['code'] = tax_rate['tax_rate_class']
            tax_zone['rate']['rate'] = tax_rate['tax_rate']
            # Location rows hold the rate's postcode / city restrictions.
            tax_rates_locations = get_list_from_list_by_field(taxes_ext['data']['tax_rates_location'], 'tax_rate_id', tax_rate['tax_rate_id'])
            tax_zone_city = get_list_from_list_by_field(tax_rates_locations, 'location_type', 'city')
            tax_zone['postcode'] = get_row_value_from_list_by_field(tax_rates_locations, 'location_type', 'postcode', 'location_code')
            if tax_zone_city:
                # Multiple cities are flattened into one ';'-separated string.
                for _tax_zone_city in tax_zone_city:
                    tax_zone['city'] += _tax_zone_city['location_code'] + ';'
            tax_zone['priority'] = tax_rate['tax_rate_priority']
            tax_zone['compound'] = True if tax_rate['tax_rate_compound'] and to_int(tax_rate['tax_rate_compound']) == 1 else False
            tax_zone['is_shipping'] = True if tax_rate['tax_rate_shipping'] and to_int(tax_rate['tax_rate_shipping']) == 1 else False
            tax_zones.append(tax_zone)
    tax_product = self.construct_tax_product()
    tax_product = self.add_construct_default(tax_product)
    tax_code = to_str(tax['name']).lower()
    tax_code = tax_code.replace(' ', '-')
    tax_product['name'] = tax['name']
    tax_product['code'] = tax_code
    tax_product['created_at'] = get_current_time()
    tax_product['updated_at'] = get_current_time()
    tax_products = [tax_product]
    tax_data = self.construct_tax()
    tax_data = self.add_construct_default(tax_data)
    # id_src = self._notice['process']['taxes']['id_src']
    tax_data['id'] = tax['id']
    tax_data['code'] = tax_code  # slugified name, not the display name
    tax_data['name'] = tax['name']
    tax_data['created_at'] = get_current_time()
    tax_data['updated_at'] = get_current_time()
    tax_data['tax_zones'] = tax_zones
    tax_data['tax_products'] = tax_products
    return response_success(tax_data)
def get_tax_id_import(self, convert, tax, taxes_ext):
    """Return the source-side identifier used to map this tax record."""
    return tax['id']
def check_tax_import(self, convert, tax, taxes_ext):
    """Return True when this tax was already imported (a mapping row exists)."""
    existing = self.get_map_field_by_src(self.TYPE_TAX, convert['id'], convert['code'])
    return bool(existing)
def router_tax_import(self, convert, tax, taxes_ext):
    """Route every tax record to the standard 'tax_import' handler."""
    route = response_success('tax_import')
    return route
def before_tax_import(self, convert, tax, taxes_ext):
    """Pre-import hook for taxes; nothing to do for WooCommerce."""
    return response_success()
def tax_import(self, convert, tax, taxes_ext):
    """Create the tax class on the target store and record its mapping.

    Non-standard classes are appended to the newline-separated
    'woocommerce_tax_classes' option (created if missing); on WooCommerce
    >= 3.7 a row is also inserted into wc_tax_rate_classes. The built-in
    'Standard' class needs no rows of its own but is still mapped.

    :return: response_success(convert['id']).
    """
    slug = self.sanitize_title(convert['name'])
    if convert['name'] != 'Standard':
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_options WHERE option_name = 'woocommerce_tax_classes'"
        }
        taxes = self.select_data_connector(query, 'taxes')
        if taxes and taxes['data']:
            old_tax_data = taxes['data'][0]
            # Append the new class name on its own line (option may be empty).
            new_option_value = old_tax_data['option_value'] + '\n' + convert['name'] if old_tax_data['option_value'] else convert['name']
            # NOTE(review): new_option_value is interpolated into the SQL
            # string unescaped; a class name containing a quote would break
            # this statement — consider escaping/parameterizing.
            query_update = {
                'type': 'query',
                'query': "UPDATE `_DBPRF_options` SET `option_value` = '" + new_option_value + "' WHERE `option_name` = 'woocommerce_tax_classes'"
            }
            taxes = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_update)})
        else:
            # Option row does not exist yet: create it with this class only.
            tax_data = {
                'option_name': 'woocommerce_tax_classes',
                'option_value': convert['name'],
                'autoload': 'yes'
            }
            tax_query = self.create_insert_query_connector('options', tax_data)
            tax_import = self.import_tax_data_connector(tax_query, True, convert['id'])
        # WooCommerce >= 3.7 additionally keeps classes in wc_tax_rate_classes.
        if self.convert_version(self._notice['target']['config']['version'], 2) >= 370:
            tax_rate_classes = {
                'name': convert['name'],
                'slug': slug
            }
            tax_rate_classes_query = self.create_insert_query_connector('wc_tax_rate_classes', tax_rate_classes)
            tax_rate_classes_import = self.import_data_connector(tax_rate_classes_query, 'wc_tax_rate_classes')
    # Map source id -> slugified class code (Standard included).
    tax_code = to_str(convert['name']).lower()
    tax_code = self.sanitize_title(tax_code.replace(' ', '-'))
    self.insert_map(self.TYPE_TAX, convert['id'], 0, convert['code'], tax_code)
    return response_success(convert['id'])
def after_tax_import(self, tax_id, convert, tax, taxes_ext):
    """Insert the tax rate rows (and their postcode/city location rows) for an
    imported tax class.

    Each entry of convert['tax_zones'] becomes one woocommerce_tax_rates row;
    'postcode' and each ';'-separated city become woocommerce_tax_rate_locations
    rows pointing at it.

    :return: response_success().
    """
    if convert['tax_zones']:
        tax_code = to_str(convert['name']).lower()
        tax_code = tax_code.replace(' ', '-')
        for tax_zone in convert['tax_zones']:
            tax_rate = {
                'tax_rate_country': tax_zone['country']['country_code'],
                'tax_rate_state': tax_zone['state']['state_code'] if tax_zone['state']['state_code'] else '*',
                'tax_rate': tax_zone['rate']['rate'] if tax_zone['rate']['rate'] else '*',
                'tax_rate_name': tax_zone['rate']['name'] if tax_zone['rate']['name'] else 'Tax',
                'tax_rate_priority': tax_zone.get('priority', 1),
                'tax_rate_compound': 1 if tax_zone.get('compound') else 0,
                'tax_rate_shipping': 1 if tax_zone.get('is_shipping') else 0,
                'tax_rate_order': 0,
                # The built-in Standard class is stored with an empty class slug.
                'tax_rate_class': '' if convert['name'] == 'Standard' else self.convert_attribute_code(tax_code)
            }
            tax_rate_query = self.create_insert_query_connector('woocommerce_tax_rates', tax_rate)
            tax_rate_import = self.import_data_connector(tax_rate_query, 'tax')
            if get_value_by_key_in_dict(tax_zone, 'postcode', False):
                location_postcode = {
                    'location_code': get_value_by_key_in_dict(tax_zone, 'postcode', ''),
                    'tax_rate_id': tax_rate_import,
                    'location_type': 'postcode'
                }
                self.import_data_connector(
                    self.create_insert_query_connector('woocommerce_tax_rate_locations', location_postcode), 'tax')
            if get_value_by_key_in_dict(tax_zone, 'city', False):
                # 'city' is a ';'-separated list; one location row per city.
                tax_zone_city = tax_zone['city'].split(';')
                if tax_zone_city:
                    for _tax_zone_city in tax_zone_city:
                        if _tax_zone_city != '' and _tax_zone_city != ' ':
                            location_city = {
                                # Fixed: the original inserted the whole
                                # ';'-joined city string into every row
                                # instead of the individual city.
                                'location_code': _tax_zone_city,
                                'tax_rate_id': tax_rate_import,
                                'location_type': 'city'
                            }
                            self.import_data_connector(self.create_insert_query_connector('woocommerce_tax_rate_locations', location_city), 'tax')
    return response_success()
def addition_tax_import(self, convert, tax, taxes_ext):
    """Post-import addition hook for taxes; nothing extra to do."""
    return response_success()
# TODO: MANUFACTURER
def prepare_manufacturers_import(self):
    """Hook run before manufacturer import; WooCommerce needs no preparation."""
    return self
def prepare_manufacturers_export(self):
    """Hook run before manufacturer export; WooCommerce needs no preparation."""
    return self
def get_manufacturers_main_export(self):
    """Fetch the next batch of brand terms for export.

    Brands may live under the 'product_brand', 'brand' or 'pwb-brand'
    taxonomy depending on the installed brand plugin; pagination uses the
    stored id_src cursor and the configured batch size.
    """
    cursor_id = self._notice['process']['manufacturers']['id_src']
    batch_size = self._notice['setting']['manufacturers']
    brand_query = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id"
                 " WHERE (tx.taxonomy = 'product_brand' OR tx.taxonomy = 'brand' OR tx.taxonomy = 'pwb-brand')"
                 " AND tx.term_id > " + to_str(cursor_id) + " ORDER BY tx.term_id ASC LIMIT " + to_str(batch_size)
    }
    manufacturers = self.select_data_connector(brand_query, 'manufacturers')
    if not manufacturers or manufacturers['result'] != 'success':
        return response_error('could not get manufacturers main to export')
    return manufacturers
def get_manufacturers_ext_export(self, manufacturers):
    """Fetch auxiliary data for the exported brand terms: term meta (order,
    thumbnail, display type), brand images stored in wp_options, and the
    attachment posts backing the thumbnails.

    :param manufacturers: response from get_manufacturers_main_export().
    :return: merged connector response, or response_warning/response_error
        on failure.
    """
    url_query = self.get_connector_url('query')
    category_ids = duplicate_field_value_from_list(manufacturers['data'], 'term_id')
    cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
    manufacturers_ext_queries = {
        'all_category': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tx.taxonomy = 'product_cat' AND tx.term_id > 0 "
        }
    }
    # WooCommerce moved term meta from its own woocommerce_termmeta table
    # into core wp_termmeta around 2.6 (version code 223 here).
    if cart_version > 223:
        manufacturers_ext_queries['woocommerce_termmeta'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_termmeta WHERE term_id IN " + self.list_to_in_condition(
                category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
        }
    else:
        manufacturers_ext_queries['woocommerce_termmeta'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_woocommerce_termmeta WHERE woocommerce_term_id IN " + self.list_to_in_condition(
                category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
        }
    # Some brand plugins store the brand image URL in wp_options.
    manufacturers_ext_queries['brand_taxonomy_images'] = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_options WHERE option_name IN " + self.brand_image_in_condition(category_ids)
    }
    # manufacturers_ext = self.get_connector_data(url_query, {
    #     'serialize': True,
    #     'query': json.dumps(manufacturers_ext_queries)
    # })
    manufacturers_ext = self.select_multiple_data_connector(manufacturers_ext_queries, 'manufacturers')
    if not manufacturers_ext or manufacturers_ext['result'] != 'success':
        return response_warning()
    # Resolve thumbnail attachment posts (file path + guid) in a second pass.
    thumb_id_list = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'meta_key',
                                                'thumbnail_id')
    thumbnail_ids = duplicate_field_value_from_list(thumb_id_list, 'meta_value')
    thumb_ids_query = self.list_to_in_condition(thumbnail_ids)
    manufacturers_ext_rel_queries = {
        'post_meta': {
            'type': 'select',
            'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
                     "LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' WHERE p.ID IN " + thumb_ids_query
        }
    }
    # add custom
    if manufacturers_ext_rel_queries:
        # manufacturers_ext_rel = self.get_connector_data(url_query, {
        #     'serialize': True,
        #     'query': json.dumps(manufacturers_ext_rel_queries)
        # })
        manufacturers_ext_rel = self.select_multiple_data_connector(manufacturers_ext_rel_queries, 'manufacturers')
        if not manufacturers_ext_rel or manufacturers_ext_rel['result'] != 'success':
            return response_error()
        manufacturers_ext = self.sync_connector_object(manufacturers_ext, manufacturers_ext_rel)
    return manufacturers_ext
def convert_manufacturer_export(self, manufacturer, manufacturers_ext):
    """Convert one brand term row into the cart-neutral manufacturer structure.

    Resolves the brand's thumbnail image (from term meta + attachment post,
    optionally overridden by a brand image stored in wp_options) and fills
    the default-language entry.

    :return: response_success(manufacturer_data).
    """
    manufacturer_data = self.construct_manufacturer()
    manufacturer_data = self.add_construct_default(manufacturer_data)
    manufacturer_path = manufacturer_url = img_label = ''
    cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
    manufacturer_src = False
    # Term meta key column differs across WooCommerce versions (see
    # get_manufacturers_ext_export).
    if cart_version > 223:
        manufacturer_src = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'term_id', manufacturer['term_id'])
    else:
        manufacturer_src = get_list_from_list_by_field(manufacturers_ext['data']['woocommerce_termmeta'], 'woocommerce_term_id', manufacturer['term_id'])
    if manufacturer_src:
        manufacturer_img_id = self.get_value_metadata(manufacturer_src, 'thumbnail_id', 0)
        img_meta = get_list_from_list_by_field(manufacturers_ext['data']['post_meta'], 'ID', manufacturer_img_id)
        if img_meta:
            img_label = img_meta[0]['post_title']
            manufacturer_path = img_meta[0]['meta_value']
            # guid minus the attached-file suffix = base upload URL.
            manufacturer_url = to_str(img_meta[0]['guid']).replace(img_meta[0]['meta_value'], '')
    # A plugin-stored brand image URL takes precedence over the thumbnail.
    brand_image = get_row_value_from_list_by_field(manufacturers_ext['data']['brand_taxonomy_images'], 'option_name', "brand_taxonomy_image" + to_str(manufacturer['term_id']), 'option_value')
    if brand_image:
        manufacturer_url = brand_image
    manufacturer_data['id'] = manufacturer['term_id']
    manufacturer_data['code'] = manufacturer['slug']
    manufacturer_data['name'] = manufacturer['name']
    manufacturer_data['description'] = manufacturer['description']
    manufacturer_data['thumb_image']['label'] = img_label
    manufacturer_data['thumb_image']['url'] = manufacturer_url
    manufacturer_data['thumb_image']['path'] = manufacturer_path
    manufacturer_data['created_at'] = get_current_time()
    manufacturer_data['updated_at'] = get_current_time()
    language_id = self._notice['src']['language_default']
    manufacturer_language_data = dict()
    manufacturer_language_data['name'] = manufacturer['name']
    manufacturer_language_data['description'] = manufacturer['description']
    manufacturer_data['languages'][language_id] = manufacturer_language_data
    # Keep the raw rows for downstream import steps.
    manufacturer_data['manufacturer'] = manufacturer
    manufacturer_data['manufacturers_ext'] = manufacturers_ext
    return response_success(manufacturer_data)
def get_manufacturer_id_import(self, convert, manufacturer, manufacturers_ext):
    """Return the source-side identifier used to map this manufacturer."""
    return manufacturer['term_id']
def check_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Return True when this manufacturer was already imported (mapping exists)."""
    existing = self.get_map_field_by_src(self.TYPE_MANUFACTURER, convert['id'])
    return bool(existing)
def router_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Route every manufacturer record to the standard 'manufacturer_import' handler."""
    route = response_success('manufacturer_import')
    return route
def before_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Pre-import hook for manufacturers; nothing to do for WooCommerce."""
    return response_success()
def manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Create a brand term on the target store.

    Inserts the term + term_taxonomy rows under the configured brand
    taxonomy (default 'berocket_brand'), records the id mapping, uploads
    the brand image and writes its term meta, and updates the Yoast SEO
    taxonomy meta option when WPML/Yoast support is detected.

    :return: response_success(term_taxonomy_id) or response_warning on
        insert failure.
    """
    slug = self.sanitize_title(convert['name'])
    manufacturer_term = {
        'name': convert['name'],
        'slug': convert['code'] if convert['code'] else slug,
        'term_group': 0,
    }
    manufacturer_term_query = self.create_insert_query_connector('terms', manufacturer_term)
    term_id = self.import_data_connector(manufacturer_term_query, 'category')
    if not term_id:
        return response_warning('Manufacturer ' + to_str(convert['id']) + ' import false.')
    # Brand taxonomy name depends on the installed brand plugin; it can be
    # overridden via target config.
    taxonomy = 'berocket_brand'
    if self._notice['target']['config'].get('brand_taxonomy'):
        taxonomy = self._notice['target']['config']['brand_taxonomy']
    manufacturer_taxonomy = {
        'term_id': term_id,
        'taxonomy': taxonomy,
        'description': get_value_by_key_in_dict(convert, 'description', ''),
        'parent': 0,
        'count': 0
    }
    manufacturer_taxonomy_query = self.create_insert_query_connector('term_taxonomy', manufacturer_taxonomy)
    manufacturer_taxonomy_import = self.import_manufacturer_data_connector(manufacturer_taxonomy_query, True, convert['id'])
    if not manufacturer_taxonomy_import:
        return response_warning('manufacturer ' + to_str(convert['id']) + ' import false.')
    self.insert_map(self.TYPE_MANUFACTURER, convert['id'], manufacturer_taxonomy_import, convert['code'])
    thumbnail_id = False
    cate_image = ''
    # Upload the brand image (if any) and register it as a WP attachment.
    if convert['thumb_image']['url'] or convert['thumb_image']['path']:
        image_process = self.process_image_before_import(convert['thumb_image']['url'], convert['thumb_image']['path'])
        image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(self.make_woocommerce_image_path(image_process['path'], self.TYPE_MANUFACTURER), self._notice['target']['config']['image_manufacturer'].rstrip('/')))
        if image_import_path:
            cate_image = self.remove_prefix_path(image_import_path, self._notice['target']['config']['image_category'])
            image_details = self.get_sizes(image_process['url'])
            thumbnail_id = self.wp_image(cate_image, image_details)
    if thumbnail_id:
        # Write the image under the meta keys used by the different brand
        # plugins (pwb_brand_image / thumbnail_id / brand_image_url).
        meta_insert = {
            'term_id': term_id,
            # 'meta_key': 'thumbnail_id',
            'meta_key': 'pwb_brand_image',
            'meta_value': thumbnail_id
        }
        meta_query = self.create_insert_query_connector('termmeta', meta_insert)
        self.import_data_connector(meta_query, 'manufacturer')
        meta_insert = {
            'term_id': term_id,
            # 'meta_key': 'thumbnail_id',
            'meta_key': 'thumbnail_id',
            'meta_value': thumbnail_id
        }
        meta_query = self.create_insert_query_connector('termmeta', meta_insert)
        self.import_data_connector(meta_query, 'manufacturer')
        meta_insert = {
            'term_id': term_id,
            'meta_key': 'brand_image_url',
            'meta_value': self._notice['target']['cart_url'].rstrip('/') + '/wp-content/uploads/' + cate_image.lstrip('/')
        }
        meta_query = self.create_insert_query_connector('termmeta', meta_insert)
        self.import_data_connector(meta_query, 'manufacturer')
    # Yoast SEO keeps per-term meta in one serialized option; merge our
    # entry into it (or create the option when absent).
    if self.is_wpml() or self._notice['target']['support']['yoast_seo']:
        query_wpseo = {
            'type': 'select',
            'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
        }
        options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_wpseo)})
        if options_data and options_data['data']:
            option_value = php_unserialize(options_data['data'][0]['option_value'])
            if not option_value:
                option_value = dict()
            if taxonomy not in option_value.keys():
                option_value[taxonomy] = dict()
            option_value[taxonomy][to_int(term_id)] = {
                'wpseo_title': get_value_by_key_in_dict(convert, 'meta_title', ''),
                'wpseo_desc': get_value_by_key_in_dict(convert, 'meta_description', ''),
                'wpseo_linkdex': 0,
                'wpseo_content_score': 0
            }
            data_set = {
                'option_value': php_serialize(option_value)
            }
            where = {
                'option_id': options_data['data'][0]['option_id'],
                'option_name': 'wpseo_taxonomy_meta'
            }
            self.import_data_connector(self.create_update_query_connector('options', data_set, where), 'manufacturer')
        else:
            new_option_data = {
                'option_name': 'wpseo_taxonomy_meta',
                'option_value': php_serialize({
                    taxonomy: {
                        to_int(term_id): {
                            'wpseo_title': get_value_by_key_in_dict(convert, 'meta_title', ''),
                            'wpseo_desc': get_value_by_key_in_dict(convert, 'meta_description', ''),
                            'wpseo_linkdex': 0,
                            'wpseo_content_score': 0
                        }
                    }
                }),
                'autoload': 'yes'
            }
            self.import_data_connector(self.create_insert_query_connector('options', new_option_data), 'manufacturer')
    return response_success(manufacturer_taxonomy_import)
def after_manufacturer_import(self, manufacturer_id, convert, manufacturer, manufacturers_ext):
    """Post-import hook for manufacturers; nothing extra to do."""
    return response_success()
def addition_manufacturer_import(self, convert, manufacturer, manufacturers_ext):
    """Post-import addition hook for manufacturers; nothing extra to do."""
    return response_success()
# TODO: CATEGORY
def prepare_categories_import(self):
    """Run base category-import preparation and, when SEO URL migration is
    enabled, ensure the lecm_rewrite table exists on the target.

    :return: self (fluent style, matching the other prepare_* hooks).
    """
    # The base-class return value was previously stored in an unused local;
    # the call is kept for its side effects only.
    super().prepare_categories_import()
    if self._notice['config']['seo'] or self._notice['config']['seo_301']:
        query = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
        self.query_data_connector({'type': 'query', 'query': query['query']})
    return self
def prepare_categories_export(self):
    """Hook run before category export; WooCommerce needs no preparation."""
    return self
def get_categories_main_export(self):
    """Fetch the next batch of product categories for export.

    Pagination uses the stored id_src cursor and the configured batch size.
    On WPML sources only original-language categories are selected
    (rows whose icl_translations.source_language_code is NULL).

    :return: connector response with the category rows, or response_error.
    """
    id_src = self._notice['process']['categories']['id_src']
    limit = self._notice['setting']['categories']
    query = {
        'type': 'select',
        'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id"
                 " WHERE tx.taxonomy = 'product_cat' AND tx.term_id > " + to_str(
            id_src) + " AND t.term_id IS NOT NULL ORDER BY tx.term_id ASC LIMIT " + to_str(limit)
    }
    if self._notice['src']['support']['wpml']:
        query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy tt "
                     "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                     "LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
                     "WHERE il.`element_type` = 'tax_product_cat' and il.`source_language_code` IS NULL and tt.taxonomy = 'product_cat' and tt.term_id > " + to_str(
                id_src) + " ORDER BY tt.term_id ASC LIMIT " + to_str(limit),
        }
    categories = self.select_data_connector(query, 'categories')
    if not categories or categories['result'] != 'success':
        # Fixed: the error message previously said "manufacturers".
        return response_error('could not get categories main to export')
    return categories
def get_categories_ext_export(self, categories):
    """Fetch auxiliary data for the exported categories: the full category
    tree, parent rows, term meta (order/thumbnail/display type), WPML
    translations when supported, and the attachment posts backing the
    thumbnails.

    :param categories: response from get_categories_main_export(); when
        categories.get('is_blog') is set, the blog 'category' taxonomy is
        used instead of 'product_cat'.
    :return: merged connector response, or response_warning/response_error
        on failure.
    """
    url_query = self.get_connector_url('query')
    category_ids = duplicate_field_value_from_list(categories['data'], 'term_id')
    parent_ids = duplicate_field_value_from_list(categories['data'], 'parent')
    cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
    taxonomy_type = 'product_cat' if not categories.get('is_blog') else 'category'
    categories_ext_queries = {
        'all_category': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id > 0 "
        },
        'seo_categories': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                     "WHERE tx.taxonomy = '" + taxonomy_type + "' AND tx.term_id IN " + self.list_to_in_condition(parent_ids)
        }
    }
    # Term meta moved from woocommerce_termmeta into core wp_termmeta in
    # newer WooCommerce versions (version code 255 here).
    if cart_version > 255:
        categories_ext_queries['woocommerce_termmeta'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_termmeta WHERE term_id IN " + self.list_to_in_condition(
                category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
        }
    else:
        categories_ext_queries['woocommerce_termmeta'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_woocommerce_termmeta WHERE woocommerce_term_id IN " + self.list_to_in_condition(
                category_ids) + " AND meta_key IN ('order', 'thumbnail_id', 'display_type')"
        }
    # add wpml
    if self._notice['src']['support']['wpml']:
        categories_ext_queries['icl_translations'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_icl_translations WHERE element_type = 'tax_product_cat' and element_id IN " + self.list_to_in_condition(
                category_ids)
        }
    # categories_ext = self.get_connector_data(url_query, {
    #     'serialize': True,
    #     'query': json.dumps(categories_ext_queries)
    # })
    categories_ext = self.get_connector_data(url_query, {
        'serialize': True,
        'query': json.dumps(categories_ext_queries)
    })
    if not categories_ext or categories_ext['result'] != 'success':
        return response_warning()
    # Second pass: resolve thumbnail attachment posts and, for WPML, all
    # translations sharing the found trids.
    thumb_id_list = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'meta_key', 'thumbnail_id')
    thumbnail_ids = duplicate_field_value_from_list(thumb_id_list, 'meta_value')
    thumb_ids_query = self.list_to_in_condition(thumbnail_ids)
    categories_ext_rel_queries = {
        'post_meta': {
            'type': 'select',
            'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
                     "LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' WHERE p.ID IN " + thumb_ids_query
        }
        # 'seo_category': array(
        #     'type': 'select',
        #     'query': "SELECT * FROM _DBPRF_options WHERE option_id = 235866",
        # ),
    }
    if self._notice['src']['support']['wpml']:
        trids = duplicate_field_value_from_list(categories_ext['data']['icl_translations'], 'trid')
        categories_ext_rel_queries['wpml_category_lang'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_icl_translations il "
                     "LEFT JOIN _DBPRF_term_taxonomy as tx ON il.element_id = tx.term_id "
                     "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                     "WHERE il.element_type = 'tax_product_cat' and il.trid IN " + self.list_to_in_condition(trids)
        }
    # add seo
    # if (self._notice['config']['seo']){
    #     ext_rel_seo_queries = model_seo->getCategoriesSeoExtRelQuery(this, categories, categories_ext)
    #     categories_ext_rel_queries = array_merge(categories_ext_rel_queries, ext_rel_seo_queries)
    # }
    # add custom
    if categories_ext_rel_queries:
        # categories_ext_rel = self.get_connector_data(url_query, {
        #     'serialize': True,
        #     'query': json.dumps(categories_ext_rel_queries)
        # })
        categories_ext_rel = self.select_multiple_data_connector(categories_ext_rel_queries, 'categories')
        if not categories_ext_rel or categories_ext_rel['result'] != 'success':
            return response_error()
        categories_ext = self.sync_connector_object(categories_ext, categories_ext_rel)
    return categories_ext
def convert_category_export(self, category, categories_ext):
    """Map one raw WooCommerce category row onto the internal category construct.

    :param category: a row joined from ``_DBPRF_terms`` / ``_DBPRF_term_taxonomy``
        (keys used here: term_id, slug, name, description, parent, term_taxonomy_id).
    :param categories_ext: auxiliary data bundle (term meta, attachment meta,
        WPML translation rows) fetched for the whole category batch.
    :return: ``response_success(category_data)`` with the filled construct.
    """
    # Blog migrations use a different (simpler) construct than shop categories.
    category_data = self.construct_category() if not self.blog_running else self.construct_blog_category()
    # category_data = self.add_construct_default(category_data)
    # Default parent is the root (id 0); overwritten below when a real parent exists.
    parent = self.construct_category_parent() if not self.blog_running else self.construct_blog_category()
    parent['id'] = 0
    if category['parent'] and to_int(category['parent']) > 0:
        parent_data = self.get_category_parent(category['parent'])
        if parent_data['result'] == 'success' and parent_data['data']:
            parent = parent_data['data']
    category_path = img_meta = category_url = img_label = ''
    # The woocommerce term-meta table keys its rows by 'term_id' on newer carts
    # and 'woocommerce_term_id' on older ones; 255 is the packed version
    # threshold used for that switch (presumably ~Woo 2.6 — TODO confirm).
    cart_version = self.convert_version(self._notice['src']['config']['version'], 2)
    if cart_version > 255:
        category_src = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'term_id', category['term_id'])
    else:
        category_src = get_list_from_list_by_field(categories_ext['data']['woocommerce_termmeta'], 'woocommerce_term_id', category['term_id'])
    if category_src:
        # Resolve the category thumbnail: thumbnail_id meta -> attachment post row.
        category_img_id = self.get_value_metadata(category_src, 'thumbnail_id', 0)
        img_meta = get_list_from_list_by_field(categories_ext['data']['post_meta'], 'ID', category_img_id)
        if img_meta:
            img_label = img_meta[0]['post_title']
            category_path = to_str(img_meta[0]['meta_value'])
            # guid minus the relative file path leaves the base upload URL.
            category_url = to_str(img_meta[0]['guid']).replace(category_path, '')
    category_data['id'] = category['term_id']
    category_data['code'] = category['slug']
    category_data['name'] = category['name']
    category_data['description'] = category['description']
    category_data['parent'] = parent
    category_data['active'] = True
    category_data['thumb_image']['label'] = img_label
    category_data['thumb_image']['url'] = category_url
    category_data['thumb_image']['path'] = category_path
    category_data['sort_order'] = 1
    category_data['created_at'] = get_current_time()
    category_data['updated_at'] = get_current_time()
    # Keep the raw source rows attached for downstream consumers.
    category_data['category'] = category
    category_data['categories_ext'] = categories_ext
    # todo: woo2woo
    category_data['display_type'] = self.get_value_metadata(category_src, 'display_type', '')
    if self._notice['src']['support']['wpml']:
        # Collect every WPML translation in this category's translation group (trid).
        trid = get_row_value_from_list_by_field(categories_ext['data']['icl_translations'], 'element_id', category['term_taxonomy_id'], 'trid')
        if trid:
            languages_data = get_list_from_list_by_field(categories_ext['data']['wpml_category_lang'], 'trid', trid)
            if languages_data:
                for language_data in languages_data:
                    category_new_data = self.construct_category_lang()
                    category_new_data['id'] = language_data['term_id']
                    category_new_data['code'] = language_data['slug']
                    category_new_data['name'] = language_data['name']
                    category_new_data['description'] = language_data['description']
                    # language_default: prefer the row matching this term; otherwise
                    # the first row with no source_language_code (the WPML original).
                    if to_int(language_data['term_id']) == to_int(category['term_id']):
                        category_data['language_default'] = language_data['language_code']
                    elif 'language_default' not in category_data and not language_data['source_language_code']:
                        category_data['language_default'] = language_data['language_code']
                    category_data['languages'][language_data['language_code']] = category_new_data
    else:
        # No WPML: single language entry under the source default language id.
        category_language_data = self.construct_category_lang()
        language_id = self._notice['src']['language_default']
        category_language_data['name'] = category['name']
        category_language_data['description'] = category['description']
        category_data['languages'][language_id] = category_language_data
    # Yoast stores taxonomy SEO meta serialized inside one wp_options row.
    # NOTE(review): this option is fetched once per category (N+1 query) —
    # could be hoisted to the batch level; left as-is here.
    query_wpseo = {
        'type': 'select',
        'query': "SELECT * FROM `_DBPRF_options` WHERE `option_name` = 'wpseo_taxonomy_meta'"
    }
    options_data = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query_wpseo)})
    if options_data and options_data['data']:
        option_value = php_unserialize(options_data['data'][0]['option_value'])
        if option_value and 'product_cat' in option_value:
            if to_int(category['term_id']) in option_value['product_cat']:
                category_data['meta_title'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_title', '')
                category_data['meta_description'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_desc', '')
                category_data['meta_keyword'] = get_value_by_key_in_dict(option_value['product_cat'][to_int(category['term_id'])], 'wpseo_focuskw', '')
    # if self._notice['config']['seo']:
    # Dispatch to the SEO-plugin-specific extractor, e.g. categories_<plugin>().
    detect_seo = self.detect_seo()
    category_data['seo'] = getattr(self, 'categories_' + detect_seo)(category, categories_ext)
    return response_success(category_data)
def get_category_parent(self, parent_id):
    """Export the parent category identified by ``parent_id``.

    If the parent was already migrated (present in the id map) a stub with
    just the source id is returned; otherwise the parent row is fetched from
    the source database and converted like any other category.
    """
    map_type = self.TYPE_CATEGORY_BLOG if self.blog_running else self.TYPE_CATEGORY
    if self.select_map(self._migration_id, map_type, parent_id):
        # Already migrated: the caller only needs the source id to map it.
        return response_success({
            'id': parent_id,
            'code': ''
        })
    taxonomy = 'category' if self.blog_running else 'product_cat'
    if self._notice['src']['support']['wpml']:
        # WPML: restrict to original-language terms (source_language_code IS NULL).
        parent_query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy tt LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
                     "LEFT JOIN _DBPRF_icl_translations il ON tt.term_taxonomy_id = il.element_id "
                     "WHERE il.`element_type` = 'tax_product_cat' AND il.`source_language_code` IS NULL AND tt.taxonomy = '" + taxonomy + "' and tt.term_id = " + to_str(parent_id),
        }
    else:
        parent_query = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_term_taxonomy as tx LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
                     "WHERE tx.taxonomy = '" + taxonomy + "' AND tx.term_id = " + to_str(parent_id)
        }
    rows = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(parent_query)})
    if not rows or rows['result'] != 'success':
        return response_error('could not get category parent to export')
    if rows['data']:
        parent_row = rows['data'][0]
        parent_ext = self.get_categories_ext_export(rows)
        return self.convert_category_export(parent_row, parent_ext)
    return response_error('could not get category parent to export')
def get_category_id_import(self, convert, category, categories_ext):
    """Return the identifier under which this category is imported: the
    source WooCommerce term id."""
    term_id = category['term_id']
    return term_id
def check_category_import(self, convert, category, categories_ext):
    """Return True when this category was already imported (an entry exists
    in the migration map for the target default language)."""
    target_lang = self._notice['target']['language_default']
    mapped = self.get_map_field_by_src(self.TYPE_CATEGORY, convert['id'], convert['code'], lang=target_lang)
    return bool(mapped)
def router_category_import(self, convert, category, categories_ext):
    """Choose the import routine for this category.

    Always routes to 'category_import' (no alternative route here).
    """
    return response_success('category_import')
def before_category_import(self, convert, category, categories_ext):
    """Hook run before a category is imported; no-op for this cart."""
    return response_success()
def category_import(self, convert, category, categories_ext):
    """Insert one category into the target WooCommerce database.

    Creates the ``terms`` row, records the source->target id mapping,
    writes the default term meta rows, then creates the ``term_taxonomy``
    row. Returns ``response_success(new_term_id)``.
    """
    term_row = {
        'name': convert['name'],
        'slug': convert['url_key']
    }
    new_term_id = self.import_category_data_connector(
        self.create_insert_query_connector('terms', term_row), True, convert['id'])
    self.insert_map(self.TYPE_CATEGORY, convert['id'], new_term_id, convert['code'])
    # WooCommerce default term meta for a fresh product category.
    default_meta = {
        'order': 0,
        'display_type': '',
        'thumbnail_id': '',
        'product_count_product_cat': '0'
    }
    for key in default_meta:
        meta_row = {
            'term_id': new_term_id,
            'meta_key': key,
            'meta_value': default_meta[key]
        }
        # NOTE(review): second argument here is 'categories' while the other
        # calls pass True/convert id — presumably an entity tag; confirm.
        self.import_category_data_connector(self.create_insert_query_connector('termmeta', data=meta_row), 'categories')
    # NOTE(review): parent is left empty; the hierarchy is presumably wired
    # up in a later pass — confirm against the caller.
    taxonomy_row = {
        'term_id': new_term_id,
        'taxonomy': 'product_cat',
        'description': convert['description'],
        'parent': '',
        'count': ''
    }
    self.import_category_data_connector(self.create_insert_query_connector('term_taxonomy', taxonomy_row), True, convert['id'])
    return response_success(new_term_id)
def get_new_trid(self):
    """Return the next free WPML translation-group id (max(trid) + 1),
    or 1 when the translations table is empty."""
    max_query = {
        'type': 'select',
        'query': "SELECT max(trid) as trid FROM _DBPRF_icl_translations"
    }
    result = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(max_query)})
    if result['data']:
        return to_int(result['data'][0]['trid']) + 1
    return 1
def after_category_import(self, category_id, convert, category, categories_ext):
    """Hook run after a category is imported; no-op for this cart."""
    return response_success()
def addition_category_import(self, convert, category, categories_ext):
    """Hook for extra per-category import work; no-op for this cart."""
    return response_success()
# TODO: PRODUCT
def prepare_products_import(self):
    """Prepare the target for the product import step.

    Runs the base-class preparation, creates the SEO rewrite table when any
    SEO option is enabled, and removes the stale variants.csv scratch file
    unless running in add-new mode. Returns ``self`` for chaining.
    """
    super().prepare_products_import()
    config = self._notice['config']
    if config['seo'] or config['seo_301']:
        create_table = self.dict_to_create_table_sql(self.lecm_rewrite_table_construct())
        self.query_data_connector({'type': 'query', 'query': create_table['query']})
    if not config['add_new']:
        variants_csv = get_pub_path() + '/media/' + to_str(self._migration_id) + '/variants.csv'
        if os.path.isfile(variants_csv):
            os.remove(variants_csv)
    return self
def prepare_products_export(self):
    """Hook run before product export; nothing to prepare for this cart."""
    return self
def get_products_main_export(self):
    """Fetch the next batch of source products, keyed after the last
    exported post id and limited by the configured batch size.

    With WPML enabled only original-language products are selected
    (``source_language_code IS NULL``); translations are picked up later
    via the translation group.
    """
    last_id = self._notice['process']['products']['id_src']
    batch_size = self._notice['setting']['products']
    if self._notice['src']['support']['wpml']:
        sql = ("SELECT * FROM _DBPRF_posts p LEFT JOIN _DBPRF_icl_translations il ON p.ID = il.element_id "
               "WHERE il.`source_language_code` is NULL and il.`element_type` = 'post_product' AND p.ID > "
               + to_str(last_id)
               + " AND p.post_type = 'product' AND p.post_status NOT IN ('inherit','auto-draft') ORDER BY p.ID ASC LIMIT "
               + to_str(batch_size))
    else:
        sql = ("SELECT * FROM _DBPRF_posts WHERE ID > "
               + to_str(last_id)
               + " AND post_type = 'product' AND post_status NOT IN ('inherit','auto-draft') ORDER BY ID ASC LIMIT "
               + to_str(batch_size))
    products = self.select_data_connector({'type': 'select', 'query': sql}, 'products')
    if not products or products['result'] != 'success':
        return response_error()
    return products
def get_products_ext_export(self, products):
    """Fetch all auxiliary source data needed to convert a product batch.

    Issues three waves of queries, each keyed off ids discovered by the
    previous wave:
      1. variations, term relations, grouped children, linked-product meta
         (plus WPML translation rows);
      2. post meta, attribute taxonomies, variation term relations,
         attribute value terms (plus WPML language/meta/attribute rows);
      3. attachment rows for every thumbnail / gallery image id.
    All results are merged into one response dict whose 'data' maps
    query-name -> row list.
    """
    url_query = self.get_connector_url('query')
    product_ids = duplicate_field_value_from_list(products['data'], 'ID')
    product_id_con = self.list_to_in_condition(product_ids)
    # product_id_query = self.product_to_in_condition_seourl(product_ids)
    # LIKE-style condition matching serialized upsell/crosssell id lists.
    linked = self.product_to_in_condition_linked(product_ids)
    # --- wave 1: rows keyed directly by the batch's product ids ---------
    product_ext_queries = {
        'post_variant': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'product_variation' AND post_parent IN " + product_id_con,
        },
        'term_relationship': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
            "LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
            "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
            "WHERE tr.object_id IN " + product_id_con,
        },
        'post_grouped': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_posts WHERE post_parent IN " + product_id_con + " AND post_type = 'product'",
        },
        'parent_link': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_postmeta WHERE meta_key IN ('_upsell_ids','_crosssell_ids') AND meta_value " + linked
        },
    }
    if self._notice['src']['support']['wpml']:
        product_ext_queries['icl_translations'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and element_id IN " + product_id_con
        }
    # products_ext = self.get_connector_data(url_query, {
    #     'serialize': True, 'query': json.dumps(product_ext_queries)
    # })
    products_ext = self.select_multiple_data_connector(product_ext_queries, 'products')
    if (not products_ext) or products_ext['result'] != 'success':
        return response_error()
    # --- wave 2: rows keyed by variation ids / attribute taxonomies -----
    pro_child_ids = duplicate_field_value_from_list(products_ext['data']['post_variant'], 'ID')
    all_product_ids = self.list_to_in_condition(list(set(pro_child_ids + product_ids)))
    variant_id_query = self.list_to_in_condition(pro_child_ids)
    taxonomy_duplicate = duplicate_field_value_from_list(products_ext['data']['term_relationship'], 'taxonomy')
    # Attribute taxonomies are the 'pa_'-prefixed ones (presumably filtered
    # by that prefix here — confirm against the helper's implementation).
    attrs_taxonomy = self.get_list_from_list_by_field_as_first_key(taxonomy_duplicate, '', 'pa_')
    attrs_name = list()
    for attr_taxonomy in attrs_taxonomy:
        # Strip the leading 3-char 'pa_' prefix to get the raw attribute name.
        attrs_name.append(self.substr_replace(attr_taxonomy, '', 0, 3))
    attr_name_query = self.list_to_in_condition(attrs_name)
    attr_values = duplicate_field_value_from_list(products_ext['data']['term_relationship'], 'term_id')
    attr_values_query = self.list_to_in_condition(attr_values)
    product_ext_rel_queries = {
        'post_meta': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + all_product_ids,
        },
        'woocommerce_attribute_taxonomies': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_woocommerce_attribute_taxonomies WHERE attribute_name IN " + attr_name_query,
        },
        'variation_term_relationship': {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
            "LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
            "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
            "WHERE tr.object_id IN " + variant_id_query,
        },
        'term_attribute': {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_terms WHERE term_id IN " + attr_values_query,
        }
    }
    if self._notice['src']['support']['wpml']:
        # Pull the full translation groups (all languages) for the batch.
        trids = duplicate_field_value_from_list(products_ext['data']['icl_translations'], 'trid')
        product_ext_rel_queries['wpml_product_lang'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_icl_translations il "
            "LEFT JOIN _DBPRF_posts as p ON il.element_id = p.ID "
            "WHERE il.element_type = 'post_product' and il.trid IN " + self.list_to_in_condition(trids)
        }
        product_ext_rel_queries['wpml_product_meta'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN (SELECT element_id FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and trid IN " + self.list_to_in_condition(trids) + ")"
        }
        product_ext_rel_queries['wpml_term_relationship'] = {
            'type': "select",
            'query': "SELECT * FROM _DBPRF_term_relationships AS tr "
            "LEFT JOIN _DBPRF_term_taxonomy AS tx ON tx.term_taxonomy_id = tr.term_taxonomy_id "
            "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id WHERE tr.object_id IN (SELECT element_id FROM _DBPRF_icl_translations WHERE element_type = 'post_product' and trid IN " + self.list_to_in_condition(trids) + ")",
        }
        product_ext_rel_queries['attributes_icl_translations'] = {
            'type': 'select',
            'query': "SELECT * FROM _DBPRF_icl_translations il "
            "LEFT JOIN _DBPRF_term_taxonomy as tx ON il.element_id = tx.term_id "
            "LEFT JOIN _DBPRF_terms AS t ON t.term_id = tx.term_id "
            "WHERE il.element_type IN " + self.wpml_attributes_to_in_condition(
                attrs_taxonomy)
        }
    # products_ext_rel = self.get_connector_data(url_query, {
    #     'serialize': True, 'query': json.dumps(product_ext_rel_queries)
    products_ext_rel = self.select_multiple_data_connector(product_ext_rel_queries, 'products')
    if (not products_ext_rel) or products_ext_rel['result'] != 'success':
        return response_error()
    # --- wave 3: attachment rows for thumbnails and gallery images ------
    thumbnail_id_list = get_list_from_list_by_field(products_ext_rel['data']['post_meta'], 'meta_key', '_thumbnail_id')
    thumbnail_ids = duplicate_field_value_from_list(thumbnail_id_list, 'meta_value')
    # NOTE(review): both names are bound to the SAME empty list here; this is
    # safe only because gallery_ids is rebound (never mutated) below.
    gallery_ids = gallery_ids_src = list()
    gallery_list = get_list_from_list_by_field(products_ext_rel['data']['post_meta'], 'meta_key', '_product_image_gallery')
    if gallery_list:
        # _product_image_gallery is a comma-separated list of attachment ids.
        for gallery in gallery_list:
            if 'meta_value' in gallery and gallery['meta_value']:
                images_ids = gallery['meta_value'].split(',')
                if images_ids:
                    gallery_ids = list(set(gallery_ids + images_ids))
        # Drop empty fragments from trailing/duplicate commas.
        # ('id' shadows the builtin; left unchanged here.)
        for id in gallery_ids:
            if id != '':
                gallery_ids_src.append(id)
    all_images_ids = list(set(thumbnail_ids + gallery_ids_src))
    all_images_ids_query = self.list_to_in_condition(all_images_ids)
    product_ext_rel_third_queries = {
        'image': {
            'type': 'select',
            'query': "SELECT p.ID, p.post_title, pm.meta_value, p.guid FROM _DBPRF_posts AS p "
            "LEFT JOIN _DBPRF_postmeta AS pm ON pm.post_id = p.ID AND pm.meta_key = '_wp_attached_file' "
            "WHERE p.ID IN " + all_images_ids_query,
        }
    }
    products_ext_third = self.get_connector_data(url_query, {
        'serialize': True, 'query': json.dumps(product_ext_rel_third_queries)
    })
    if (not products_ext_third) or products_ext_third['result'] != 'success':
        return response_error()
    # Merge wave 2 + wave 3 into wave 1 and return the combined bundle.
    products_ext1 = self.sync_connector_object(products_ext_rel, products_ext_third)
    products_ext = self.sync_connector_object(products_ext, products_ext1)
    return products_ext
def convert_product_export(self, product, products_ext):
product_meta = get_list_from_list_by_field(products_ext['data']['post_meta'], 'post_id', product['ID'])
product_data = self.construct_product()
product_data = self.add_construct_default(product_data)
product_data['id'] = product['ID']
product_data['code'] = product['post_name']
product_data['sku'] = self.get_value_metadata(product_meta, '_sku')
# todo: get type prd virtual
product_type = get_row_value_from_list_by_field(product_meta, 'meta_key', '_virtual', 'meta_value')
if product_type == 'yes':
product_data['type'] = 'virtual'
product_price = ''
if to_decimal(self.get_value_metadata(product_meta, '_regular_price', 0.0000)) > 0:
product_price = self.get_value_metadata(product_meta, '_regular_price', 0.0000)
else:
product_price = self.get_value_metadata(product_meta, '_price', 0.0000)
if product_price == '' or product_price == self.get_value_metadata(product_meta, '_min_variation_sale_price', 0.0000):
product_price = self.get_value_metadata(product_meta, '_min_variation_regular_price', 0.0000)
if product_price == '' or not product_price:
product_price = 0
product_data['price'] = product_price
product_data['weight'] = self.get_value_metadata(product_meta, '_weight', 0.0000)
product_data['length'] = self.get_value_metadata(product_meta, '_length', 0.0000)
product_data['width'] = self.get_value_metadata(product_meta, '_width', 0.0000)
product_data['height'] = self.get_value_metadata(product_meta, '_height', 0.0000)
product_data['status'] = True if product['post_status'] == "publish" else False
product_data['manage_stock'] = True if self.get_value_metadata(product_meta, '_manage_stock', 'no') == "yes" else False
if self.is_woo2woo():
product_data['is_in_stock'] = self.get_value_metadata(product_meta, '_stock_status', 'instock')
product_data['sold_individually'] = self.get_value_metadata(product_meta, '_sold_individually', '')
product_data['purchase_note'] = self.get_value_metadata(product_meta, '_purchase_note', '')
else:
product_data['is_in_stock'] = True if self.get_value_metadata(product_meta, '_stock_status', 'instock') == "instock" else False
product_data['qty'] = to_int(to_decimal(self.get_value_metadata(product_meta, '_stock', 0))) if to_decimal(self.get_value_metadata(product_meta, '_stock', 0)) > 0 else 0
product_data['created_at'] = convert_format_time(product['post_date'])
product_data['updated_at'] = convert_format_time(product['post_modified'])
product_data['name'] = product['post_title']
product_data['description'] = product['post_content']
product_data['short_description'] = product['post_excerpt']
product_data['menu_order'] = product['menu_order']
product_data['sort_order'] = product['menu_order']
product_data['backorders'] = self.get_value_metadata(product_meta, '_backorders', 'no')
product_data['meta_description'] = self.get_value_metadata(product_meta, '_yoast_wpseo_metadesc', '')
product_data['meta_title'] = self.get_value_metadata(product_meta, '_yoast_wpseo_title', '')
if product_data['meta_title']:
product_data['meta_title'] = product_data['meta_title'].replace('%%title%%', product_data['name']).replace('%%page%%', '').replace('%%sep%%', '-').replace('%%sitename%%', '')
# image_
thumbnail_id = self.get_value_metadata(product_meta, '_thumbnail_id', 0)
if thumbnail_id:
thumbnail_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', thumbnail_id)
if thumbnail_src:
product_data['thumb_image']['label'] = thumbnail_src[0]['post_title']
product_data['thumb_image']['url'] = self._notice['src']['cart_url'].rstrip('/') + '/wp-content/uploads/' + to_str(thumbnail_src[0]['meta_value']).lstrip('/')
product_data['thumb_image']['url'] = to_str(product_data['thumb_image']['url']).replace('uploads/uploads', 'uploads')
gallery_ids = self.get_value_metadata(product_meta, '_product_image_gallery', '')
if gallery_ids:
gallery_ids = gallery_ids.split(',')
for gallery_id in gallery_ids:
image_gallery_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', gallery_id)
product_image_data = self.construct_product_image()
if image_gallery_src:
product_image_data['label'] = image_gallery_src[0]['post_title']
product_image_data['url'] = self._notice['src']['cart_url'].rstrip('/') + '/wp-content/uploads/' + image_gallery_src[0]['meta_value'].lstrip('/')
product_image_data['url'] = to_str(product_image_data['url']).replace('uploads/uploads', 'uploads')
product_data['images'].append(product_image_data)
sale_price = self.get_value_metadata(product_meta, '_sale_price', '')
if sale_price != '':
product_data['special_price']['price'] = to_decimal(sale_price)
start_date = self.get_value_metadata(product_meta, '_sale_price_dates_from', '')
if start_date:
product_data['special_price']['start_date'] = convert_format_time(start_date)
end_date = self.get_value_metadata(product_meta, '_sale_price_dates_to', '')
if end_date:
product_data['special_price']['end_date'] = convert_format_time(self.get_value_metadata(product_meta, '_sale_price_dates_to', ''))
else:
product_data['special_price']['price'] = self.get_value_metadata(product_meta, '_min_variation_sale_price', 0.0000)
if not product_data['special_price']['price']:
product_data['special_price']['price'] = 0
crosssell_ids = self.get_value_metadata(product_meta, '_crosssell_ids', '')
if crosssell_ids:
crosssell_ids_data = php_unserialize(crosssell_ids)
if crosssell_ids_data:
for crosssell_id in crosssell_ids_data:
relation = self.construct_product_relation()
relation['id'] = crosssell_id
relation['type'] = self.PRODUCT_CROSS
product_data['relate']['children'].append(relation)
parent_crosssell_list = get_list_from_list_by_field(products_ext['data']['parent_link'], 'meta_key', '_crosssell_ids')
if parent_crosssell_list:
for parent_crosssell in parent_crosssell_list:
if parent_crosssell['meta_value'].find(':' + to_str(product['ID']) + ';') != -1:
relation = self.construct_product_relation()
relation['id'] = parent_crosssell['post_id']
relation['type'] = self.PRODUCT_CROSS
product_data['relate']['parent'].append(relation)
upsell_ids = self.get_value_metadata(product_meta, '_upsell_ids', '')
if upsell_ids:
upsell_ids_data = php_unserialize(upsell_ids)
if upsell_ids_data:
for upsell_id in upsell_ids_data:
relation = self.construct_product_relation()
relation['id'] = upsell_id
relation['type'] = self.PRODUCT_UPSELL
product_data['relate']['children'].append(relation)
parent_upsell_list = get_list_from_list_by_field(products_ext['data']['parent_link'], 'meta_key', '_upsell_ids')
if parent_upsell_list:
for parent_upsell in parent_upsell_list:
if parent_upsell['meta_value'].find(':' + to_str(product['ID']) + ';') != -1:
relation = self.construct_product_relation()
relation['id'] = parent_upsell['post_id']
relation['type'] = self.PRODUCT_UPSELL
product_data['relate']['parent'].append(relation)
product_data['tax']['code'] = self.get_value_metadata(product_meta, '_tax_class', 'standard') if self.get_value_metadata(product_meta, '_tax_status', 'taxable') != 'none' else None
product_data['tax']['status'] = self.get_value_metadata(product_meta, '_tax_status', 'taxable')
# category product
term_relationship = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
category_src = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_cat')
if category_src:
for product_category in category_src:
product_category_data = self.construct_product_category()
product_category_data['id'] = product_category['term_id']
product_category_data['code'] = product_category['slug']
product_data['categories'].append(product_category_data)
if self._notice['src']['support']['manufacturers']:
manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'product_brand')
if not manu_src:
manu_src = get_row_from_list_by_field(term_relationship, 'taxonomy', 'pwb-brand')
if manu_src:
product_manufacturer_data = dict()
product_manufacturer_data['id'] = manu_src['term_id']
product_manufacturer_data['name'] = manu_src['name']
product_manufacturer_data['code'] = manu_src['slug']
product_data['manufacturer'] = product_manufacturer_data
# tags
product_tags = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_tag')
if product_tags:
tags = list()
for product_tag in product_tags:
tags.append(product_tag['name'])
if tags:
product_data['tags'] = ','.join(tags)
# if self._notice['config']['seo']:
detect_seo = self.detect_seo()
product_data['seo'] = getattr(self, 'products_' + detect_seo)(product, products_ext)
# TODO: convert product languages
if self._notice['src']['support']['wpml']:
trid = get_row_value_from_list_by_field(products_ext['data']['icl_translations'], 'element_id', product['ID'], 'trid')
if trid:
language_datas = get_list_from_list_by_field(products_ext['data']['wpml_product_lang'], 'trid', trid)
if language_datas:
for language_data in language_datas:
if not language_data['post_title']:
continue
meta_language_datas = get_list_from_list_by_field(products_ext['data']['wpml_product_meta'], 'post_id', language_data['ID'])
term_relationship_language = get_list_from_list_by_field(products_ext['data']['wpml_term_relationship'], 'object_id', language_data['ID'])
product_new_data = self.construct_product_lang()
product_new_data['name'] = language_data['post_title']
product_new_data['code'] = language_data['post_name']
product_new_data['description'] = language_data['post_content']
product_new_data['short_description'] = language_data['post_excerpt']
product_new_data['meta_description'] = self.get_value_metadata(meta_language_datas, '_yoast_wpseo_metadesc', '')
product_new_data['meta_title'] = self.get_value_metadata(meta_language_datas, '_yoast_wpseo_title', '')
if product_new_data['meta_title']:
product_new_data['meta_title'] = product_new_data['meta_title'].replace('%%title%%', product_new_data['name']).replace('%%page%%', '').replace('%%sep%%', '-').replace('%%sitename%%', '')
wpml_product_tags = get_list_from_list_by_field(term_relationship_language, 'taxonomy', 'product_tag')
if wpml_product_tags:
wpml_tags = list()
for wpml_product_tag in wpml_product_tags:
wpml_tags.append(wpml_product_tag['name'])
if wpml_tags:
product_new_data['tags'] = ','.join(wpml_tags)
if not language_data['source_language_code']:
product_data['language_default'] = language_data['language_code']
product_data['languages'][language_data['language_code']] = product_new_data
else:
product_language_data = self.construct_product_lang()
product_language_data['name'] = product['post_title']
product_language_data['description'] = product['post_content']
product_language_data['short_description'] = product['post_excerpt']
language_id = self._notice['src']['language_default']
product_data['languages'][language_id] = product_language_data
# attribute product
product_child_src = get_list_from_list_by_field(products_ext['data']['post_variant'], 'post_parent', product['ID'])
# todo: attribute
product_attribute = get_row_value_from_list_by_field(product_meta, 'meta_key', '_product_attributes', 'meta_value')
product_attribute = php_unserialize(product_attribute)
if isinstance(product_attribute, str):
product_attribute = php_unserialize(product_attribute)
src_option_values = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
attribute_variants = list()
if product_attribute:
for attribute_key, attribute in product_attribute.items():
if to_int(attribute.get('is_taxonomy')) > 0:
woo_attribute = get_row_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', to_str(attribute_key).replace('pa_', ''))
if not woo_attribute:
woo_attribute = get_row_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', to_str(attribute['name']).replace('pa_', ''))
else:
woo_attribute = None
if woo_attribute:
# attributes
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = woo_attribute['attribute_id']
option_code = to_str(woo_attribute['attribute_name']).lower()
attribute_data['option_code'] = option_code.strip()
attribute_data['option_type'] = woo_attribute['attribute_type']
attribute_data['option_name'] = woo_attribute['attribute_label']
attribute_data['option_group'] = woo_attribute['attribute_orderby']
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
attribute_data['is_taxonomy'] = True if to_int(attribute.get('is_taxonomy')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = woo_attribute['attribute_label']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
tmp_values = list()
desc = list()
for option_value in src_option_values:
attribute_name = 'pa_' + to_str(woo_attribute['attribute_name']).lower()
if 'taxonomy' in option_value:
if option_value['taxonomy'] == attribute_name:
woo_term_values = get_list_from_list_by_field(
products_ext['data']['term_attribute'], 'term_id', option_value['term_id'])
if woo_term_values:
for woo_term in woo_term_values:
attribute_value = woo_term['name']
if woo_attribute['attribute_type'] in ['select', 'alg_wc_civs_image']:
option_values = to_str(woo_term['name']).split('|')
if option_values and to_len(option_values) > 1:
attribute_value = ';'.join(option_values)
tmp_values.append(attribute_value)
desc.append(option_value['description'])
values = list(map(lambda x: x.strip(), tmp_values))
if values and to_len(values) > 1:
attribute_data['option_type'] = self.OPTION_MULTISELECT
attribute_data['option_value_name'] = ';'.join(values)
attribute_data['option_value_description'] = ';'.join(desc)
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': ';'.join(values)
}
if (to_int(attribute.get('is_variation')) == 1 or to_str(attribute.get('variation')) == 'yes') and not self.is_woo2woo():
attribute_variants.append(attribute_data)
else:
product_data['attributes'].append(attribute_data)
else:
if ('is_visible' in attribute and to_int(attribute['is_visible']) == 1) or ('visible' in attribute and attribute['visible'] == 'yes'):
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = None
option_code = to_str(attribute['name']).lower()
attribute_data['option_code'] = option_code.lower().strip()
attribute_data['option_type'] = 'text'
attribute_data['option_name'] = attribute['name']
attribute_data['option_group'] = 'menu_order'
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = attribute['name']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
attribute_value = attribute['value']
if attribute_value and attribute_value != '':
option_values = list()
if isinstance(attribute_value, dict):
for key, attr_value in attribute_value.items():
option_values.append(attr_value)
else:
option_values = attribute_value.split('|')
if option_values and to_len(option_values) > 1:
attribute_data['option_type'] = 'multiselect'
option_values = list(map(lambda x: x.strip(), option_values))
attribute_value = ';'.join(option_values)
attribute_data['option_value_name'] = attribute_value
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': attribute_value
}
# product_data['attributes'].append(attribute_data)
else:
attribute_data = self.construct_product_attribute()
attribute_data['option_id'] = None
option_code = to_str(attribute['name']).lower()
attribute_data['option_code'] = option_code.lower().strip()
attribute_data['option_type'] = 'text'
attribute_data['option_name'] = attribute['name']
attribute_data['option_group'] = 'menu_order'
attribute_data['is_visible'] = attribute.get('is_visible', 'visible')
attribute_data['is_variation'] = True if to_int(attribute.get('is_variation')) == 1 else False
# attribute language
attribute_language_data = self.construct_product_option_lang()
attribute_language_data['option_name'] = attribute['name']
language_id = self._notice['src']['language_default']
attribute_data['option_languages'][language_id] = attribute_language_data
# attribute values
option_values = attribute['value']
if option_values != '':
option_values = option_values.split('|')
if option_values and to_len(option_values) > 1:
attribute_data['option_type'] = self.OPTION_MULTISELECT
option_values = list(map(lambda x: x.strip(), option_values))
option_values = ';'.join(option_values)
attribute_data['option_value_name'] = option_values
attribute_data['option_value_languages'][self._notice['src']['language_default']] = {
'option_value_name': option_values
}
if (to_int(attribute.get('is_variation')) == 1 or to_str(attribute.get('variation')) == 'yes') and not self.is_woo2woo():
attribute_variants.append(attribute_data)
else:
product_data['attributes'].append(attribute_data)
# end
# todo: plugin add-ons
if self._notice['src']['support'].get('addons') and not self.is_woo2woo():
product_addons = get_row_value_from_list_by_field(product_meta, 'meta_key', '_product_addons', 'meta_value')
product_addons = php_unserialize(product_addons)
if product_addons and to_len(product_addons) > 0:
for product_addon in product_addons:
if not product_addon.get('options') or to_len(product_addon['options']) == 0:
continue
if product_addon.get('type') == 'radiobutton':
option_type = self.OPTION_RADIO
else:
option_type = self.OPTION_SELECT
product_option = self.construct_product_option()
product_option['code'] = self.convert_attribute_code(product_addon.get('name'))
product_option['option_code'] = self.convert_attribute_code(product_addon.get('name'))
product_option['option_name'] = product_addon.get('name')
product_option['type'] = option_type
product_option['position'] = product_addon.get('position')
product_option['required'] = True if product_addon.get('required') and to_int(product_addon.get('required')) == 1 else False
product_addon_options = list()
if isinstance(product_addon.get('options'), dict):
for key, product_addon_value in product_addon['options'].items():
product_addon_options.append(product_addon_value)
else:
product_addon_options = product_addon.get('options')
for product_addon_value in product_addon_options:
product_option_value = self.construct_product_option_value()
product_option_value['code'] = self.convert_attribute_code(product_addon_value.get('label'))
product_option_value['option_value_code'] = self.convert_attribute_code(product_addon_value.get('label'))
product_option_value['option_value_name'] = product_addon_value.get('label')
product_option_value['option_value_price'] = product_addon_value.get('price')
if 'Color' in product_addon.get('name', '') or 'Colour' in product_addon.get('name', ''):
if 'RNBP' in product_addon_value.get('label', ''):
product_option_value['thumb_image']['path'] = self.convert_attribute_code(to_str(product_addon_value.get('label')).replace(' (RNBP)', '')) + '.jpg'
product_option_value['thumb_image']['url'] = self._notice['src']['cart_url'].rstrip('/') + '/assets/blind-images/rnbp/'
product_option['values'].append(product_option_value)
product_data['options'].append(product_option)
# todo: downloadable
product_downloadables = get_row_value_from_list_by_field(product_meta, 'meta_key', '_downloadable_files', 'meta_value')
product_downloadables = php_unserialize(product_downloadables)
if product_downloadables:
product_data['type'] = self.PRODUCT_DOWNLOAD
for key, product_downloadable in product_downloadables.items():
download_data = self.construct_product_downloadable()
download_data['limit'] = get_row_value_from_list_by_field(product_meta, 'meta_key', '_download_limit', 'meta_value')
download_data['max_day'] = get_row_value_from_list_by_field(product_meta, 'meta_key', '_download_expiry', 'meta_value')
name_file = to_str(product_downloadable['file']).split('/') if product_downloadable.get('file') else None
if self._notice['src']['cart_url'] in product_downloadable['file'] and name_file:
download_data['name'] = to_str(product_downloadable['file']).split('/')
download_data['path'] = 'woocommerce/' + to_str(name_file[to_len(name_file) - 1]).lower()
else:
download_data['name'] = product_downloadable['name']
download_data['path'] = product_downloadable['file']
# Thieu max_day,limit
product_data['downloadable'].append(download_data)
# todo: group product
child_group_product = self.get_value_metadata(product_meta, '_children', '')
if child_group_product:
child_group_product = php_unserialize(child_group_product)
if child_group_product and to_len(child_group_product) > 0:
for child_group_product_id in child_group_product:
product_data['group_child_ids'].append({
'id': child_group_product_id
})
product_data['type'] = self.PRODUCT_GROUP
# todo: child product
product_child_src = get_list_from_list_by_field(products_ext['data']['post_variant'], 'post_parent', product['ID'])
all_child = dict()
child_attributes = dict()
if product_child_src:
product_data['type'] = self.PRODUCT_CONFIG
for product_child in product_child_src:
child_attributes[product_child['ID']] = dict()
child_data = self.construct_product_child()
child_data = self.add_construct_default(child_data)
child_meta = get_list_from_list_by_field(products_ext['data']['post_meta'], 'post_id', product_child['ID'])
child_data['id'] = product_child['ID']
child_data['sku'] = self.get_value_metadata(child_meta, '_sku', '') if self.get_value_metadata(child_meta, '_sku', '') else self.get_value_metadata(product_meta, '_sku', '')
child_data['code'] = product_child['post_name']
child_product_price = ''
if self.get_value_metadata(child_meta, '_regular_price', ''):
child_product_price = self.get_value_metadata(child_meta, '_regular_price')
else:
if self.get_value_metadata(child_meta, '_price', ''):
child_product_price = self.get_value_metadata(child_meta, '_price', 0.0000)
else:
child_product_price = 0
if child_product_price == '' or not child_product_price:
child_product_price = 0
child_data['price'] = child_product_price
child_data['weight'] = self.get_value_metadata(child_meta, '_weight') if self.get_value_metadata(child_meta, '_weight') else product_data['weight']
child_data['length'] = self.get_value_metadata(child_meta, '_length') if self.get_value_metadata(child_meta, '_length') else product_data['length']
child_data['width'] = self.get_value_metadata(child_meta, '_width') if self.get_value_metadata(child_meta, '_width') else product_data['width']
child_data['height'] = self.get_value_metadata(child_meta, '_height') if self.get_value_metadata(child_meta, '_height') else product_data['height']
child_data['status'] = True if product_child['post_status'] == "publish" else False
child_data['manage_stock'] = True if self.get_value_metadata(child_meta, '_manage_stock') == 'yes' else False
if self.is_woo2woo():
child_data['is_in_stock'] = self.get_value_metadata(child_meta, '_stock_status', 'instock')
child_data['sold_individually'] = self.get_value_metadata(child_meta, '_sold_individually', '')
child_data['purchase_note'] = self.get_value_metadata(child_meta, '_purchase_note', '')
else:
child_data['is_in_stock'] = True if self.get_value_metadata(child_meta, '_stock_status', 'instock') == "instock" else False
child_data['qty'] = to_int(to_decimal(self.get_value_metadata(child_meta, '_stock'))) if self.get_value_metadata(child_meta, '_stock') else 0
child_data['created_at'] = convert_format_time(product_child['post_date'])
child_data['updated_at'] = convert_format_time(product_child['post_modified'])
child_data['name'] = product_child['post_title']
child_data['description'] = self.get_value_metadata(child_meta, '_variation_description')
child_data['tax']['code'] = self.get_value_metadata(child_meta, '_tax_class', 'standard')
child_data['short_description'] = ''
# image_
thumbnail_id = self.get_value_metadata(child_meta, '_thumbnail_id')
if thumbnail_id:
thumbnail_src = get_list_from_list_by_field(products_ext['data']['image'], 'ID', thumbnail_id)
if thumbnail_src:
child_data['thumb_image']['label'] = thumbnail_src[0]['post_title']
child_data['thumb_image']['path'] = thumbnail_src[0]['meta_value']
child_data['thumb_image']['url'] = to_str(thumbnail_src[0]['guid']).replace(thumbnail_src[0]['meta_value'], '')
sale_price = self.get_value_metadata(child_meta, '_sale_price')
if sale_price != '':
child_data['special_price']['price'] = sale_price
child_data['special_price']['start_date'] = convert_format_time(self.get_value_metadata(child_meta, '_sale_price_dates_from'))
child_data['special_price']['end_date'] = convert_format_time(self.get_value_metadata(child_meta, '_sale_price_dates_to'))
child_product_language_data = self.construct_product_lang()
child_product_language_data['name'] = product_child['post_title']
child_product_language_data['description'] = self.get_value_metadata(child_meta, '_variation_description')
child_product_language_data['short_description'] = product_child['post_excerpt']
language_id = self._notice['src']['language_default']
child_data['languages'][language_id] = child_product_language_data
attr_child = self.get_list_from_list_by_field_as_first_key(child_meta, 'meta_key', 'attribute_')
child_data['options'] = list()
child_data['attributes'] = list()
for attribute in attr_child:
# attribute
attribute_child_data = self.construct_product_attribute()
attr_name = to_str(attribute['meta_key']).replace('attribute_', '')
element_type = 'tax_' + attr_name
attr_name = attr_name.replace('pa_', '')
attr_name = attr_name.strip()
option_id = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_id')
attribute_child_data['option_id'] = option_id if option_id else ''
option_name = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label')
attribute_child_data['option_name'] = option_name if option_name else attr_name
option_code = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name')
attribute_child_data['option_code'] = option_code if option_code else attr_name.lower()
option_type = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type')
# attribute_child_data['option_type'] = option_type if option_type else 'select'
attribute_child_data['option_type'] = self.OPTION_SELECT
option_group = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby')
attribute_child_data['option_group'] = option_group if option_group else 'menu_order'
# attribute language
child_attribute_language_data = self.construct_product_option_lang()
child_attribute_language_data['option_name'] = attribute_child_data['option_name']
language_id = self._notice['src']['language_default']
attribute_child_data['option_languages'][language_id] = child_attribute_language_data
# values
attribute_child_data['option_value_id'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'term_id')
option_value_name = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name')
attribute_child_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else attribute['meta_value']
attribute_child_data['option_value_code'] = to_str(attribute['meta_value']).lower()
attribute_child_data['option_value_description'] = get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') if get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') else ''
language_id = self._notice['src']['language_default']
child_attribute_value_language_data = self.construct_product_option_value_lang()
child_attribute_value_language_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else attribute['meta_value']
attribute_child_data['option_value_languages'][language_id] = child_attribute_value_language_data
child_data['attributes'].append(attribute_child_data)
# options
child_option_data = self.construct_product_option()
child_option_data['id'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_id')
child_option_data['code'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_name') else attr_name.lower()
child_option_data['option_name'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_label') else attr_name
child_option_data['option_code'] = child_option_data['code']
child_option_data['option_group'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_orderby') else 'menu_order'
# child_option_data['option_type'] = self.OPTION_SELECT
child_option_data['option_type'] = get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type') if get_row_value_from_list_by_field(products_ext['data']['woocommerce_attribute_taxonomies'], 'attribute_name', attr_name, 'attribute_type') else 'select'
child_option_data['required'] = 1
# option language
child_option_language_data = self.construct_product_option_lang()
child_option_language_data['option_name'] = attr_name
language_id = self._notice['src']['language_default']
child_option_data['option_languages'][language_id] = child_option_language_data
# value option
child_option_value_data = self.construct_product_option_value()
child_option_value_data['id'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'term_id')
child_option_value_data['code'] = attribute['meta_value']
child_option_value_data['option_value_code'] = attribute['meta_value']
child_option_value_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') if get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name') else child_option_value_data['code']
child_option_value_data['option_value_description'] = get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'description') if get_row_value_from_list_by_field(products_ext['data']['term_relationship'], 'slug', attribute['meta_value'], 'name') else ''
# value language
child_option_value_language_data = self.construct_product_option_value_lang()
child_option_value_language_data['option_value_name'] = get_row_value_from_list_by_field(products_ext['data']['term_attribute'], 'slug', attribute['meta_value'], 'name')
language_id = self._notice['src']['language_default']
child_option_value_data['option_value_languages'][language_id] = child_option_value_language_data
child_option_data['values'].append(child_option_value_data)
child_attributes[product_child['ID']][child_option_data['option_name']] = child_option_value_data['option_value_name']
all_child[to_str(product_child['ID'])] = child_data
# todo: bundle product - product bundle plugin: WPC Product Bundles for WooCommerce (Premium)
if self._notice['src']['support']['product_bundle']:
product_data['bundle_selection'] = list()
product_bundles = get_row_value_from_list_by_field(product_meta, 'meta_key', 'woosb_ids', 'meta_value')
if product_bundles:
product_data['type'] = self.PRODUCT_BUNDLE
product_bundle_list = to_str(product_bundles).split(',')
if product_bundle_list and to_len(product_bundle_list) > 0:
for product_bundle_child in product_bundle_list:
product_bundle_ids = to_str(product_bundle_child).split('/')
if product_bundle_ids and to_len(product_bundle_ids) > 0:
product_bundle_data = {
'product_id': product_bundle_ids[0],
'selection_qty': product_bundle_ids[1] if to_len(product_bundle_ids) > 1 else 1
}
product_data['bundle_selection'].append(product_bundle_data)
if self.is_woo2woo():
product_data['children'] = list(all_child.values())
else:
len_child = 1
for attribute_variant in attribute_variants:
len_child *= to_len(attribute_variant['option_value_name'].split(';'))
options_src = dict()
for attribute_variant in attribute_variants:
values = to_str(attribute_variant['option_value_name']).split(';')
option_data = self.construct_product_option()
option_data['id'] = attribute_variant['option_id']
option_data['option_name'] = attribute_variant['option_name']
option_data['option_code'] = attribute_variant['option_code']
option_data['option_type'] = 'select'
for value in values:
if len_child > self.VARIANT_LIMIT:
option_data_value = self.construct_product_option_value()
option_data_value['option_value_name'] = value
option_data['values'].append(option_data_value)
opt_val = {
'option_name': attribute_variant['option_name'],
'option_code': attribute_variant['option_code'],
'option_languages': attribute_variant['option_languages'],
'option_id': attribute_variant['option_id'],
'option_value_name': value,
}
if attribute_variant['option_name'] not in options_src:
options_src[attribute_variant['option_name']] = list()
options_src[attribute_variant['option_name']].append(opt_val)
if len_child > self.VARIANT_LIMIT:
product_data['options'].append(option_data)
if len_child <= self.VARIANT_LIMIT and child_attributes:
combinations = self.combination_from_multi_dict(options_src)
list_child = list()
if combinations:
for combination in combinations:
if not combination:
continue
children_id = None
check_any = False
for child_id, child in child_attributes.items():
if self.check_sync_child(child, combination) and child_id not in list_child:
children_id = child_id
list_child.append(child_id)
break
if not children_id:
for child_id, child in child_attributes.items():
if self.check_sync_child(child, combination, True) and child_id not in list_child:
children_id = child_id
check_any = True
break
if not children_id:
continue
child = copy.deepcopy(all_child[children_id])
child['attributes'] = list()
for attribute in combination:
attribute_data = self.construct_product_attribute()
attribute_data['option_name'] = attribute['option_name']
attribute_data['option_code'] = attribute['option_code']
attribute_data['option_languages'] = attribute['option_languages']
attribute_data['option_id'] = attribute['option_id']
attribute_data['option_value_name'] = attribute['option_value_name']
child['attributes'].append(attribute_data)
product_data['children'].append(child)
else:
if attribute_variants:
product_data['attributes'] = attribute_variants
return response_success(product_data)
def get_product_id_import(self, convert, product, products_ext):
    """Return the source product's ID, used as the key when importing."""
    source_id = product['ID']
    return source_id
def check_product_import(self, convert, product, products_ext):
    """Check whether this product has already been migrated.

    Looks up the target-side mapping for the source product's id/code in the
    default target language; a truthy result means the product already exists
    on the target store.
    """
    target_language = self._notice['target']['language_default']
    return self.get_map_field_by_src(
        self.TYPE_PRODUCT, convert['id'], convert['code'], lang=target_language
    )
def update_latest_data_product(self, product_id, convert, product, products_ext):
    """Refresh an already-migrated product on the target (WooCommerce) store.

    Rewrites the post title, category links, the core price/stock/shipping
    post meta, per-variant meta for mapped children, the SEO rewrite rows
    (lecm_rewrite) and, when WPML is active, recursively updates every
    mapped translation of this product.

    :param product_id: target post ID of the product being refreshed
    :param convert: normalized source product data
    :param product: raw source product row (passed through to recursion)
    :param products_ext: extra source data (passed through to recursion)
    :return: response_success()
    """

    def sale_window_open(special_price):
        # A special price is considered active when its end date lies in the
        # future or is effectively unset ('0000-00-00', '0000-00-00 00:00:00',
        # '' or None).
        end_date = special_price['end_date']
        return (self.to_timestamp(end_date) > time.time()
                or end_date in ('0000-00-00', '0000-00-00 00:00:00', '', None))

    all_query = list()
    language_code = convert.get('language_code')
    if self.is_wpml() and not language_code:
        language_code = self._notice['target']['language_default']
    # Update the product title.
    product_query = self.create_update_query_connector("posts", {'ID': product_id, 'post_title': convert['name']}, {'ID': product_id})
    all_query.append(product_query)
    old_url_key = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['id'], convert['code'], 'code_desc')
    # Re-map categories: resolve each source category to its target term,
    # trying (id, code), then code-only, then id-only lookups.
    category_desc = self.select_all_category_map()
    all_categories = list()
    for category in convert['categories']:
        category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], lang = language_code)
        if not category_id:
            category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], lang = language_code)
        if not category_id:
            category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, lang = language_code)
        if category_id:
            all_categories.append(category_id)
    all_categories = list(set(all_categories))
    # Delete the old category assignments before re-inserting the current set.
    query_cate = {
        'type': 'query',
        'query': "DELETE FROM `_DBPRF_term_relationships` WHERE `object_id` = " + to_str(product_id) + " AND `term_taxonomy_id` IN " + self.list_to_in_condition(category_desc) + ""
    }
    self.query_data_connector(query_cate, 'update_product')
    for cate_id in all_categories:
        query_cate_prod = {
            'type': 'select',
            'query': "SELECT * FROM `_DBPRF_term_relationships` WHERE `object_id` = " + to_str(product_id) + " AND `term_taxonomy_id` = " + to_str(cate_id) + ""
        }
        check_product_category = self.select_data_connector(query_cate_prod, 'category_product')
        if (not check_product_category) or check_product_category['result'] != 'success' or (to_len(check_product_category['data']) == 0):
            category_data = {
                'object_id': product_id,
                'term_taxonomy_id': cate_id,
                'term_order': 0
            }
            category_query = self.create_insert_query_connector("term_relationships", category_data)
            all_query.append(category_query)
    stock_status = 'instock'
    if 'is_in_stock' in convert:
        stock_status = 'instock' if convert['is_in_stock'] else 'outofstock'
    else:
        stock_status = 'outofstock' if convert['manage_stock'] else 'instock'
    tax_class = ''
    if convert['tax']['id'] or convert['tax']['code']:
        tax_class = self.get_map_field_by_src(self.TYPE_TAX, convert['tax']['id'], convert['tax']['code'], field = 'code_desc')
    # Core WooCommerce product meta. '_price' is the effective price: the
    # special price while its sale window is open, the regular price otherwise.
    product_meta = {
        '_stock_status': stock_status,
        '_downloadable': "yes" if convert['type'] == self.PRODUCT_DOWNLOAD else "no",
        '_virtual': "yes" if convert['type'] == self.PRODUCT_VIRTUAL else "no",
        '_regular_price': convert['price'],
        '_sale_price': convert['special_price']['price'] if convert['special_price']['price'] and sale_window_open(convert['special_price']) else "",
        '_tax_status': convert['tax'].get('status', ("taxable" if to_int(convert['tax']['id']) or convert['tax']['code'] else "none")),
        '_tax_class': tax_class if tax_class else '',
        '_weight': convert['weight'] if convert['weight'] else '',
        '_length': convert['length'] if convert['length'] else '',
        '_width': convert['width'] if convert['width'] else '',
        '_height': convert['height'] if convert['height'] else '',
        '_sku': convert['sku'],
        '_price': convert['special_price']['price'] if convert['special_price']['price'] and sale_window_open(convert['special_price']) else convert['price'],
        '_manage_stock': "yes" if convert['manage_stock'] or convert['manage_stock'] == True else "no",
        '_stock': convert['qty'] if convert['qty'] else 0,
    }
    if convert['special_price']['start_date'] and sale_window_open(convert['special_price']):
        product_meta['_sale_price_dates_from'] = self.to_timestamp(convert['special_price']['start_date'])
    if convert['special_price']['end_date'] and sale_window_open(convert['special_price']):
        product_meta['_sale_price_dates_to'] = self.to_timestamp(convert['special_price']['end_date'])
    if 'group_prices' in convert and to_len(convert['group_prices']) > 0:
        product_meta['wholesale_customer_wholesale_price'] = convert['group_prices'][0]['price']
    for meta_key, meta_value in product_meta.items():
        meta_insert = {
            'post_id': product_id,
            'meta_key': meta_key,
            'meta_value': meta_value
        }
        if meta_key == '_sale_price_dates_from' or meta_key == '_sale_price_dates_to':
            # Sale-date rows are inserted (not updated) when missing.
            # Bug fix: meta_key is a string and must be quoted in the SQL;
            # previously it was concatenated bare, so the SELECT always
            # failed and duplicate rows were inserted on every update.
            query_meta_key = {
                'type': 'select',
                'query': "SELECT * FROM `_DBPRF_postmeta` WHERE `post_id` = " + to_str(product_id) + " AND `meta_key` = '" + to_str(meta_key) + "'"
            }
            check_meta_key = self.select_data_connector(query_meta_key, 'postmeta')
            if (not check_meta_key) or check_meta_key['result'] != 'success' or (not check_meta_key['data']) or (to_len(check_meta_key['data']) == 0):
                sale_price_data = {
                    'post_id': product_id,
                    'meta_key': meta_key,
                    'meta_value': meta_value
                }
                meta_price_query = self.create_insert_query_connector("postmeta", sale_price_data)
                all_query.append(meta_price_query)
        meta_query = self.create_update_query_connector("postmeta", meta_insert, {'post_id': product_id, 'meta_key': meta_key})
        all_query.append(meta_query)
    # Update children (variants). When the source has no explicit children
    # but does carry options, expand the options into virtual children,
    # bounded by VARIANT_LIMIT.
    children_list = list()
    option_list = list()
    if convert['children']:
        children_list = convert['children']
    else:
        if convert['options']:
            option_list = convert['options']
            if self.count_child_from_option(convert['options']) <= self.VARIANT_LIMIT:
                children_list = self.convert_option_to_child(option_list, convert)
    if children_list and to_len(children_list) <= self.VARIANT_LIMIT:
        for key_child, product_child in enumerate(children_list):
            children_id = self.get_map_field_by_src(self.TYPE_CHILD, product_child['id'], product_child['code'], lang = language_code)
            if not children_id:
                continue
            # NOTE(review): the parent uses "'is_in_stock' in convert" while
            # this tests truthiness, so is_in_stock == False falls through to
            # the manage_stock branch — confirm this asymmetry is intended.
            if product_child.get('is_in_stock'):
                child_stock_status = 'instock' if product_child['is_in_stock'] else 'outofstock'
            else:
                child_stock_status = 'outofstock' if product_child['manage_stock'] else 'instock'
            # Bug fix: the sale-window test previously mixed the PARENT's
            # (convert) end date into the child's condition; it now checks
            # only the child's own special price.
            child_sale_active = product_child['special_price']['price'] and sale_window_open(product_child['special_price'])
            children_meta = {
                '_stock_status': child_stock_status,
                '_sku': product_child['sku'] if product_child['sku'] else '',
                '_weight': product_child['weight'] if product_child['weight'] else '',
                '_length': product_child['length'] if product_child['length'] else '',
                '_width': product_child['width'] if product_child['width'] else '',
                '_height': product_child['height'] if product_child['height'] else '',
                '_manage_stock': "yes" if product_child['manage_stock'] else "no",
                '_stock': product_child['qty'] if product_child['qty'] else 0,
                '_regular_price': product_child['price'],
                '_sale_price': product_child['special_price']['price'] if child_sale_active else product_child['price'],
                '_price': product_child['special_price']['price'] if child_sale_active else product_child['price'],
            }
            if child_sale_active:
                if product_child['special_price']['start_date']:
                    children_meta['_sale_price_dates_from'] = self.to_timestamp(product_child['special_price']['start_date'])
                if product_child['special_price']['end_date']:
                    children_meta['_sale_price_dates_to'] = self.to_timestamp(product_child['special_price']['end_date'])
            for meta_key, meta_value in children_meta.items():
                meta_insert_child = {
                    'post_id': children_id,
                    'meta_key': meta_key,
                    'meta_value': meta_value
                }
                if meta_key == '_sale_price_dates_from' or meta_key == '_sale_price_dates_to':
                    # Same meta_key quoting fix as for the parent meta above.
                    query_meta_key = {
                        'type': 'select',
                        'query': "SELECT * FROM `_DBPRF_postmeta` WHERE `post_id` = " + to_str(children_id) + " AND `meta_key` = '" + to_str(meta_key) + "'"
                    }
                    check_meta_key = self.select_data_connector(query_meta_key, 'postmeta')
                    if (not check_meta_key) or check_meta_key['result'] != 'success' or (not check_meta_key['data']) or (to_len(check_meta_key['data']) == 0):
                        sale_price_data = {
                            'post_id': children_id,
                            'meta_key': meta_key,
                            'meta_value': meta_value
                        }
                        meta_price_query = self.create_insert_query_connector("postmeta", sale_price_data)
                        all_query.append(meta_price_query)
                meta_query_child = self.create_update_query_connector('postmeta', meta_insert_child, {'post_id': children_id, 'meta_key': meta_key})
                all_query.append(meta_query_child)
    # Refresh SEO rewrite rows when the lecm_rewrite table exists.
    if self.is_exist_lecm_rewrite():
        if (self._notice['config']['seo'] or self._notice['config']['seo_301']) and convert['seo']:
            delete_query = list()
            delete_query.append(self.create_delete_query_connector('lecm_rewrite', {'type': 'product', 'type_id': product_id}))
            self.query_multiple_data_connector(delete_query)
            for seo_url in convert['seo']:
                if not seo_url['request_path']:
                    continue
                if old_url_key != seo_url['request_path'].replace(' ', ''):
                    query_check = {
                        'link': seo_url['request_path']
                    }
                    if self.is_wpml() and convert.get('language_code'):
                        query_check['lang'] = convert['language_code']
                    seo_query = {
                        'type': 'select',
                        'query': "SELECT * FROM _DBPRF_lecm_rewrite WHERE " + self.dict_to_where_condition(query_check)
                    }
                    check_seo_exit = self.select_data_connector(seo_query, 'lecm_rewrite')
                    if check_seo_exit and check_seo_exit['result'] == 'success' and to_len(check_seo_exit['data']) > 0:
                        continue
                    else:
                        le_url_rewrite = {
                            'link': to_str(seo_url['request_path']).rstrip('/'),
                            'type': 'product',
                            'type_id': product_id
                        }
                        if self.is_wpml():
                            le_url_rewrite['lang'] = convert.get('language_code')
                        if self._notice['config']['seo_301']:
                            le_url_rewrite['redirect_type'] = 301
                        self.import_data_connector(self.create_insert_query_connector("lecm_rewrite", le_url_rewrite), 'seo_product')
    self.import_multiple_data_connector(all_query, 'update_product')
    # WPML: when called without an explicit language, propagate the update
    # to every mapped translation of this product (skipping this post_id).
    if self.is_wpml() and not convert.get('language_code'):
        where_product_wpml = {
            'migration_id': self._migration_id,
            'type': 'product',
        }
        if convert['id']:
            where_product_wpml['id_src'] = convert['id']
        else:
            where_product_wpml['code'] = convert['code']
        product_wpml = self.select_obj(TABLE_MAP, where_product_wpml)
        if product_wpml['result'] == 'success' and product_wpml['data']:
            for product_wpml_row in product_wpml['data']:
                if product_wpml_row['id_desc'] == product_id or not product_wpml_row.get('lang'):
                    continue
                convert_wpml = self.get_convert_data_language(convert, target_language_id = language_code)
                convert_wpml['language_code'] = product_wpml_row['lang']
                self.update_latest_data_product(product_wpml_row['id_desc'], convert_wpml, product, products_ext)
    return response_success()
def update_product_after_demo(self, product_id, convert, product, products_ext):
language_code = convert.get('language_code')
if self.is_wpml() and not language_code:
language_code = self._notice['target']['language_default']
all_queries = list()
query_delete = {
'type': 'delete',
'query': 'DELETE FROM _DBPRF_term_relationships WHERE object_id = ' + to_str(product_id) + ' AND term_taxonomy_id IN (SELECT term_taxonomy_id FROM _DBPRF_term_taxonomy WHERE taxonomy IN ' + self.list_to_in_condition(['product_brand', 'product_cat']) + ')'
}
all_queries.append(query_delete)
# category
all_categories = list()
if convert['categories']:
for category in convert['categories']:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'], language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, None, category['code'], language_code)
if not category_id:
category_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], None, language_code)
if category_id:
all_categories.append(category_id)
all_categories = list(set(all_categories))
for cate_id in all_categories:
category_data = {
'object_id': product_id,
'term_taxonomy_id': cate_id,
'term_order': 0
}
category_query = self.create_insert_query_connector("term_relationships", category_data)
all_queries.append(category_query)
if self._notice['target']['support']['manufacturers']:
if convert['manufacturer']['id'] or convert['manufacturer']['name']:
manufacturer_id = self.get_map_field_by_src(self.TYPE_MANUFACTURER, convert['manufacturer']['id'])
if not manufacturer_id:
manufacturer_id = self.get_map_field_by_src(self.TYPE_MANUFACTURER, None, convert['manufacturer']['id'])
if manufacturer_id:
relationship_data = {
'object_id': product_id,
'term_taxonomy_id': manufacturer_id,
'term_order': 0
}
category_query = self.create_insert_query_connector("term_relationships", relationship_data)
all_queries.append(category_query)
elif convert['manufacturer']['name']:
slug = self.sanitize_title(convert['manufacturer']['name'])
manufacturer_term = {
'name': convert['manufacturer']['name'],
'slug': slug,
'term_group': 0,
}
manufacturer_term_query = self.create_insert_query_connector('terms', manufacturer_term)
term_id = self.import_data_connector(manufacturer_term_query, 'manufacturer')
if not term_id:
return response_warning('Manufacturer ' + to_str(convert['id']) + ' import false.')
manufacturer_taxonomy = {
'term_id': term_id,
'taxonomy': 'product_brand',
'description': '',
'parent': 0,
'count': 0
}
manufacturer_taxonomy_query = self.create_insert_query_connector('term_taxonomy', manufacturer_taxonomy)
manufacturer_taxonomy_import = self.import_manufacturer_data_connector(manufacturer_taxonomy_query, True, convert['id'])
if manufacturer_taxonomy_import:
relationship_data = {
'object_id': product_id,
'term_taxonomy_id': manufacturer_id,
'term_order': 0
}
relationship_query = self.create_insert_query_connector("term_relationships", relationship_data)
all_queries.append(relationship_query)
self.insert_map(self.TYPE_MANUFACTURER, convert['manufacturer']['id'], manufacturer_taxonomy_import, convert['manufacturer']['name'])
if convert['tax']['id'] or convert['tax']['code']:
tax_class = self.get_map_field_by_src(self.TYPE_TAX, convert['tax']['id'], convert['tax']['code'], 'code_desc')
if tax_class:
meta_insert = {
'post_id': product_id,
'meta_key': '_tax_class',
'meta_value': tax_class
}
where_meta = {
'post_id': product_id,
'meta_key': '_tax_class',
}
all_queries.append(self.create_update_query_connector('postmeta', meta_insert, where_meta))
self.import_multiple_data_connector(all_queries, 'update_product')
return response_success()
    def router_product_import(self, convert, product, products_ext):
        """Route hook: always dispatch product imports to 'product_import'."""
        return response_success('product_import')
    def before_product_import(self, convert, product, products_ext):
        """Pre-import hook for a product; no-op in this cart."""
        return response_success()
def product_import(self, convert, product, products_ext):
product_data = {
'post_author': 1,
'post_date': convert['created_at'] if convert['created_at'] else get_current_time(),
'post_date_gmt': convert['created_at'] if convert['created_at'] else get_current_time(),
'post_content': convert['description'],
'post_title': convert['name'],
'post_excerpt': '',
'post_status': 'publish' if convert['status'] else 'closed',
'comment_status': 'open',
'ping_status': 'closed',
'post_password': '',
'post_name': convert['name'],
'to_ping': '',
'pinged': '',
'post_modified': convert['updated_at'] if convert['updated_at'] else get_current_time(),
'post_modified_gmt': convert['updated_at'] if convert['updated_at'] else get_current_time(),
'post_content_filtered': '',
'post_parent': 0,
'guid': '',
'menu_order': 0,
'post_type': 'product',
'post_mime_type': '',
'comment_count': 0 ,
}
id_product = self.import_product_data_connector(self.create_insert_query_connector('posts', product_data))
self.insert_map(self.TYPE_PRODUCT, convert['id'], id_product, convert['code'])
thumbnail_id = False
product_image = ''
if convert['thumb_image']['url'] or convert['thumb_image']['path']:
image_process = self.process_image_before_import(convert['thumb_image']['url'],
convert['thumb_image']['path'])
image_import_path = self.uploadImageConnector(image_process, self.add_prefix_path(
self.make_woocommerce_image_path(image_process['path'], self.TYPE_PRODUCT),
self._notice['target']['config']['image_product'].rstrip('/')))
if image_import_path:
product_image = self.remove_prefix_path(image_import_path,
self._notice['target']['config']['image_product'])
image_details = self.get_sizes(image_process['url'])
thumbnail_id = self.wp_image(product_image, image_details, convert=convert)
product_meta = {
'_edit_lock': '',
'_edit_last': 1,
'_thumbnail_id': thumbnail_id,
'_regular_price': convert['price'],
'_sale_price': convert['price'],
'total_sales': 0,
'_tax_status': 'taxable',
'_tax_class': '',
'_manage_stock': 'yes' if convert['manage_stock'] else 'no',
'_backorders': 'no',
'_sold_individually': 'yes',
'_virtual': 'no',
'_downloadable': 'no',
'_download_limit': -1,
'_download_expiry': -1,
'_stock': convert['qty'],
'_stock_status': 'instock' if convert['qty'] else 'outofstock',
'_wc_average_rating': '',
'_wc_review_count': 0,
'_product_version': '4.5.1',
'_price': convert['price'],
'_weight': convert['weight'],
'_length': convert['length'],
'_width': convert['width'],
'_height': convert['height'],
'_sku': convert['sku'],
}
for meta_key, meta_value in product_meta.items():
meta_data = {
'post_id': id_product,
'meta_key': meta_key,
'meta_value': meta_value
}
self.import_data_connector(self.create_insert_query_connector('postmeta', data=meta_data), 'products')
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy WHERE taxonomy = 'product_type'"
}
product_type = self.select_data_connector(query)['data'][0]['term_taxonomy_id']
for category in convert['categories']:
term_relate = {
'object_id': id_product,
'term_taxonomy_id': product_type
}
self.import_data_connector(self.create_insert_query_connector('term_relationships', data=term_relate), 'products')
id_category = self.get_map_field_by_src(self.TYPE_CATEGORY, category['id'], category['code'])
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy WHERE term_id = " + to_str(id_category)
}
id_taxonomy = self.select_data_connector(query)['data'][0]['term_taxonomy_id']
term_relate = {
'object_id': id_product,
'term_taxonomy_id': id_taxonomy
}
self.import_data_connector(self.create_insert_query_connector('term_relationships', data=term_relate), 'products')
return response_success(id_product)
    def after_product_import(self, product_id, convert, product, products_ext):
        """Post-import hook for a product; no-op in this cart."""
        return response_success()
    def addition_product_import(self, convert, product, products_ext):
        """Additional-data hook for a product; no-op in this cart."""
        return response_success()
    def finish_product_import(self):
        """Finish hook for product import: persist a flag in the migration
        notice when the per-product variant limit was hit during import."""
        if self.is_variant_limit:
            self._notice['config']['variant_limit'] = True
        return response_success()
# TODO: CUSTOMER
# def prepare_customers_import(self):
# return self
#
# def prepare_customers_export(self):
# return self
def prepare_customers_import(self):
if self._notice['config'].get('cus_pass'):
delete_query = {
'type': 'query',
'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'LEPP_TYPE' OR option_name = 'LEPP_URL'"
}
config_delete = self.import_data_connector(delete_query)
all_queries = list()
type_data = {
'option_name': 'LEPP_TYPE',
'option_value': self._notice['src']['cart_type'],
'autoload': 'yes'
}
type_query = self.create_insert_query_connector('options', type_data)
all_queries.append(type_query)
url_data = {
'option_name': 'LEPP_URL',
'option_value': self._notice['src']['cart_url'],
'autoload': 'yes'
}
url_query = self.create_insert_query_connector('options', url_data)
all_queries.append(url_query)
if all_queries:
self.import_multiple_data_connector(all_queries, 'customer')
return self
def get_customers_main_export(self):
id_src = self._notice['process']['customers']['id_src']
limit = self._notice['setting']['customers']
prefix = self._notice['src']['config']['table_prefix']
if self._notice['src']['config'].get('site_id'):
prefix = to_str(prefix).replace(to_str(self._notice['src']['config'].get('site_id')) + '_', '')
query = {
'type': 'select',
'query': "SELECT * FROM " + prefix + "users u LEFT JOIN " + prefix + "usermeta um ON u.ID = um.user_id WHERE (um.meta_key = '_DBPRF_capabilities' AND um.meta_value LIKE '%customer%' OR um.meta_value LIKE '%subscriber%') AND ID > " + to_str(id_src) + " ORDER BY ID ASC LIMIT " + to_str(limit)
}
# customers = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
customers = self.select_data_connector(query, 'customers')
if not customers or customers['result'] != 'success':
return response_error()
return customers
def get_customers_ext_export(self, customers):
url_query = self.get_connector_url('query')
customers_ids = duplicate_field_value_from_list(customers['data'], 'ID')
customer_ext_queries = {
'user_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_usermeta WHERE user_id IN " + self.list_to_in_condition(
customers_ids),
}
}
if self._notice['src']['support'].get('customer_point_rewards'):
customer_ext_queries['wc_points_rewards_user_points'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points WHERE (order_id IS NULL OR order_id = '') AND user_id IN " + self.list_to_in_condition(customers_ids),
}
customer_ext_queries['wc_points_rewards_user_points_log'] = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points_log WHERE (order_id IS NULL OR order_id = '') AND user_id IN " + self.list_to_in_condition(customers_ids),
}
# customers_ext = self.get_connector_data(url_query,
# {'serialize': True, 'query': json.dumps(customer_ext_queries)})
customers_ext = self.select_multiple_data_connector(customer_ext_queries, 'customers')
if not customers_ext or customers_ext['result'] != 'success':
return response_error()
return customers_ext
    def convert_customer_export(self, customer, customers_ext):
        """Map one source WP user row (+ its usermeta) onto the framework's
        generic customer structure.

        Builds the base customer record, a billing address and a shipping
        address from the WooCommerce usermeta keys, and — when supported —
        attaches WooCommerce Points & Rewards balances and their log.

        :param customer: one joined row from get_customers_main_export()
        :param customers_ext: result of get_customers_ext_export()
        :return: response_success(customer_data)
        """
        customer_data = self.construct_customer()
        customer_data = self.add_construct_default(customer_data)
        customer_data['id'] = customer['ID']
        customer_data['code'] = customer['user_login']
        customer_data['username'] = customer['user_nicename']
        customer_data['email'] = customer['user_email']
        # user_pass is the WP password hash, migrated verbatim.
        customer_data['password'] = customer['user_pass']
        customer_data['website'] = customer['user_url']
        customer_data['user_url'] = customer['user_url']
        customer_data['active'] = True
        customer_data['created_at'] = convert_format_time(customer['user_registered'])
        # All usermeta rows belonging to this user.
        customer_meta = get_list_from_list_by_field(customers_ext['data']['user_meta'], 'user_id', customer['ID'])
        customer_data['first_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'first_name', 'meta_value')
        customer_data['last_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'last_name', 'meta_value')
        # NOTE(review): table_prefix usually already ends in '_' (e.g. 'wp_'),
        # so this builds 'wp__capabilities' — confirm the stored prefix
        # convention before relying on this lookup.
        prefix = self._notice['src']['config']['table_prefix']
        capabilities = to_str(prefix) + '_capabilities'
        customer_data['capabilities'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', capabilities, 'meta_value')
        # billing
        address_data = self.construct_customer_address()
        address_data['code'] = to_str(customer['ID']) + "_1"
        address_data['first_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_first_name', 'meta_value')
        address_data['last_name'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_last_name', 'meta_value')
        address_data['address_1'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_address_1', 'meta_value')
        address_data['address_2'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_address_2', 'meta_value')
        address_data['city'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_city', 'meta_value')
        address_data['postcode'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_postcode', 'meta_value')
        address_data['telephone'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_phone', 'meta_value')
        address_data['company'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_company', 'meta_value')
        address_data['fax'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_fax', 'meta_value')
        address_data['country']['country_code'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_country', 'meta_value')
        address_data['country']['code'] = address_data['country']['country_code']
        address_data['country']['name'] = self.get_country_name_by_code(address_data['country']['country_code'])
        address_data['state']['state_code'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'billing_state', 'meta_value')
        address_data['state']['code'] = address_data['state']['state_code']
        address_data['default']['billing'] = True
        # Only keep the address when it actually has street data.
        if address_data['address_1'] or address_data['address_2']:
            customer_data['address'].append(address_data)
        # shipping
        shipping_address = self.get_list_from_list_by_field_as_first_key(customer_meta, 'meta_key', 'shipping_')
        if shipping_address:
            shipping_data = self.construct_customer_address()
            shipping_data['code'] = to_str(customer['ID']) + "_2"
            shipping_data['first_name'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_first_name', 'meta_value')
            shipping_data['last_name'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_last_name', 'meta_value')
            shipping_data['address_1'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_address_1', 'meta_value')
            shipping_data['address_2'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_address_2', 'meta_value')
            shipping_data['city'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_city', 'meta_value')
            shipping_data['postcode'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_postcode', 'meta_value')
            shipping_data['telephone'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_phone', 'meta_value')
            shipping_data['company'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_company', 'meta_value')
            shipping_data['fax'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_fax', 'meta_value')
            shipping_data['country']['country_code'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_country', 'meta_value')
            shipping_data['country']['code'] = shipping_data['country']['country_code']
            shipping_data['country']['name'] = self.get_country_name_by_code(shipping_data['country']['code'])
            shipping_data['state']['state_code'] = get_row_value_from_list_by_field(shipping_address, 'meta_key', 'shipping_state', 'meta_value')
            shipping_data['state']['code'] = shipping_data['state']['state_code']
            shipping_data['default']['shipping'] = True
            if shipping_data['address_1'] or shipping_data['address_2']:
                customer_data['address'].append(shipping_data)
        # NOTE(review): a fallback of first/last name from the billing
        # address appears to have been started here but never enabled.
        # TODO: Plugin WooCommerce Points and Rewards
        if self._notice['src']['support'].get('customer_point_rewards'):
            customer_point_rewards = dict()
            customer_point_rewards['reward_point'] = list()
            customer_point_rewards['reward_point_log'] = list()
            customer_point_rewards['points_balance'] = get_row_value_from_list_by_field(customer_meta, 'meta_key', 'wc_points_balance', 'meta_value')
            wc_points_rewards_user_points = get_list_from_list_by_field(customers_ext['data']['wc_points_rewards_user_points'], 'user_id', customer['ID'])
            if wc_points_rewards_user_points:
                for points_rewards_user_points in wc_points_rewards_user_points:
                    reward_point = dict()
                    reward_point['points'] = points_rewards_user_points['points']
                    reward_point['points_balance'] = points_rewards_user_points['points_balance']
                    reward_point['order_id'] = points_rewards_user_points['order_id']
                    reward_point['created_at'] = points_rewards_user_points['date']
                    customer_point_rewards['reward_point'].append(reward_point)
            wc_points_rewards_user_points_log = get_list_from_list_by_field(customers_ext['data']['wc_points_rewards_user_points_log'], 'user_id', customer['ID'])
            if wc_points_rewards_user_points_log:
                for points_rewards_user_points_log in wc_points_rewards_user_points_log:
                    reward_point_log = dict()
                    reward_point_log['points'] = points_rewards_user_points_log['points']
                    reward_point_log['type'] = points_rewards_user_points_log['type']
                    reward_point_log['user_points_id'] = points_rewards_user_points_log['user_points_id']
                    reward_point_log['order_id'] = points_rewards_user_points_log['order_id']
                    reward_point_log['admin_user_id'] = points_rewards_user_points_log['admin_user_id']
                    reward_point_log['data'] = points_rewards_user_points_log['data']
                    reward_point_log['created_at'] = points_rewards_user_points_log['date']
                    customer_point_rewards['reward_point_log'].append(reward_point_log)
            customer_data['point_rewards'] = customer_point_rewards
        return response_success(customer_data)
def get_customer_id_import(self, convert, customer, customers_ext):
return customer['ID']
def check_customer_import(self, convert, customer, customers_ext):
return True if self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['id'], convert['code']) else False
    def router_customer_import(self, convert, customer, customers_ext):
        """Route hook: always dispatch customer imports to 'customer_import'."""
        return response_success('customer_import')
    def before_customer_import(self, convert, customer, customers_ext):
        """Pre-import hook for a customer; no-op in this cart."""
        return response_success()
def customer_import(self, convert, customer, customers_ext):
customer_data = {
'user_login': convert['username'],
'user_pass': convert['password'],
'user_nicename': convert['username'],
'user_email': convert['email'],
'user_url': '',
'user_registered': convert['created_at'],
'user_activation_key': '',
'user_status': True,
'display_name': convert['first_name'],
}
id_customer = self.import_customer_data_connector(self.create_insert_query_connector('users', customer_data))
self.insert_map(self.TYPE_CUSTOMER, convert['id'], id_customer, convert['code'])
user_data = {
'nickname': convert['username'],
'first_name': convert['first_name'],
'last_name': convert['last_name'],
'description': '',
'rich_editing': True,
'syntax_highlighting': True,
'comment_shortcuts': False,
'admin_color': 'fresh',
'use_ssl': 0,
'show_admin_bar_front': True,
'locale': '',
'wp_capabilities': 'a:1:{s:8:"customer";b:1;}',
'dismissed_wp_pointers': ''
}
for meta_key, meta_value in user_data.items():
meta_data = {
'user_id': id_customer,
'meta_key': meta_key,
'meta_value': meta_value
}
self.import_data_connector(self.create_insert_query_connector('usermeta', data=meta_data), 'customers')
return response_success(id_customer)
    def after_customer_import(self, customer_id, convert, customer, customers_ext):
        """Post-import hook for a customer; no-op in this cart."""
        return response_success()
    def addition_customer_import(self, convert, customer, customers_ext):
        """Additional-data hook for a customer; no-op in this cart."""
        return response_success()
# TODO: ORDER
    def prepare_orders_import(self):
        """Prepare hook before order import; nothing to do for this cart."""
        return self
    def prepare_orders_export(self):
        """Prepare hook before order export; nothing to do for this cart."""
        return self
def get_orders_main_export(self):
id_src = self._notice['process']['orders']['id_src']
limit = self._notice['setting']['orders']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'shop_order' AND post_status NOT IN ('inherit','auto-draft') AND ID > " + to_str(
id_src) + " ORDER BY ID ASC LIMIT " + to_str(limit)
}
# orders = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
orders = self.select_data_connector(query, 'orders')
if not orders or orders['result'] != 'success':
return response_error()
return orders
    def get_orders_ext_export(self, orders):
        """Fetch all auxiliary source data needed to convert a batch of orders.

        Runs three waves of queries:
          1. order items, order notes (comments), refund posts, order meta;
          2. item meta, note meta, postmeta for orders+refunds, the orders'
             customers and their first/last name meta (plus Points & Rewards
             rows when supported);
          3. postmeta of the products referenced by the order items.
        Each wave is merged into the result via sync_connector_object.

        :param orders: result of get_orders_main_export()
        :return: merged connector response dict, or response_error() on failure
        """
        url_query = self.get_connector_url('query')
        order_ids = duplicate_field_value_from_list(orders['data'], 'ID')
        # NOTE(review): name says "customer" but these are the order queries.
        customer_ext_queries = {
            'woocommerce_order_items': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_woocommerce_order_items WHERE order_id IN " + self.list_to_in_condition(
                    order_ids),
            },
            'order_note': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_comments WHERE comment_post_ID IN " + self.list_to_in_condition(
                    order_ids),
            },
            'order_refund': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_posts WHERE post_type = 'shop_order_refund' AND post_parent IN " + self.list_to_in_condition(
                    order_ids),
            },
            'order_meta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(order_ids),
            },
        }
        orders_ext = self.select_multiple_data_connector(customer_ext_queries, 'orders')
        if not orders_ext or orders_ext['result'] != 'success':
            return response_error()
        # product_option_value_ids = duplicate_field_value_from_list(orders_ext['data']['order_option'], 'product_option_value_id')
        # order_recurrings = duplicate_field_value_from_list(orders_ext['data']['order_recurring'], 'order_recurring_id')
        order_item_ids = duplicate_field_value_from_list(orders_ext['data']['woocommerce_order_items'], 'order_item_id')
        comment_ids = duplicate_field_value_from_list(orders_ext['data']['order_note'], 'comment_ID')
        refund_ids = duplicate_field_value_from_list(orders_ext['data']['order_refund'], 'ID')
        # Refund posts share the postmeta table with the orders themselves.
        post_meta_ids = list(set(refund_ids + order_ids))
        # Customer ids referenced by the orders (from the _customer_user meta).
        cus_list = get_list_from_list_by_field(orders_ext['data']['order_meta'], 'meta_key', '_customer_user')
        cus_ids = list()
        if cus_list:
            cus_ids = duplicate_field_value_from_list(cus_list, 'meta_value')
        orders_ext_rel_queries = {
            'woocommerce_order_itemmeta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_woocommerce_order_itemmeta WHERE order_item_id IN " + self.list_to_in_condition(order_item_ids),
            },
            'order_note_meta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_commentmeta WHERE comment_id IN " + self.list_to_in_condition(comment_ids),
            },
            'postmeta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(post_meta_ids),
            },
            'user': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_users WHERE ID IN " + self.list_to_in_condition(cus_ids),
            },
            'user_meta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_usermeta WHERE meta_key IN ('first_name','last_name') AND user_id IN " + self.list_to_in_condition(cus_ids),
            }
        }
        if self._notice['src']['support'].get('customer_point_rewards'):
            # Order-bound reward points (the customer export handles the rest).
            orders_ext_rel_queries['wc_points_rewards_user_points'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points WHERE order_id IN " + self.list_to_in_condition(order_ids),
            }
            orders_ext_rel_queries['wc_points_rewards_user_points_log'] = {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_wc_points_rewards_user_points_log WHERE order_id IN " + self.list_to_in_condition(order_ids),
            }
        orders_ext_rel = self.select_multiple_data_connector(orders_ext_rel_queries, 'orders')
        if not orders_ext_rel or orders_ext_rel['result'] != 'success':
            return response_error()
        orders_ext = self.sync_connector_object(orders_ext, orders_ext_rel)
        # Third wave: meta of the products referenced by the order items.
        pro_list = get_list_from_list_by_field(orders_ext_rel['data']['woocommerce_order_itemmeta'], 'meta_key', '_product_id')
        pro_ids = duplicate_field_value_from_list(pro_list, 'meta_value')
        orders_ext_third_rel_queries = {
            'products_meta': {
                'type': 'select',
                'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + self.list_to_in_condition(pro_ids),
            },
        }
        # NOTE(review): this wave goes through get_connector_data rather than
        # select_multiple_data_connector like the two above — confirm intended.
        orders_ext_third_rel = self.get_connector_data(url_query, {'serialize': True, 'query': json.dumps(orders_ext_third_rel_queries)})
        if not orders_ext_third_rel or orders_ext_third_rel['result'] != 'success':
            return response_error()
        orders_ext = self.sync_connector_object(orders_ext, orders_ext_third_rel)
        return orders_ext
    def convert_order_export(self, order, orders_ext):
        """Convert one WooCommerce order (a ``shop_order`` post row) into the
        migration framework's neutral order structure.

        :param order: row from ``_DBPRF_posts`` with post_type = 'shop_order'
        :param orders_ext: related rows fetched by the orders-ext export step
            (order items, item meta, post meta, users, notes, reward points)
        :return: response_success(order_data)
        """
        order_data = self.construct_order()
        order_data = self.add_construct_default(order_data)
        order_data['id'] = order['ID']
        order_data['status'] = order['post_status']
        # order data
        order_items = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_items'], 'order_id', order['ID'])
        shipping = get_row_from_list_by_field(order_items, 'order_item_type', 'shipping')
        taxes = get_list_from_list_by_field(order_items, 'order_item_type', 'tax')
        tax_names = list()
        total_tax = 0.0
        # Sum item tax + shipping tax across every 'tax' order item row.
        if taxes:
            tax_names = duplicate_field_value_from_list(taxes, 'order_item_name')
            for tax in taxes:
                order_tax_metas = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_itemmeta'], 'order_item_id', tax['order_item_id'])
                total_tax += to_decimal(self.get_value_metadata(order_tax_metas, 'tax_amount', 0.0))
                total_tax += to_decimal(self.get_value_metadata(order_tax_metas, 'shipping_tax_amount', 0.0))
        # Order post meta may arrive under either key depending on the export query used.
        if 'postmeta' in orders_ext['data']:
            order_meta = get_list_from_list_by_field(orders_ext['data']['postmeta'], 'post_id', order['ID'])
        else:
            order_meta = get_list_from_list_by_field(orders_ext['data']['order_meta'], 'post_id', order['ID'])
        # Custom order number (plugin "WooCommerce Sequential Order Numbers" etc.)
        ord_number = get_row_value_from_list_by_field(order_meta, 'meta_key', '_order_number', 'meta_value')
        if ord_number and self._notice['src']['support'].get('plugin_pre_ord'):
            order_data['order_number'] = ord_number
        order_data['tax']['title'] = '|'.join(tax_names) if tax_names else 'Tax'
        order_data['tax']['amount'] = total_tax if total_tax else self.get_value_metadata(order_meta, '_order_tax', 0.0000)
        order_data['shipping']['title'] = shipping['order_item_name'] if shipping else 'Shipping'
        order_data['shipping']['amount'] = self.get_value_metadata(order_meta, '_order_shipping', 0.0000)  # _order_shipping_tax
        discount_title = get_row_value_from_list_by_field(order_items, 'order_item_type', 'coupon', 'order_item_name')
        order_data['discount']['title'] = discount_title if discount_title else 'Discount'
        order_data['discount']['amount'] = self.get_value_metadata(order_meta, '_cart_discount', 0.0000)
        order_data['total']['title'] = 'Total'
        order_data['total']['amount'] = self.get_value_metadata(order_meta, '_order_total', 0.0000)
        order_data['subtotal']['title'] = 'Total'
        # Subtotal is derived: total - discount - tax - shipping.
        order_data['subtotal']['amount'] = to_decimal(self.get_value_metadata(order_meta, '_order_total', 0.0000)) - to_decimal(self.get_value_metadata(order_meta, '_cart_discount', 0.0000)) - to_decimal(order_data['tax']['amount']) - to_decimal(order_data['shipping']['amount'])
        # NOTE(review): the third argument of get_value_metadata appears to be a
        # default value elsewhere in this method; passing the literal string
        # 'meta_value' here (and below) looks like a copy/paste slip — confirm
        # the helper's signature before relying on these defaults.
        order_data['currency'] = self.get_value_metadata(order_meta, '_order_currency', 'meta_value')
        order_data['created_at'] = convert_format_time(order['post_date'])
        order_data['updated_at'] = convert_format_time(order['post_modified'])
        # order customer
        order_customer = self.construct_order_customer()
        order_customer = self.add_construct_default(order_customer)
        order_customer_src = self.get_value_metadata(order_meta, '_customer_user', 'meta_value')
        # Registered customer (positive _customer_user id) vs. guest checkout.
        if order_customer_src and to_int(order_customer_src) > 0:
            customer_src = get_row_from_list_by_field(orders_ext['data']['user'], 'ID', order_customer_src)
            customer_meta_src = get_list_from_list_by_field(orders_ext['data']['user_meta'], 'user_id', order_customer_src)
            if customer_src:
                order_customer['id'] = order_customer_src
                order_customer['code'] = get_value_by_key_in_dict(customer_src, 'user_login', '')
                order_customer['email'] = get_value_by_key_in_dict(customer_src, 'user_email', self.get_value_metadata(order_meta, '_billing_email', 'meta_value'))
                order_customer['username'] = get_value_by_key_in_dict(customer_src, 'display_name', '')
                order_customer['first_name'] = self.get_value_metadata(customer_meta_src, 'first_name', self.get_value_metadata(order_meta, '_billing_first_name', ''))
                order_customer['last_name'] = self.get_value_metadata(customer_meta_src, 'last_name', self.get_value_metadata(order_meta, '_billing_last_name', ''))
        else:
            # Guest order: fall back to the billing meta for identity.
            order_customer['email'] = self.get_value_metadata(order_meta, '_billing_email', 'meta_value')
            order_customer['username'] = order_customer['email']
            order_customer['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
            order_customer['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
        order_data['customer'] = order_customer
        # TODO: Plugin WooCommerce Points and Rewards
        if self._notice['src']['support'].get('customer_point_rewards'):
            customer_point_rewards = dict()
            customer_point_rewards['reward_point'] = list()
            customer_point_rewards['reward_point_log'] = list()
            wc_points_rewards_user_points = get_list_from_list_by_field(orders_ext['data']['wc_points_rewards_user_points'], 'order_id', order['ID'])
            if wc_points_rewards_user_points:
                for points_rewards_user_points in wc_points_rewards_user_points:
                    reward_point = dict()
                    reward_point['points'] = points_rewards_user_points['points']
                    reward_point['points_balance'] = points_rewards_user_points['points_balance']
                    reward_point['user_id'] = points_rewards_user_points['user_id']
                    reward_point['created_at'] = points_rewards_user_points['date']
                    customer_point_rewards['reward_point'].append(reward_point)
            wc_points_rewards_user_points_log = get_list_from_list_by_field(orders_ext['data']['wc_points_rewards_user_points_log'], 'order_id', order['ID'])
            if wc_points_rewards_user_points_log:
                for points_rewards_user_points_log in wc_points_rewards_user_points_log:
                    reward_point_log = dict()
                    reward_point_log['points'] = points_rewards_user_points_log['points']
                    reward_point_log['type'] = points_rewards_user_points_log['type']
                    reward_point_log['user_points_id'] = points_rewards_user_points_log['user_points_id']
                    reward_point_log['user_id'] = points_rewards_user_points_log['user_id']
                    reward_point_log['admin_user_id'] = points_rewards_user_points_log['admin_user_id']
                    reward_point_log['data'] = points_rewards_user_points_log['data']
                    reward_point_log['created_at'] = points_rewards_user_points_log['date']
                    customer_point_rewards['reward_point_log'].append(reward_point_log)
            order_data['point_rewards'] = customer_point_rewards
        # customer address
        customer_address = self.construct_order_address()
        customer_address = self.add_construct_default(customer_address)
        customer_address['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
        customer_address['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
        customer_address['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
        customer_address['address_1'] = self.get_value_metadata(order_meta, '_billing_address_1', '')
        customer_address['address_2'] = self.get_value_metadata(order_meta, '_billing_address_2', '')
        customer_address['city'] = self.get_value_metadata(order_meta, '_billing_city', '')
        customer_address['postcode'] = self.get_value_metadata(order_meta, '_billing_postcode', '')
        customer_address['telephone'] = self.get_value_metadata(order_meta, '_billing_phone', '')
        customer_address['company'] = self.get_value_metadata(order_meta, '_billing_company', '')
        customer_address['country']['code'] = self.get_value_metadata(order_meta, '_billing_country', '')
        customer_address['country']['country_code'] = self.get_value_metadata(order_meta, '_billing_country', '')
        customer_address['country']['name'] = self.get_country_name_by_code(customer_address['country']['country_code'])
        customer_address['state']['state_code'] = self.get_value_metadata(order_meta, '_billing_state', '')
        customer_address['state']['code'] = customer_address['state']['state_code']
        order_data['customer_address'] = customer_address
        # billing address
        order_billing = self.construct_order_address()
        order_billing = self.add_construct_default(order_billing)
        order_billing['first_name'] = self.get_value_metadata(order_meta, '_billing_first_name', '')
        order_billing['last_name'] = self.get_value_metadata(order_meta, '_billing_last_name', '')
        order_billing['email'] = self.get_value_metadata(order_meta, '_billing_email', '')
        order_billing['address_1'] = self.get_value_metadata(order_meta, '_billing_address_1', '')
        order_billing['address_2'] = self.get_value_metadata(order_meta, '_billing_address_2', '')
        order_billing['city'] = self.get_value_metadata(order_meta, '_billing_city', '')
        order_billing['postcode'] = self.get_value_metadata(order_meta, '_billing_postcode', '')
        order_billing['telephone'] = self.get_value_metadata(order_meta, '_billing_phone', '')
        order_billing['company'] = self.get_value_metadata(order_meta, '_billing_company', '')
        order_billing['country']['code'] = self.get_value_metadata(order_meta, '_billing_country', '')
        order_billing['country']['country_code'] = self.get_value_metadata(order_meta, '_billing_country', '')
        order_billing['country']['name'] = self.get_country_name_by_code(order_billing['country']['country_code'])
        order_billing['state']['state_code'] = self.get_value_metadata(order_meta, '_billing_state', '')
        order_billing['state']['code'] = order_billing['state']['state_code']
        # Synthetic address code used for de-duplication on the target side.
        order_billing['code'] = self.convert_attribute_code(to_str(order_billing['first_name']) + '-' + to_str(order_billing['last_name']) + '-' + to_str(order_billing['address_1']) + '-' + to_str(order_billing['address_2']))
        order_data['billing_address'] = order_billing
        # shipping address
        order_delivery = self.construct_order_address()
        order_delivery = self.add_construct_default(order_delivery)
        order_delivery['first_name'] = self.get_value_metadata(order_meta, '_shipping_first_name', '')
        order_delivery['last_name'] = self.get_value_metadata(order_meta, '_shipping_last_name', '')
        order_delivery['email'] = self.get_value_metadata(order_meta, '_shipping_email', '')
        order_delivery['address_1'] = self.get_value_metadata(order_meta, '_shipping_address_1', '')
        order_delivery['address_2'] = self.get_value_metadata(order_meta, '_shipping_address_2', '')
        order_delivery['city'] = self.get_value_metadata(order_meta, '_shipping_city', '')
        order_delivery['postcode'] = self.get_value_metadata(order_meta, '_shipping_postcode', '')
        # Some stores keep the shipping phone under the '_shipping_Phone_No' key.
        order_delivery['telephone'] = self.get_value_metadata(order_meta, '_shipping_phone', '') if self.get_value_metadata(order_meta, '_shipping_phone', '') else self.get_value_metadata(order_meta, '_shipping_Phone_No', '')
        order_delivery['company'] = self.get_value_metadata(order_meta, '_shipping_company', '')
        order_delivery['country']['code'] = self.get_value_metadata(order_meta, '_shipping_country', '')
        order_delivery['country']['country_code'] = self.get_value_metadata(order_meta, '_shipping_country', '')
        order_delivery['country']['name'] = self.get_country_name_by_code(order_delivery['country']['country_code'])
        order_delivery['state']['state_code'] = self.get_value_metadata(order_meta, '_shipping_state', '')
        order_delivery['state']['code'] = order_delivery['state']['state_code']
        order_delivery['code'] = self.convert_attribute_code(to_str(order_delivery['first_name']) + '-' + to_str(order_delivery['last_name']) + '-' + to_str(order_delivery['address_1']) + '-' + to_str(order_delivery['address_2']))
        order_data['shipping_address'] = order_delivery
        # order_data['user_history'] = self.get_value_metadata(order_meta, '_user_history', '')
        # Line items: one entry per 'line_item' order item row.
        order_products = get_list_from_list_by_field(order_items, 'order_item_type', 'line_item')
        order_items = list()
        for order_product in order_products:
            order_product_metas = get_list_from_list_by_field(orders_ext['data']['woocommerce_order_itemmeta'], 'order_item_id', order_product['order_item_id'])
            qty = self.get_value_metadata(order_product_metas, '_qty', 1)
            # Guard against division by zero in per-unit price below.
            if to_int(qty) == 0:
                qty = 1
            order_item_subtotal = self.get_value_metadata(order_product_metas, '_line_subtotal', 0.0000)
            order_item = self.construct_order_item()
            order_item = self.add_construct_default(order_item)
            order_item['id'] = order_product['order_item_id']
            # Prefer the variation id; fall back to the parent product id.
            order_item['product']['id'] = self.get_value_metadata(order_product_metas, '_variation_id', self.get_value_metadata(order_product_metas, '_product_id', 0))
            order_item['product']['code'] = self.get_value_metadata(order_product_metas, '_product_code', 0)
            product_meta = get_list_from_list_by_field(orders_ext['data']['products_meta'], 'post_id', order_item['product']['id'])
            order_item['product']['sku'] = self.get_value_metadata(product_meta, '_sku', '')
            order_item['product']['name'] = order_product['order_item_name']
            order_item['qty'] = to_decimal(qty) if qty != '' else 1
            # Per-unit price derived from the line subtotal.
            order_item['price'] = to_decimal(order_item_subtotal) / to_decimal(qty) if (qty != 0 and qty != '') else 0
            order_item['original_price'] = to_decimal(order_item_subtotal) / to_decimal(qty) if (qty != 0 and qty != '') else 0
            order_item['tax_amount'] = self.get_value_metadata(order_product_metas, '_line_tax', 0.0000)
            order_item['subtotal'] = order_item_subtotal
            order_item['total'] = self.get_value_metadata(order_product_metas, '_line_total', 0.0000)
            order_item['options'] = list()
            if order_product['order_item_type'] == 'line_item':
                order_item_options = list()
                # Internal WooCommerce meta keys are excluded; everything else
                # is treated as a customer-visible item option.
                keys = {'_qty', '_tax_class', '_product_id', '_variation_id', '_line_subtotal', '_line_subtotal_tax',
                        '_line_total', '_line_tax', '_line_tax_data', '_original_order_item_id'}
                for order_product_meta in order_product_metas:
                    if order_product_meta['meta_key'] not in keys:
                        order_item_option = self.construct_order_item_option()
                        # order_item_option['option_name'] = order_product_meta['meta_key']
                        order_item_option['option_name'] = unquote(order_product_meta['meta_key'])
                        # Attribute-taxonomy options ('pa_*') are skipped here.
                        if order_item_option['option_name'] and 'pa_' in order_item_option['option_name']:
                            continue
                        order_item_option['option_value_name'] = order_product_meta['meta_value']
                        # unquote(order_product['order_item_name'])
                        order_item_options.append(order_item_option)
                order_item['options'] = order_item_options
            order_items.append(order_item)
        order_data['items'] = order_items
        # Order history entries come from the WP comments table.
        order_notes = get_list_from_list_by_field(orders_ext['data']['order_note'], 'comment_post_ID', order['ID'])
        order_history = list()
        for order_note in order_notes:
            order_note_meta = get_list_from_list_by_field(orders_ext['data']['order_note_meta'], 'comment_id', order_note['comment_ID'])
            order_history = self.construct_order_history()
            order_history = self.add_construct_default(order_history)
            order_history['id'] = order_note['comment_ID']
            order_history['status'] = order_note['comment_approved']
            order_history['comment'] = order_note['comment_content']
            order_history['notified'] = self.get_value_metadata(order_note_meta, 'is_customer_note', False)
            order_history['created_at'] = convert_format_time(order_note['comment_date'])
            order_data['history'].append(order_history)
        order_payment = self.construct_order_payment()
        order_payment = self.add_construct_default(order_payment)
        order_payment['id'] = order['ID']
        order_payment['method'] = self.get_value_metadata(order_meta, '_payment_method')
        order_payment['title'] = self.get_value_metadata(order_meta, '_payment_method_title')
        # custom order_number plugin WooCommerce Sequential Order Numbers
        # order_data['order_number'] = self.get_value_metadata(order_meta, '_order_number', '')
        # order_data['order_number_formatted'] = self.get_value_metadata(order_meta, '_order_number_formatted', '')
        # order_data['order_number_meta'] = self.get_value_metadata(order_meta, '_order_number_meta', '')
        order_data['payment'] = order_payment
        return response_success(order_data)
def get_order_id_import(self, convert, order, orders_ext):
return order['ID']
def check_order_import(self, convert, order, orders_ext):
return self.get_map_field_by_src(self.TYPE_ORDER, convert['id'], convert['code'])
def update_order_after_demo(self, order_id, convert, order, orders_ext):
all_queries = list()
delete_query = list()
# order item
delete_query_child = {
'type': 'delete',
'query': 'DELETE FROM _DBPRF_woocommerce_order_itemmeta WHERE order_item_id IN (SELECT order_item_id FROM _DBPFF_woocommerce_order_items WHERE order_id = ' + to_str(order_id) + ')'
}
delete_query.append(delete_query_child)
delete_query.append(self.create_delete_query_connector('woocommerce_order_items', {'order_id': order_id}))
self.import_multiple_data_connector(delete_query, 'delete_ord_update')
order_items = convert['items']
for item in order_items:
order_item_data = {
'order_item_name': item['product']['name'],
'order_item_type': 'line_item',
'order_id': order_id
}
order_item_query = self.create_insert_query_connector("woocommerce_order_items", order_item_data)
order_item_id = self.import_data_connector(order_item_query, 'order')
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'])
if not product_id:
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, None, item['product']['id'])
if not product_id:
product_id = 0
order_item_meta = {
'_qty': item['qty'],
'_tax_class': '',
'_product_id': product_id,
'_variation_id': '',
'_line_subtotal': item['subtotal'],
'_line_total': item['total'],
'_line_subtotal_tax': 0,
'_line_tax': 0,
'_line_tax_data': php_serialize({
'total': 0,
'subtotal': 0
}),
}
for meta_key, meta_value in order_item_meta.items():
meta_insert = {
'order_item_id': order_item_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
for option in item['options']:
meta_insert = {
'order_item_id': order_item_id,
'meta_key': option['option_name'],
'meta_value': option['option_value_name']
}
meta_query = self.create_insert_query_connector("woocommerce_order_itemmeta", meta_insert)
all_queries.append(meta_query)
return response_success()
def router_order_import(self, convert, order, orders_ext):
return response_success('order_import')
def before_order_import(self, convert, order, orders_ext):
return response_success()
def order_import(self, convert, order, orders_ext):
order_data = {
'post_author': 1,
'post_date': convert['created_at'] if convert['created_at'] else get_current_time(),
'post_date_gmt': convert['created_at'] if convert['created_at'] else get_current_time(),
'post_content': '',
'post_title': '',
'post_excerpt': '',
'post_status': 'publish' if convert['status'] else 'closed',
'comment_status': 'open',
'ping_status': 'closed',
'post_password': '',
'post_name': '',
'to_ping': '',
'pinged': '',
'post_modified': convert['updated_at'] if convert['updated_at'] else get_current_time(),
'post_modified_gmt': convert['updated_at'] if convert['updated_at'] else get_current_time(),
'post_content_filtered': '',
'post_parent': 0,
'guid': '',
'menu_order': 0,
'post_type': 'shop_order',
'post_mime_type': '',
'comment_count': 0,
}
id_order = self.import_product_data_connector(self.create_insert_query_connector('posts', order_data))
self.insert_map(self.TYPE_ORDER, convert['id'], id_order, convert['code'])
id_customers = self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['customer']['id'])
order_meta = {
'_edit_lock': '',
'_edit_last': 1,
'customer_user': id_customers,
'_order_currency': convert['currency'],
'_order_shipping_tax': convert['shipping']['amount'],
'_order_tax': convert['tax']['amount'],
'_order_total': convert['total']['amount'],
'_order_version': '4.5.1',
'_prices_include_tax': 'no',
# '_billing_address_index': convert['billing_address']['first_name'] + ' '
# + convert['billing_address']['last_name'] + ' '
# + convert['billing_address']['company'] + ' '
# + convert['billing_address']['address_1'] + ' '
# + convert['billing_address']['address_2'] + ' '
# + convert['billing_address']['city'] + ' '
# + convert['billing_address']['state']['name'] + ' '
# + convert['billing_address']['postcode'] + ' '
# + convert['billing_address']['country']['country_code'] + ' '
# + convert['customer']['email'] + ' '
# + convert['billing_address']['telephone'],
# '_shipping_address_index': convert['shipping_address']['first_name'] + ' '
# + convert['shipping_address']['last_name'] + ' '
# + convert['shipping_address']['company'] + ' '
# + convert['shipping_address']['address_1'] + ' '
# + convert['shipping_address']['address_2'] + ' '
# + convert['shipping_address']['city'] + ' '
# + convert['shipping_address']['country']['name'] + ' '
# + convert['shipping_address']['postcode'] + ' '
# + convert['shipping_address']['country']['country_code'] + ' '
# + convert['customer']['email'] + ' '
# + convert['shipping_address']['telephone'],
'_cart_discount': 0,
'_cart_discount_tax': 0,
'_order_shipping': 0,
'_order_key': '',
'_payment_method': convert['payment']['method'],
'_payment_method_title': convert['payment']['title'],
'_created_via': 'migrate',
'_date_paid': '',
'_billing_first_name': convert['billing_address']['first_name'],
'_billing_last_name': convert['billing_address']['last_name'],
'_billing_company': convert['billing_address']['company'],
'_billing_address_1': convert['billing_address']['address_1'],
'_billing_address_2': convert['billing_address']['address_2'],
'_billing_city': convert['billing_address']['city'],
'_billing_state': convert['billing_address']['state']['name'],
'_billing_postcode': convert['billing_address']['postcode'],
'_billing_country': convert['billing_address']['country']['country_code'],
'_billing_email': convert['customer']['email'],
'_billing_phone': convert['billing_address']['telephone'],
'_shipping_first_name': convert['shipping_address']['first_name'],
'_shipping_last_name': convert['shipping_address']['last_name'],
'_shipping_company': convert['shipping_address']['company'],
'_shipping_address_1': convert['shipping_address']['address_1'],
'_shipping_address_2': convert['shipping_address']['address_2'],
'_shipping_city': convert['shipping_address']['city'],
'_shipping_state': convert['shipping_address']['state']['name'],
'_shipping_postcode': convert['shipping_address']['postcode'],
'_shipping_country': convert['billing_address']['country']['country_code'],
'_paid_date': convert['created_at'],
'_download_permissions_granted': 'yes',
'_recorded_sales': 'yes',
'_recorded_coupon_usage_counts': 'yes',
'_order_stock_reduced': 'yes',
}
for meta_key, meta_value in order_meta.items():
data = {
'post_id': id_order,
'meta_key': meta_key,
'meta_value': meta_value
}
self.import_data_connector(self.create_insert_query_connector('postmeta', data=data), 'order')
for item in convert['items']:
item_name = item['product']['name']
woo_data = {
'order_item_name': item_name,
'order_item_type': 'line_item',
'order_id': id_order
}
id_order_item = self.import_data_connector(self.create_insert_query_connector('woocommerce_order_items', data=woo_data), 'order')
product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, item['product']['id'])
woo_item_meta = {
'_product_id': product_id,
'_variation_id': 0,
'_qty': item['qty'],
'tax_class': '',
'_line_subtotal': item['subtotal'],
'_line_subtotal_tax': '',
'_line_total': item['total'],
'_line_tax': item['tax_amount'],
'_line_tax_data': 'a:2:{s:8:"subtotal";a:0:{}s:5:"total";a:0:{}}',
'_reduced_stock': item['qty']
}
for meta_key, meta_value in woo_item_meta.items():
woo_item_data = {
'order_item_id': id_order_item,
'meta_key': meta_key,
'meta_value': meta_value
}
self.import_data_connector(self.create_insert_query_connector('woocommerce_order_itemmeta', data=woo_item_data), 'order')
return response_success(id_order)
def after_order_import(self, order_id, convert, order, orders_ext):
return response_success()
def addition_order_import(self, convert, order, orders_ext):
return response_success()
# TODO: REVIEW
def prepare_reviews_import(self):
return self
def prepare_reviews_export(self):
return self
def get_reviews_main_export(self):
id_src = self._notice['process']['reviews']['id_src']
limit = self._notice['setting']['reviews']
query = {
'type': 'select',
'query': "SELECT cm.*, p.post_type FROM _DBPRF_comments AS cm "
"LEFT JOIN _DBPRF_posts AS p ON p.ID = cm.comment_post_ID "
"WHERE p.post_type = 'product' AND cm.comment_ID > " + to_str(
id_src) + " ORDER BY cm.comment_ID ASC LIMIT " + to_str(limit)
}
# reviews = self.get_connector_data(self.get_connector_url('query'), {'query': json.dumps(query)})
reviews = self.select_data_connector(query, 'reviews')
if not reviews or reviews['result'] != 'success':
return response_error()
return reviews
def get_product_download_data(self, product_id):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE meta_key = '_downloadable_files' AND post_id = " + to_str(product_id)
}
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success' or len(products['data']) == 0:
return None
return php_unserialize(products['data'][0]['meta_value'])
def get_download_data(self, product_id):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id = " + to_str(product_id)
}
products = self.select_data_connector(query, 'products')
if not products or products['result'] != 'success' or len(products['data']) == 0:
return None
download_data = dict()
for data in products['data']:
if data['meta_key'] in ['_download_expiry', '_download_limit']:
download_data[data['meta_key']] = data['meta_value'] if to_int(data['meta_value']) > 0 else None
return download_data
def get_reviews_ext_export(self, reviews):
url_query = self.get_connector_url('query')
reviews_ids = duplicate_field_value_from_list(reviews['data'], 'comment_ID')
product_ids = duplicate_field_value_from_list(reviews['data'], 'comment_post_ID')
review_ext_queries = {
'comment_meta': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_commentmeta WHERE comment_id IN " + self.list_to_in_condition(
reviews_ids),
},
'product_info': {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE ID IN " + self.list_to_in_condition(product_ids),
}
}
# reviews_ext = self.get_connector_data(url_query, {'serialize': True, 'query': json.dumps(review_ext_queries)})
reviews_ext = self.select_multiple_data_connector(review_ext_queries, 'reviews')
if not reviews_ext or reviews_ext['result'] != 'success':
return response_error()
return reviews_ext
    def convert_review_export(self, review, reviews_ext):
        """Convert a WP comment row into the framework's neutral review dict.

        :param review: row from _DBPRF_comments joined with the product post
        :param reviews_ext: related rows fetched by get_reviews_ext_export()
        :return: response_success(review_data)
        """
        review_data = self.construct_review()
        # review_data = self.add(review_data)
        review_data['id'] = review['comment_ID']
        product_info = get_row_from_list_by_field(reviews_ext['data']['product_info'], 'ID', review['comment_post_ID'])
        review_data['product']['id'] = review['comment_post_ID']
        if product_info:
            review_data['product']['code'] = product_info['post_name']
            review_data['product']['name'] = product_info['post_title']
        review_data['customer']['id'] = review['user_id']
        review_data['customer']['code'] = review['comment_author_email']
        review_data['customer']['name'] = review['comment_author']
        review_data['title'] = ''
        review_data['content'] = review['comment_content']
        # Map WP's comment_approved flag to the framework's status codes.
        rv_status = {
            '0': 2,  # pending
            '1': 1,  # approved
            'spam': 3  # not approved
        }
        # NOTE(review): unmapped statuses fall back to the *string* 'spam' while
        # mapped values are ints — confirm downstream handles both types.
        review_data['status'] = rv_status.get(to_str(review['comment_approved']), 'spam')
        review_data['created_at'] = convert_format_time(review['comment_date'])
        review_data['updated_at'] = convert_format_time(review['comment_date'])
        rating = self.construct_review_rating()
        review_meta = get_list_from_list_by_field(reviews_ext['data']['comment_meta'], 'comment_id', review['comment_ID'])
        rating['id'] = get_row_value_from_list_by_field(review_meta, 'comment_id', review['comment_ID'], 'meta_id')
        rating['rate_code'] = 'default'
        # WooCommerce stores the star rating in the 'rating' comment meta; default 5.
        rating['rate'] = self.get_value_metadata(review_meta, 'rating', 5)
        review_data['rating'].append(rating)
        return response_success(review_data)
def get_review_id_import(self, convert, review, reviews_ext):
return review['comment_ID']
def check_review_import(self, convert, review, reviews_ext):
return True if self.get_map_field_by_src(self.TYPE_REVIEW, convert['id'], convert['code']) else False
def router_review_import(self, convert, review, reviews_ext):
return response_success('review_import')
def before_review_import(self, convert, review, reviews_ext):
return response_success()
    def review_import(self, convert, review, reviews_ext):
        """Insert a review as a WP comment on the mapped target product.

        Resolves the target product id (WPML-aware), maps the customer if one
        was migrated, converts the framework status back to WP's
        comment_approved flag, and records the new id in the migration map.

        :return: response_success(review_id) or response_error when the
                 product cannot be resolved or the insert fails
        """
        lang_code = self._notice['target']['language_default']
        if convert.get('store_id'):
            lang_code = self._notice['map']['languages'].get(to_str(convert['store_id']))
        product_id = False
        if convert['product']['id'] or convert['product']['code']:
            # WPML stores need the language-qualified product mapping.
            if self.is_wpml():
                product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['product']['id'], convert['product']['code'], lang = lang_code)
            else:
                product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, convert['product']['id'], convert['product']['code'])
            if not product_id:
                # Fallback: try matching by product code only.
                product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, None, convert['product']['code'], lang = lang_code)
        if not product_id:
            msg = self.warning_import_entity('Review', convert['id'], convert['code'], 'product of review not exists.')
            return response_error(msg)
        customer_id = 0
        if convert['customer']['id'] or convert['customer']['code']:
            customer_id = self.get_map_field_by_src(self.TYPE_CUSTOMER, convert['customer']['id'])
            if not customer_id:
                customer_id = 0
        # Map the framework status back to WP's comment_approved values.
        rv_status = {
            '2': 0,  # pending
            '1': 1,  # approved
            '3': 'spam',  # not approved
            '0': 0
        }
        # NOTE(review): comment_author_email is left blank even though
        # convert['customer']['code'] carried the author email on export —
        # confirm whether it should be populated here.
        review_data = {
            'comment_post_ID': product_id,
            'comment_author': convert['customer']['name'],
            'comment_author_email': '',
            'comment_date': convert.get('created_at') if convert.get('created_at') else get_current_time(),
            'comment_date_gmt': convert['updated_at'] if convert['updated_at'] is not None else get_current_time(),
            'comment_content': convert['content'] if convert['content'] else '',
            'comment_karma': 0,
            'comment_approved': rv_status.get(str(convert['status']), 'spam'),
            'comment_parent': 0,
            'comment_type': "review",
            'user_id': customer_id
        }
        review_query = self.create_insert_query_connector("comments", review_data)
        review_id = self.import_review_data_connector(review_query, True, convert['id'])
        if not review_id:
            msg = self.warning_import_entity('Review', convert['id'], convert['code'])
            return response_error(msg)
        self.insert_map(self.TYPE_REVIEW, convert['id'], review_id, convert['code'])
        return response_success(review_id)
def after_review_import(self, review_id, convert, review, reviews_ext):
ratings = convert['rating']
for rating in ratings:
comment_meta = {
'rating': to_int(rating['rate'])
}
for meta_key, meta_value in comment_meta.items():
meta_insert = {
'comment_id': review_id,
'meta_key': meta_key,
'meta_value': meta_value
}
meta_query = self.create_insert_query_connector("commentmeta", meta_insert)
self.import_data_connector(meta_query, 'review')
return response_success()
def addition_review_import(self, convert, review, reviews_ext):
return response_success()
# TODO: Page
def check_page_import(self, convert, page, pages_ext):
return True if self.get_map_field_by_src(self.TYPE_PAGE, convert['id'], convert['code'], lang = self._notice['target']['language_default']) else False
    def page_import(self, convert, page, pages_ext):
        """Insert a CMS page as a WP 'page' post on the target store.

        Builds a unique slug (language-suffixed for WPML), resolves the mapped
        parent page, inserts the post row and records the migration map entry.

        :return: response_success(page_id) or response_error on insert failure
        """
        language_code = convert.get('language_code')
        if self.is_wpml() and not language_code:
            language_code = self._notice['target']['language_default']
        code_name = convert['title']
        code_name = self.sanitize_title(code_name).strip('-')
        if self.is_wpml() and language_code:
            code_name = code_name + '-' + language_code
        # Deduplicate the slug against already-migrated pages by appending the
        # source id until no map entry matches.
        # NOTE(review): if the suffixed slug still collides, the same id is
        # appended again on every pass — confirm this always terminates.
        check_slug_exist = True
        while check_slug_exist:
            check_slug_exist = True if self.select_map(self._migration_id, self.TYPE_PAGE, None, None, None, code_name, None, language_code) else False
            if check_slug_exist:
                code_name += to_str(get_value_by_key_in_dict(convert, 'id', ''))
        parent_id = self.get_map_field_by_src(self.TYPE_PAGE, to_int(convert['parent_id']), None, language_code)
        if not parent_id:
            parent_id = 0
        data = {
            'post_author': 1,
            # '0000-00-00' dates are invalid in MySQL strict mode; substitute "now".
            'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_content': convert['content'] if convert['content'] else "",
            'post_title': convert['title'],
            'post_status': 'publish' if convert['status'] else 'trash',
            'comment_status': convert.get('comment_status', 'open'),
            'ping_status': 'open',
            # wp_posts.post_name is VARCHAR(200); truncate the slug to fit.
            'post_name': code_name[:200],
            'post_modified': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_modified_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_parent': parent_id,
            'post_type': 'page',
            'comment_count': 0,
            'guid': '',
            'post_excerpt': '',
            'to_ping': '',
            'pinged': '',
            'post_content_filtered': '',
            'menu_order': get_value_by_key_in_dict(convert, 'sort_order', 0)
        }
        page_query = self.create_insert_query_connector('posts', data)
        page_id = self.import_page_data_connector(page_query, True, convert['id'])
        if not page_id:
            return response_error('Page ' + to_str(convert['id']) + ' import false.')
        self.insert_map(self.TYPE_PAGE, convert['id'], page_id, convert['title'], code_name, None, language_code)
        return response_success(page_id)
    def after_page_import(self, page_id, convert, page, pages_ext):
        """Post-import hook for CMS pages.

        After the parent hook runs, registers the new page with WPML
        (icl_translations) when that plugin is active and, for a page in the
        default language, imports one translated copy per mapped target
        language sharing the same translation group id (trid).

        :param page_id: id of the post just created on the target store.
        :param convert: converted page data; may carry 'language_code' and
            'trid' when this call is itself importing a translation.
        :param page: raw source page row.
        :param pages_ext: extra data fetched for the page batch.
        """
        # NOTE: a large commented-out block (guid fixup, edit-lock meta,
        # thumbnail upload and revision-row creation) previously sat here;
        # it was dead code — recover it from version control if ever needed.
        super().after_page_import(page_id, convert, page, pages_ext)
        if self.is_wpml():
            source_language_code = self._notice['target']['language_default']
            language_code = convert.get('language_code')
            if not language_code:
                # Default-language page: it is its own translation source.
                language_code = source_language_code
                source_language_code = None
            trid = convert.get('trid')
            if not trid:
                trid = self.get_new_trid()
            wpml_default = {
                'element_type': 'post_page',
                'element_id': page_id,
                'trid': trid,
                'language_code': language_code,
                'source_language_code': source_language_code
            }
            self.import_data_connector(self.create_insert_query_connector("icl_translations", wpml_default), 'page')
            if not convert.get('language_code'):
                # Import each translation once per target language, skipping
                # the default language (already imported above).
                list_target_id = list()
                for src_language_id, target_language_id in self._notice['map']['languages'].items():
                    if target_language_id in list_target_id or to_str(target_language_id) == to_str(self._notice['target']['language_default']):
                        continue
                    list_target_id.append(target_language_id)
                    page_lang = self.get_convert_data_language(convert, src_language_id)
                    page_lang['trid'] = trid
                    page_lang['language_code'] = target_language_id
                    page_import = self.page_import(page_lang, page, pages_ext)
                    if page_import['result'] == 'success':
                        self.after_page_import(page_import['data'], page_lang, page, pages_ext)
        return response_success()
# TODO: Coupon
    def prepare_coupons_import(self):
        """Hook run once before coupon import; nothing to prepare here."""
        return response_success()
    def prepare_coupons_export(self):
        """Hook run once before coupon export; no setup required."""
        return self
def get_coupons_main_export(self):
id_src = self._notice['process']['coupons']['id_src']
limit = self._notice['setting']['coupons']
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_posts WHERE ID > " + to_str(id_src) + " AND post_type = 'shop_coupon' ORDER BY ID ASC LIMIT " + to_str(limit),
}
coupons = self.select_data_connector(query, 'coupons')
if not coupons or coupons['result'] != 'success':
return response_error()
return coupons
def get_coupons_ext_export(self, coupons):
coupon_ids = duplicate_field_value_from_list(coupons['data'], 'ID')
coupon_id_con = self.list_to_in_condition(coupon_ids)
coupon_ext_queries = {
'postmeta': {
'type': "select",
'query': "SELECT * FROM _DBPRF_postmeta WHERE post_id IN " + coupon_id_con
},
}
coupons_ext = self.select_multiple_data_connector(coupon_ext_queries, 'products')
if (not coupons_ext) or coupons_ext['result'] != 'success':
return response_error()
return coupons_ext
    def convert_coupon_export(self, coupon, coupons_ext):
        """Map a source 'shop_coupon' post plus its postmeta onto the generic
        coupon structure used by the migration framework.

        :param coupon: raw coupon post row.
        :param coupons_ext: batch extras; 'postmeta' rows are filtered here.
        :return: response_success wrapping the converted coupon dict.
        """
        coupon_data = self.construct_coupon()
        coupon_data['id'] = coupon['ID']
        postmeta = get_list_from_list_by_field(coupons_ext['data']['postmeta'], 'post_id', coupon['ID'])
        coupon_data['code'] = coupon['post_title']
        coupon_data['title'] = coupon['post_name']
        coupon_data['description'] = coupon['post_excerpt']
        coupon_data['status'] = True if coupon['post_status'] == 'publish' else False
        coupon_data['created_at'] = convert_format_time(coupon['post_date'])
        coupon_data['updated_at'] = convert_format_time(coupon['post_modified'])
        # Newer stores keep the expiry in 'date_expires'; fall back to the
        # legacy 'expiry_date' meta key when that is empty.
        coupon_data['to_date'] = convert_format_time(self.get_value_metadata(postmeta, 'date_expires'))
        if not coupon_data['to_date']:
            coupon_data['to_date'] = convert_format_time(self.get_value_metadata(postmeta, 'expiry_date'))
        # The literal string 'None' can be stored in meta; treat it as unset.
        coupon_data['min_spend'] = self.get_value_metadata(postmeta, 'minimum_amount') if to_str(self.get_value_metadata(postmeta, 'minimum_amount')) != 'None' else None
        coupon_data['max_spend'] = self.get_value_metadata(postmeta, 'maximum_amount') if to_str(self.get_value_metadata(postmeta, 'maximum_amount')) != 'None' else None
        coupon_data['times_used'] = self.get_value_metadata(postmeta, 'usage_count')
        coupon_data['usage_limit'] = self.get_value_metadata(postmeta, 'usage_limit', 0)
        coupon_data['discount_amount'] = self.get_value_metadata(postmeta, 'coupon_amount')
        coupon_data['usage_per_customer'] = self.get_value_metadata(postmeta, 'usage_limit_per_user')
        coupon_data['type'] = self.PERCENT if self.get_value_metadata(postmeta, 'discount_type') == 'percent' else self.FIXED
        coupon_data['simple_free_shipping'] = 1 if self.get_value_metadata(postmeta, 'free_shipping') == 'yes' else 0
        coupon_data['limit_usage_to_x_items'] = self.get_value_metadata(postmeta, 'limit_usage_to_x_items')
        # 'product_ids' is a comma-separated id list; 'product_categories'
        # is a PHP-serialized array.
        product_ids = self.get_value_metadata(postmeta, 'product_ids')
        if product_ids:
            coupon_data['products'] = to_str(product_ids).split(',')
        category_ids = self.get_value_metadata(postmeta, 'product_categories')
        if category_ids:
            category_ids = php_unserialize(category_ids)
            if category_ids:
                coupon_data['categories'] = category_ids
        return response_success(coupon_data)
    def get_coupon_id_import(self, convert, coupon, coupons_ext):
        """Return the source identifier used to track this coupon during import."""
        return coupon['ID']
def check_coupon_import(self, convert, coupon, coupons_ext):
return True if self.get_map_field_by_src(self.TYPE_COUPON, convert['id'], convert['code']) else False
    def router_coupon_import(self, convert, coupon, coupons_ext):
        """Route every coupon to the standard 'coupon_import' handler."""
        return response_success('coupon_import')
    def before_coupon_import(self, convert, coupon, coupons_ext):
        """Hook run before each coupon is imported; no-op."""
        return response_success()
    def coupon_import(self, convert, coupon, coupons_ext):
        """Create the 'shop_coupon' post on the target store.

        Inserts the post row, records the id mapping, and returns the new
        post id; meta values are written later in after_coupon_import.

        :return: response_success(new_post_id) or response_error().
        """
        coupon_data = {
            'post_author': 1,
            # Guard against empty or zeroed MySQL dates from the source.
            'post_date': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_date_gmt': convert['created_at'] if convert['created_at'] and '0000-00-00' not in convert['created_at'] else get_current_time(),
            'post_content': '',
            # WooCommerce stores the coupon code as the post title.
            'post_title': convert['code'] if convert['code'] else convert['title'],
            'post_excerpt': self.change_img_src_in_text(get_value_by_key_in_dict(convert, 'description', '')),
            'post_status': "publish" if convert['status'] else "draft",
            'comment_status': 'open',
            'ping_status': 'closed',
            'post_password': '',
            'post_name': self.strip_html_tag(convert['title']),
            'to_ping': '',
            'pinged': '',
            'post_modified': convert['updated_at'] if convert and convert['updated_at'] and '0000-00-00' not in convert['updated_at'] else get_current_time(),
            'post_modified_gmt': convert['updated_at'] if convert and convert['updated_at'] and '0000-00-00' not in convert['updated_at'] else get_current_time(),
            'post_content_filtered': '',
            'post_parent': 0,
            # The post id is appended to the guid in after_coupon_import.
            'guid': self._notice['target']['cart_url'] + "/?post_type=shop_coupon&p=",
            'menu_order': convert.get('menu_order', 0),
            'post_type': "shop_coupon",
            'post_mime_type': '',
            'comment_count': 0
        }
        coupon_query = self.create_insert_query_connector('posts', coupon_data)
        coupon_import = self.import_data_connector(coupon_query, 'coupons', convert['id'])
        if not coupon_import:
            return response_error()
        self.insert_map(self.TYPE_COUPON, convert['id'], coupon_import, convert['code'])
        return response_success(coupon_import)
    def after_coupon_import(self, coupon_id, convert, coupon, coupons_ext):
        """Write the coupon's meta values and fix its guid on the target.

        Remaps source product/category ids to target ids, builds the
        WooCommerce coupon meta dict, and flushes everything in one batch.
        """
        all_queries = list()
        product_ids = convert.get('products')
        if product_ids:
            # Translate source product ids into their imported counterparts.
            product_id_map_arr = list()
            for product_id in product_ids:
                map_product_id = self.get_map_field_by_src(self.TYPE_PRODUCT, product_id)
                if map_product_id and map_product_id not in product_id_map_arr:
                    product_id_map_arr.append(to_str(map_product_id))
            if product_id_map_arr:
                product_ids = ','.join(product_id_map_arr)
            else:
                product_ids = None
        category_ids = convert.get('categories')
        cate_id_map_arr = list()
        if category_ids:
            for category_id in category_ids:
                map_cate_id = self.get_map_field_by_src(self.TYPE_CATEGORY, category_id)
                if map_cate_id and map_cate_id not in cate_id_map_arr:
                    cate_id_map_arr.append(to_str(map_cate_id))
        coupon_meta = {
            'discount_type': 'percent' if convert['type'] == self.PERCENT else 'fixed_cart' if convert['type'] == self.FIXED else 'fixed_product',
            'coupon_amount': convert['discount_amount'],
            'usage_limit': convert['usage_limit'],
            'usage_limit_per_user': convert['usage_per_customer'],
            'free_shipping': 'yes' if 'simple_free_shipping' in convert and to_str(to_int(convert['simple_free_shipping'])) == '1' else 'no',
            'usage_count': convert['times_used'],
            'date_expires': convert['to_date'] if (convert['to_date'] and convert['to_date'] != '0000-00-00 00:00:00') else '',
            'minimum_amount': convert['min_spend'],
            'maximum_amount': convert['max_spend'],
            'product_ids': product_ids if product_ids else None,
            'product_categories': php_serialize(cate_id_map_arr) if cate_id_map_arr else '',
            'customer_email': php_serialize(convert.get('customer')),
            'limit_usage_to_x_items': convert.get('limit_usage_to_x_items', 0),
        }
        for meta_key, meta_value in coupon_meta.items():
            # NOTE(review): this sanitization strips ')' ',' and "'" from
            # EVERY meta value, which corrupts the comma-joined
            # 'product_ids' list and the php-serialized values built above —
            # confirm intent before relying on those fields on the target.
            meta_insert = {
                'post_id': coupon_id,
                'meta_key': meta_key,
                'meta_value': str(meta_value).replace(')', '').replace(',', '').replace("'", '')
            }
            meta_query = self.create_insert_query_connector("postmeta", meta_insert)
            all_queries.append(meta_query)
        # Complete the guid with the now-known post id.
        all_queries.append(self.create_update_query_connector('posts', {'guid': self._notice['target']['cart_url'] + "/?post_type=shop_coupon&p=" + to_str(coupon_id)}, {'ID': coupon_id}))
        self.import_multiple_data_connector(all_queries, 'coupons')
        return response_success()
    def addition_coupon_import(self, convert, coupon, coupons_ext):
        """Hook run after each coupon is imported; no extra work needed."""
        return response_success()
    def display_finish_target(self):
        """Finalize the migration on the target store.

        Persists the migration notice, clears the connector cache, drops
        stale WooCommerce transients, recomputes comment/review counters and
        taxonomy term counts, and records the URL-rewrite mode option.
        """
        migration_id = self._migration_id
        recent_exist = self.select_row(TABLE_RECENT, {'migration_id': migration_id})
        notice = json.dumps(self._notice)
        if recent_exist:
            self.update_obj(TABLE_RECENT, {'notice': notice}, {'migration_id': migration_id})
        else:
            self.insert_obj(TABLE_RECENT, {'notice': notice, 'migration_id': migration_id})
        target_cart_type = self._notice['target']['cart_type']
        target_setup_type = self.target_cart_setup(target_cart_type)
        # if target_setup_type == 'connector':
        token = self._notice['target']['config']['token']
        url = self.get_connector_url('clearcache', token)
        self.get_connector_data(url)
        all_queries = list()
        # Invalidate cached category trees, attribute taxonomies and
        # customer-report transients so WooCommerce rebuilds them.
        all_queries.append({
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'product_cat_children'"
        })
        all_queries.append({
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_options` WHERE option_name = '_transient_wc_attribute_taxonomies'"
        })
        all_queries.append({
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_options` WHERE `option_name` LIKE '%_transient_timeout_wc_report_customers%'"
        })
        all_queries.append({
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_options` WHERE `option_name` LIKE '%_transient_wc_report_customers%'"
        })
        all_queries.append({
            'type': 'query',
            'query': "DELETE FROM `_DBPRF_options` WHERE option_name = 'urlrewrite_type'"
        })
        # Recompute comment counters and review aggregates from the
        # imported comments.
        all_queries.append({
            'type': 'query',
            'query': "UPDATE `_DBPRF_posts` SET `comment_count`= (SELECT COUNT(comment_ID) FROM `_DBPRF_comments` WHERE `_DBPRF_comments`.comment_post_ID = `_DBPRF_posts`.ID AND `_DBPRF_comments`.comment_approved = 1) WHERE `post_type` IN ('product', 'post')"
        })
        all_queries.append({
            'type': 'query',
            'query': "UPDATE `_DBPRF_postmeta` SET `meta_value`= (SELECT COUNT(comment_ID) FROM `_DBPRF_comments` WHERE `_DBPRF_comments`.comment_post_ID = `_DBPRF_postmeta`.post_id AND `_DBPRF_comments`.comment_approved = 1) WHERE `meta_key` = '_wc_review_count'"
        })
        all_queries.append({
            'type': 'query',
            'query': "UPDATE `_DBPRF_postmeta` SET `meta_value`= (SELECT AVG(cmta.`meta_value`) FROM `_DBPRF_comments` AS cmt LEFT JOIN `_DBPRF_commentmeta` AS cmta ON cmt.`comment_ID` = cmta.`comment_ID` WHERE cmt.`comment_post_ID` = `_DBPRF_postmeta`.`post_id` AND cmt.comment_approved = 1 AND cmta.`meta_key` = 'rating') WHERE `meta_key` = '_wc_average_rating'"
        })
        # NOTE: an earlier, product-parent-aware variant of the term-count
        # query was left commented out here; the current query counts every
        # relationship of posts of type 'product'.
        all_queries.append({
            'type': 'query',
            'query': "UPDATE `_DBPRF_term_taxonomy` AS tt SET tt.count = (SELECT COUNT(1) AS total FROM _DBPRF_term_relationships AS tr WHERE tt.term_taxonomy_id = tr.term_taxonomy_id AND tr.object_id IN (SELECT ID FROM _DBPRF_posts WHERE post_type = 'product'))"
        })
        clear_cache = self.import_multiple_data_connector(all_queries)
        # Record which URL-rewrite strategy the migration used; 'url301'
        # when SEO 301 redirects were requested.
        option_data = {
            'option_name': 'urlrewrite_type',
            'option_value': 'urlrewrite',
            'autoload': 'yes'
        }
        if self._notice['support'].get('seo_301'):
            option_data = {
                'option_name': 'urlrewrite_type',
                'option_value': 'url301',
                'autoload': 'yes'
            }
        option_query = self.create_insert_query_connector('options', option_data)
        option_import = self.import_data_connector(option_query, 'options')
        return response_success()
def substr_replace(self, subject, replace, start, length):
if length == None:
return subject[:start] + replace
elif length < 0:
return subject[:start] + replace + subject[length:]
else:
return subject[:start] + replace + subject[start + length:]
def add_construct_default(self, construct):
construct['site_id'] = 1
construct['language_id'] = self._notice['src']['language_default']
return construct
def get_term_by_name(self, data):
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy AS tt "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
"WHERE tt.taxonomy = 'product_visibility' AND t.name = '" + data + "'"
}
product_taxonomy = self.select_data_connector(query)
if product_taxonomy['result'] == 'success' and product_taxonomy['data']:
return product_taxonomy['data'][0]['term_taxonomy_id']
return None
def get_product_type(self, product_type):
if not self.product_types:
query = {
'type': 'select',
'query': "SELECT * FROM _DBPRF_term_taxonomy AS tt "
"LEFT JOIN _DBPRF_terms AS t ON t.term_id = tt.term_id "
"WHERE tt.taxonomy = 'product_type'"
}
product_types = self.select_data_connector(query)
if product_types['result'] == 'success' and product_types['data']:
for product_type_row in product_types['data']:
self.product_types[product_type_row['slug']] = product_type_row['term_taxonomy_id']
return self.product_types.get(product_type, 2)
    def import_category_parent(self, convert_parent, lang_code = None):
        """Import a parent category on demand (recursively, via category_import).

        Returns the already-mapped target id when the parent was imported
        before; otherwise overlays the language-specific fields (WPML) and
        runs the normal category import pipeline.

        :param convert_parent: converted parent-category data; may embed the
            raw 'category' row and 'categories_ext' batch extras.
        :param lang_code: target language code for WPML-aware lookups.
        :return: response dict from category_import (or response_success with
            the existing mapped id).
        """
        category_type = self.TYPE_CATEGORY
        if convert_parent.get('is_blog'):
            category_type = self.TYPE_CATEGORY_BLOG
        parent_exists = self.get_map_field_by_src(category_type, convert_parent['id'], convert_parent['code'], lang_code)
        if parent_exists:
            return response_success(parent_exists)
        if self.is_wpml() and lang_code:
            convert_parent['language_code'] = lang_code
            # Copy the translated fields for the requested target language
            # over the default-language values.
            for src_language_id, target_language_id in self._notice['map']['languages'].items():
                if to_str(lang_code) == to_str(target_language_id):
                    lang_data = convert_parent
                    if to_str(src_language_id) in convert_parent['languages'] and convert_parent['languages'][to_str(src_language_id)]:
                        lang_data = convert_parent['languages'][to_str(src_language_id)]
                    convert_parent['name'] = lang_data['name']
                    convert_parent['description'] = lang_data['description']
                    convert_parent['short_description'] = lang_data['short_description']
                    convert_parent['meta_title'] = lang_data['meta_title']
                    convert_parent['meta_keyword'] = lang_data['meta_keyword']
                    convert_parent['meta_description'] = lang_data['meta_description']
                    convert_parent['url_key'] = lang_data.get('url_key', '')
        category = get_value_by_key_in_dict(convert_parent, 'category', dict())
        categories_ext = get_value_by_key_in_dict(convert_parent, 'categories_ext', dict())
        category_parent_import = self.category_import(convert_parent, category, categories_ext)
        self.after_category_import(category_parent_import['data'], convert_parent, category, categories_ext)
        return category_parent_import
def get_list_from_list_by_field_as_first_key(self, list_data, field = '', first_key = ''):
result = list()
if isinstance(list_data, dict):
for key, row in list_data.items():
if field in row:
if row[field].find(first_key) == 0:
result.append(row)
else:
if field and to_str(field) != '':
for row in list_data:
if field in row:
if row[field].find(first_key) == 0:
result.append(row)
else:
for row in list_data:
if row:
v_index = row.find(first_key)
if v_index == 0:
result.append(row)
return result
    def process_image_before_import(self, url, path):
        """Normalise an image url/path pair before uploading to the target.

        When ``path`` is empty it is derived from ``url``; otherwise the two
        are joined into one full url. Any prefix up to and including
        '/wp-content/uploads/' is stripped from the relative path.

        :return: dict with the resolved 'url' and the cleaned relative 'path'.
        """
        if not path:
            full_url = url
            path = strip_domain_from_url(url)
        else:
            full_url = join_url_path(url, path)
        if path and path.find('/wp-content/uploads/') != -1:
            newpath = path.split('/wp-content/uploads/')
            if newpath and to_len(newpath) > 1:
                path = newpath[1]
        # NOTE(review): inside this character class '.-_' is a RANGE
        # (0x2E-0x5F) that also preserves '/', ':' and other punctuation; it
        # was probably intended as the literal chars '.', '-', '_', but
        # "fixing" it would strip '/' from paths — confirm before changing.
        path = re.sub(r"[^a-zA-Z0-9.-_()]", '', path)
        full_url = self.parse_url(full_url)
        return {
            'url': full_url,
            'path': path
        }
def wpml_attributes_to_in_condition(self, list_keys):
if not list_keys:
return "('null')"
result = "('tax_" + "','tax_".join([str(k) for k in list_keys]) + "')"
return result
def brand_image_in_condition(self, term_ids):
if not term_ids:
return "('null')"
result = "('brand_taxonomy_image" + "','brand_taxonomy_image".join([str(k) for k in term_ids]) + "')"
return result
    def detect_seo(self):
        """Select which SEO-url builder to use; always the default builder here."""
        return 'default_seo'
def categories_default_seo(self, category, categories_ext):
result = list()
seo_cate = self.construct_seo_category()
seo_cate['request_path'] = self._notice['src']['config']['product_category_base'].strip('/') + '/' + to_str(category['slug'])
seo_cate['default'] = True
result.append(seo_cate)
return result
    def products_default_seo(self, product, products_ext):
        """Build the default SEO url entries for a source product.

        When the source permalink base contains '%product_cat%', one entry is
        generated per linked product category; otherwise (or additionally for
        the bare slug) a default entry is produced.

        :return: list of seo-product dicts with 'request_path' filled in.
        """
        result = list()
        if self._notice['src']['config']['product_base'].find('%product_cat%') != -1:
            term_relationship = get_list_from_list_by_field(products_ext['data']['term_relationship'], 'object_id', product['ID'])
            category_src = get_list_from_list_by_field(term_relationship, 'taxonomy', 'product_cat')
            if category_src:
                # One category-prefixed path per linked category.
                for product_category in category_src:
                    seo_product = self.construct_seo_product()
                    seo_product['request_path'] = self._notice['src']['config']['product_base'].strip('/') + '/' + to_str(product_category['slug']) + '/' + to_str(product['post_name'])
                    seo_product['category_id'] = product_category['term_id']
                    result.append(seo_product)
            else:
                seo_product = self.construct_seo_product()
                seo_product['request_path'] = self._notice['src']['config']['product_base'].strip('/') + '/' + to_str(product['post_name'])
                seo_product['default'] = True
                result.append(seo_product)
        # Always expose the bare slug as a default path as well.
        if product['post_name']:
            seo_product = self.construct_seo_product()
            seo_product['request_path'] = to_str(product['post_name'])
            seo_product['default'] = True
            result.append(seo_product)
        return result
def get_order_status_label(self, order_status):
if not order_status:
return ''
order_status = order_status.replace('wc-', '')
order_status = order_status.replace('-', ' ')
order_status = order_status.capitalize()
return order_status
def get_woo_attribute_id(self, pro_attr_code, attribute_name, language_code = None, language_attribute_data = None, attribute_type = 'select'):
# if to_str(pro_attr_code)[0:3] != 'pa_':
# pro_attr_code = "pa_" + pro_attr_code
# if self.is_wpml() and language_code != self._notice['target']['language_default']:
# attribute_data_default = self.get_convert_data_language(language_attribute_data, None, self._notice['target']['language_default'], 'option_languages')
# option_lang_name = attribute_data_default.get('option_name')
# if not option_lang_name:
# option_lang_name = attribute_data_default.get('attribute_name')
# if option_lang_name:
pro_attr_code = urllib.parse.unquote(pro_attr_code)
woo_attribute_id = self.get_map_field_by_src(self.TYPE_ATTR, None, 'pa_' + pro_attr_code)
# if woo_attribute_id:
# return woo_attribute_id
if not woo_attribute_id:
attribute_data = {
'attribute_name': pro_attr_code,
'attribute_type': attribute_type
}
attribute_result = self.select_data_connector(self.create_select_query_connector('woocommerce_attribute_taxonomies', attribute_data))
woo_attribute_id = None
if attribute_result and attribute_result['data']:
woo_attribute_id = attribute_result['data'][0]['attribute_id']
if not woo_attribute_id:
pro_attr_data = {
'attribute_name': pro_attr_code,
'attribute_label': attribute_name,
'attribute_type': attribute_type,
'attribute_orderby': "menu_order",
'attribute_public': 0,
}
woo_attribute_id = self.import_data_connector(self.create_insert_query_connector('woocommerce_attribute_taxonomies', pro_attr_data), 'products')
if woo_attribute_id:
self.insert_map(self.TYPE_ATTR, None, woo_attribute_id, 'pa_' + pro_attr_code)
if woo_attribute_id:
if self.is_wpml():
attribute_data_lang = self.get_convert_data_language(language_attribute_data, None, language_code, 'option_languages')
option_lang_name = attribute_data_lang.get('option_name')
if not option_lang_name:
option_lang_name = attribute_data_lang.get('attribute_name')
if option_lang_name != attribute_name:
translate_id = self.get_map_field_by_src('translate', woo_attribute_id, None, language_code)
if not translate_id:
translate_query = {
'icl_strings': self.create_select_query_connector('icl_strings', {'value': attribute_name, 'name': 'taxonomy singular name: ' + attribute_name}),
'icl_string_translations': {
'type': 'select',
'query': "select * from _DBPRF_icl_string_translations where string_id in (" + self.create_select_query_connector('icl_strings', {'value': attribute_name, 'name': 'taxonomy singular name: ' + attribute_name}, 'id')['query'] + ")"
}
}
select = self.select_multiple_data_connector(translate_query)
if select['result'] == 'success':
icl_string_id = None
is_tranlate = False
if not select['data']['icl_strings']:
icl_strings_data = {
'language': self._notice['target']['language_default'],
'context': 'WordPress',
'name': 'taxonomy singular name: ' + attribute_name,
'value': attribute_name,
'string_package_id': None,
'wrap_tag': '',
'type': 'LINE',
'title': None,
'status': 2,
'gettext_context': '',
'domain_name_context_md5': hashlib.md5(to_str('WordPresstaxonomy singular name: ' + attribute_name).encode()),
'translation_priority': 'optional',
'word_count': None
}
icl_string_id = self.import_product_data_connector(self.create_insert_query_connector('icl_strings', icl_strings_data))
else:
icl_string = select['data']['icl_strings'][0]
if icl_string['language'] != language_code:
icl_string_id = icl_string['id']
check = get_row_from_list_by_field(select['data']['icl_string_translations'], 'language', language_code)
is_tranlate = True if check else False
else:
is_tranlate = True
if icl_string_id and not is_tranlate:
icl_string_translations_data = {
'string_id': icl_string_id,
'language': language_code,
'status': 10,
'value': option_lang_name,
'translator_id': None,
'translation_service': '',
'batch_id': 0,
'translation_date': get_current_time()
}
icl_string_translation_id = self.import_product_data_connector(self.create_insert_query_connector('icl_string_translations', icl_string_translations_data))
if icl_string_translation_id:
self.insert_map('translate', woo_attribute_id, icl_string_translation_id, None, None, None, language_code)
return woo_attribute_id
    def get_woo_attribute_value(self, attribute_value, pro_attr_code, language_code = None, attribute_data = None, desc = ''):
        """Find or create the term for one attribute value ('pa_<code>' taxonomy).

        Resolution order: migration map (per-language slug), then an existing
        term on the target, then a fresh terms/term_taxonomy pair. When WPML
        is active and the value has a different translation, the translated
        term is created and linked through icl_translations under one trid.

        :param attribute_value: value label (truncated to 200 chars).
        :param pro_attr_code: attribute code (url-encoded, without 'pa_').
        :param language_code: WPML target language code, if any.
        :param attribute_data: per-language option-value data for WPML.
        :param desc: term description to store.
        :return: the term_taxonomy_id of the (possibly translated) value.
        """
        pro_attr_code = urllib.parse.unquote(pro_attr_code)
        if self.is_wpml():
            value_data = self.get_convert_data_language(attribute_data, None, language_code, 'option_value_languages')
            if value_data:
                attribute_value = value_data['option_value_name']
        attribute_value = to_str(attribute_value)[:200]
        slug_default = self.get_slug_attr(attribute_data)
        slug = self.get_slug_attr(attribute_data, language_code)
        opt_value_id = None
        opt_value_exist = self.select_map(self._migration_id, self.TYPE_ATTR_VALUE, None, None, 'pa_' + pro_attr_code, None, slug, language_code)
        if opt_value_exist:
            # For the default language the mapped id is final; for other
            # languages we may still need to create the translation below.
            if not self.is_wpml() or not language_code or language_code == self._notice['target']['language_default']:
                return opt_value_exist['id_desc']
            else:
                opt_value_id = opt_value_exist['id_desc']
        if not opt_value_id:
            # Check whether an identical term already exists on the target.
            query = {
                'type': 'select',
                'query': 'SELECT * FROM _DBPRF_terms AS term LEFT JOIN _DBPRF_term_taxonomy AS taxonomy ON term.term_id = taxonomy.term_id WHERE term.name = ' + self.escape(attribute_value) + " AND taxonomy.taxonomy = " + self.escape('pa_' + pro_attr_code)
            }
            attribute_result = self.select_data_connector(query)
            if attribute_result and attribute_result['data']:
                opt_value_id = attribute_result['data'][0]['term_taxonomy_id']
        if not opt_value_id:
            # Create the term; non-default languages get a language-suffixed
            # slug to avoid collisions with the default-language term.
            if self.is_wpml() and language_code != self._notice['target']['language_default']:
                new_slug = slug_default + '-' + to_str(language_code) if slug == slug_default else slug
            else:
                new_slug = slug_default
            value_term = {
                'name': attribute_value,
                'slug': new_slug,
                'term_group': 0,
            }
            term_id = self.import_product_data_connector(self.create_insert_query_connector('terms', value_term), 'products')
            value_term_taxonomy = {
                'term_id': term_id,
                'taxonomy': 'pa_' + pro_attr_code,
                'description': desc,
                'parent': 0,
                'count': 0
            }
            opt_value_id = self.import_product_data_connector(self.create_insert_query_connector('term_taxonomy', value_term_taxonomy), 'products')
            if opt_value_id:
                self.insert_map(self.TYPE_ATTR_VALUE, None, opt_value_id, 'pa_' + pro_attr_code, None, slug, language_code)
        if opt_value_id:
            if self.is_wpml():
                attribute_data_lang = self.get_convert_data_language(attribute_data, None, language_code, 'option_value_languages')
                if attribute_data_lang['option_value_name'] != attribute_value:
                    # The value has a distinct translation: look up its WPML
                    # translation group and create the translated term if it
                    # is not registered yet.
                    translate_query = {
                        'icl_translations': {
                            'type': 'select',
                            'query': 'select * from _DBPRF_icl_translations where trid in (select trid from wp_icl_translations where ' + self.dict_to_where_condition({'element_id': opt_value_id, 'element_type': 'tax_pa_' + pro_attr_code}) + ')'
                        },
                        'term': {
                            'type': 'select',
                            'query': 'SELECT * FROM _DBPRF_terms AS term LEFT JOIN _DBPRF_term_taxonomy AS taxonomy ON term.term_id = taxonomy.term_id WHERE term.name = ' + self.escape(attribute_data_lang['option_value_name']) + " AND taxonomy.taxonomy = " + self.escape('pa_' + pro_attr_code)
                        }
                    }
                    select = self.select_multiple_data_connector(translate_query)
                    if select['result'] == 'success':
                        trid = None
                        is_tranlate = False
                        if not select['data']['icl_translations']:
                            # No translation group yet: register the current
                            # term as the default-language source row.
                            trid = self.get_new_trid()
                            icl_translations_data = {
                                'language_code': self._notice['target']['language_default'],
                                'element_type': 'tax_pa_' + pro_attr_code,
                                'element_id': opt_value_id,
                                'trid': trid,
                                'source_language_code': None,
                            }
                            icl_translation_id = self.import_product_data_connector(self.create_insert_query_connector('icl_translations', icl_translations_data))
                        else:
                            icl_translations = select['data']['icl_translations'][0]
                            trid = icl_translations['trid']
                            check = get_row_from_list_by_field(select['data']['icl_translations'], 'language_code', language_code)
                            is_tranlate = True if check else False
                        if trid and not is_tranlate:
                            new_slug = slug_default + '-' + to_str(language_code) if slug != slug_default else slug_default
                            value_term = {
                                'name': attribute_data_lang['option_value_name'],
                                'slug': new_slug,
                                'term_group': 0,
                            }
                            term_id = self.import_product_data_connector(self.create_insert_query_connector('terms', value_term), 'products')
                            value_term_taxonomy = {
                                'term_id': term_id,
                                'taxonomy': 'pa_' + pro_attr_code,
                                'description': desc,
                                'parent': 0,
                                'count': 0
                            }
                            opt_value_id = self.import_product_data_connector(self.create_insert_query_connector('term_taxonomy', value_term_taxonomy), 'products')
                            if opt_value_id:
                                icl_translations_data = {
                                    'language_code': language_code,
                                    'element_type': 'tax_pa_' + pro_attr_code,
                                    'element_id': opt_value_id,
                                    'trid': trid,
                                    'source_language_code': self._notice['target']['language_default'],
                                }
                                self.import_product_data_connector(self.create_insert_query_connector('icl_translations', icl_translations_data))
                                self.insert_map(self.TYPE_ATTR_VALUE, None, opt_value_id, 'pa_' + pro_attr_code, None, slug, language_code)
        return opt_value_id
def to_timestamp(self, value, str_format = '%Y-%m-%d %H:%M:%S'):
try:
timestamp = to_int(time.mktime(time.strptime(value, str_format)))
if timestamp:
return timestamp
return to_int(time.time())
except:
return to_int(time.time())
    def get_map_field_by_src(self, map_type = None, id_src = None, code_src = None, lang = None, field = 'id_desc'):
        """Fetch one field of a migration-map row looked up by source id/code.

        Language-aware variant: when WPML/Polylang is active the lookup is
        additionally filtered by ``lang``; image maps always use the base
        implementation.

        :return: the mapped field value, or False when nothing matches.
        """
        if not self.is_wpml() and not self.is_polylang() or map_type in [self.TYPE_PATH_IMAGE, self.TYPE_IMAGE]:
            return super().get_map_field_by_src(map_type, id_src, code_src, field)
        if not id_src and not code_src:
            return False
        _migration_id = self._migration_id
        map_data = self.select_map(_migration_id, map_type, id_src, None, code_src, None, None, lang)
        if not map_data:
            return False
        return map_data.get(field, False)
def select_map(self, _migration_id = None, map_type = None, id_src = None, id_desc = None, code_src = None, code_desc = None, value = None, lang = None):
if not self.is_wpml() and not self.is_polylang() or map_type in [self.TYPE_PATH_IMAGE, self.TYPE_IMAGE]:
return super().select_map(_migration_id, map_type, id_src, id_desc, code_src, code_desc, value)
where = dict()
if _migration_id:
where['migration_id'] = _migration_id
if map_type:
where['type'] = map_type
if id_src:
where['id_src'] = id_src
if id_desc:
where['id_desc'] = id_desc
if code_src:
where['code_src'] = code_src
if code_desc:
where['code_desc'] = code_desc
if value:
where['value'] = value
if (self.is_wpml() or self.is_polylang()) and map_type in [self.TYPE_CATEGORY, self.TYPE_PRODUCT, self.TYPE_ATTR, self.TYPE_ATTR_VALUE]:
where['lang'] = lang
if not where:
return None
result = self.select_obj(TABLE_MAP, where)
try:
data = result['data'][0]
except Exception as e:
data = None
return data
def insert_map(self, map_type = None, id_src = None, id_desc = None, code_src = None, code_desc = None, value = None, lang = None):
    """Insert one source->target mapping row; return the row data or False."""
    # Treat a zero-ish source id as missing, but keep the literal string '0'.
    if to_int(id_src) == 0 and to_str(id_src) != '0':
        id_src = None
    data_inset = {
        'migration_id': self._migration_id,
        'type': map_type,
        'id_src': id_src,
        'code_src': code_src,
        'id_desc': id_desc,
        'code_desc': code_desc,
        'value': value,
    }
    if self.is_wpml() or self.is_polylang():
        # Multilingual sites record the language on every mapping row.
        data_inset['lang'] = lang
    insert = self.insert_obj(TABLE_MAP, data_inset)
    if (not insert) or (insert['result'] != 'success'):
        return False
    return insert['data']
def is_wpml(self):
    """True-ish when the current cart reports WPML multilingual support."""
    support_flags = self._notice[self.get_type()]['support']
    return support_flags.get('wpml')
def is_polylang(self):
    """True-ish when the current cart reports Polylang multilingual support."""
    support_flags = self._notice[self.get_type()]['support']
    return support_flags.get('polylang')
def get_convert_data_language(self, convert, src_language_id = None, target_language_id = None, key_language = 'languages'):
    """Overlay translated field values from convert[key_language] onto convert.

    The translation entry is selected either directly by source language id,
    or by reverse-lookup of the source language mapped to *target_language_id*.
    Returns *convert* unchanged when no translation applies.
    """
    if not self.is_wpml() and not self.is_polylang():
        return convert
    list_language_data = convert.get(key_language)
    if not list_language_data:
        return convert
    language_data = None
    if src_language_id:
        if list_language_data.get(to_str(src_language_id)):
            language_data = list_language_data[to_str(src_language_id)]
    elif target_language_id:
        # Reverse lookup through the configured source->target language map.
        for src_id, data in list_language_data.items():
            if self._notice['map']['languages'].get(to_str(src_id)) == target_language_id:
                language_data = data
                break
    if not language_data:
        return convert
    for key_lang, value in language_data.items():
        if not value:
            # Empty translations never overwrite existing values.
            continue
        if key_lang == 'option_value_name' and convert.get('option_type') == self.OPTION_MULTISELECT and 'position_option' in convert:
            # Multiselect values come ';'-joined; pick this option's slot.
            value_lang = to_str(value).split(';')
            if len(value_lang) > to_int(convert.get('position_option')):
                value = value_lang[to_int(convert.get('position_option'))]
        convert[key_lang] = value
    return convert
def get_pro_attr_code_default(self, option):
    """Derive the attribute slug (code) for a product option."""
    if self.is_wpml():
        # Use the target site's default language for the canonical slug.
        option = self.get_convert_data_language(option, None, self._notice['target']['language_default'], 'option_languages')
    pro_attr_code = to_str(option['option_name']).lower()
    # attribute_name = option['option_name']
    pro_attr_code = pro_attr_code.replace(' ', '_')
    if option['option_code']:
        # An explicit option code wins over the display name.
        pro_attr_code = to_str(option['option_code']).lower()
        pro_attr_code = pro_attr_code.replace(' ', '_')
    pro_attr_code_len = 28
    check_encode = chardet.detect(pro_attr_code.encode())
    if check_encode['encoding'] != 'ascii':
        # Non-ascii slugs: trim the raw text harder but allow a longer
        # sanitized result — presumably sanitize_title expands non-ascii
        # characters (TODO confirm sanitize_title's behavior).
        pro_attr_code = pro_attr_code[0:14]
        pro_attr_code_len = 200
    pro_attr_code = self.sanitize_title(pro_attr_code, pro_attr_code_len)
    return pro_attr_code
def get_slug_attr(self, option_value, language_code = None):
    """Build the slug for an attribute value; an explicit code wins."""
    if option_value['option_value_code']:
        return self.sanitize_title(to_str(option_value['option_value_code'])).lower()
    attribute_value = option_value['option_value_name']
    if self.is_wpml():
        # Translate the display name into the requested (or default) language
        # before slugifying.
        if not language_code:
            language_code = self._notice['target']['language_default']
        value_data = self.get_convert_data_language(option_value, None, language_code, 'option_value_languages')
        if value_data:
            attribute_value = value_data['option_value_name']
    return self.sanitize_title(to_str(attribute_value).lower())
def get_key_check_default(self, attributes):
    """Build the 'name:value|name:value' fingerprint used to match variants."""
    parts = []
    for attr in attributes:
        if self.is_wpml():
            # Normalise to the target's default language before fingerprinting.
            attr = self.get_convert_data_language(attr, None, self._notice['target']['language_default'], 'option_value_languages')
        parts.append(to_str(attr['option_name']) + ':' + to_str(attr['option_value_name']))
    return '|'.join(parts)
def lecm_rewrite_table_construct(self):
    """Schema descriptor for the plugin's URL-rewrite table."""
    columns = {
        'id': 'INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY',
        'link': 'VARCHAR(255)',
        'type': 'VARCHAR(255)',
        'type_id': 'INT(11)',
        'redirect_type': 'SMALLINT(5)',
    }
    return {'table': '_DBPRF_lecm_rewrite', 'rows': columns}
def is_woo2woo(self):
    """True when source and target carts are the same platform (woo-to-woo)."""
    src_cart = self._notice['src']['cart_type']
    target_cart = self._notice['target']['cart_type']
    return src_cart == target_cart
def check_sync_child(self, child, combination, check_any = False):
    # Decide whether an existing variant (child) matches an attribute
    # combination. Strict mode: every attribute must match by option name
    # or by its code (spaces replaced with '-'). check_any mode: reject
    # only when a value is present under the name but differs under the code.
    for attribute in combination:
        if not check_any:
            if to_str(child.get(attribute['option_name'])) != to_str(attribute['option_value_name']):
                if to_str(child.get(to_str(attribute['option_code']).replace(' ', '-'))) != to_str(attribute['option_value_name']):
                    return False
        elif to_str(child.get(attribute['option_name'])) and to_str(child.get(to_str(attribute['option_code']).replace(' ', '-'))) != to_str(attribute['option_value_name']):
            return False
    return True
def select_all_category_map(self):
    """Return all mapped target ids for categories (or blog categories)."""
    map_type = self.TYPE_CATEGORY_BLOG if self.blog_running else self.TYPE_CATEGORY
    where = {
        'migration_id': self._migration_id,
        'type': map_type,
    }
    result = self.select_obj(TABLE_MAP, where)
    if result['result'] == 'success' and result['data']:
        rows = result['data']
    else:
        rows = list()
    return [row['id_desc'] for row in rows]
def create_file_variant_limit(self):
    """Create media/<migration_id>/variants.csv and write its header row."""
    file_path = get_pub_path() + '/media/' + to_str(self._migration_id)
    if not os.path.exists(file_path):
        os.makedirs(file_path, mode = 0o777)
    file_name = file_path + '/variants.csv'
    column = ['src_id', 'target_id', 'name', 'sku', 'variants']
    # newline='' is required by the csv module; without it extra blank
    # rows appear on Windows. Context manager closes the handle.
    with open(file_name, mode = 'a', newline = '') as variant_file:
        writer = csv.writer(variant_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
        writer.writerow(column)
    return
def warning_variant_limit(self, convert):
    """Emit the variant-limit warning, labelling the product by id or code."""
    if convert['id']:
        label = "#" + to_str(convert['id'])
    else:
        label = ': ' + to_str(convert['code'])
    self.sleep_time(0, 'variant', True, msg = label)
def log_variant_limit(self, product_id, convert, variants):
    """Append one product row to variants.csv and flag the limit breach."""
    self.is_variant_limit = True
    file_name = get_pub_path() + '/media/' + to_str(self._migration_id) + '/variants.csv'
    if not os.path.isfile(file_name):
        # First breach for this migration: create the file with its header.
        self.create_file_variant_limit()
    column = [convert['id'] if convert['id'] else convert['code'], product_id, convert['name'], convert['sku'], variants]
    # newline='' is required by the csv module; without it extra blank
    # rows appear on Windows.
    with open(file_name, mode = 'a', newline = '') as variant_file:
        writer = csv.writer(variant_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
        writer.writerow(column)
    return
def check_slug_exist(self, slug = None):
    """Return the term_id owning *slug*, or False when no term uses it."""
    query = self.create_select_query_connector('terms', {'slug': slug})
    category_data = self.select_data_connector(query)
    try:
        return category_data['data'][0]['term_id']
    except Exception:
        return False
def get_query_img_wpml(self, img_id, language_code):
    """Build the icl_translations INSERT registering an attachment's language.

    For the site's default language the row carries no source language;
    for any other language the row points back to the default language.
    (The original swap logic contained a redundant self-assignment; the
    net effect is preserved here in a direct form.)
    """
    default_site_language = self._notice['target']['language_default']
    if language_code == default_site_language:
        source_language_code = None
    else:
        source_language_code = default_site_language
    trid = self.get_new_trid()
    wpml_img_data = {
        'element_type': 'post_attachment',
        'element_id': img_id,
        'trid': trid,
        'language_code': language_code,
        'source_language_code': source_language_code
    }
    wpml_img_query = self.create_insert_query_connector("icl_translations", wpml_img_data)
    return wpml_img_query
def check_exist_code_product(self, code_product):
    # NOTE(review): this method looks unfinished/dead. It passes the SET
    # literal {'posttype'} where the connector presumably expects a WHERE
    # dict, never uses the parameter `code_product`, ignores the result,
    # and returns None implicitly. Confirm the intended behavior
    # (probably: look up a post by product code) before relying on it.
    check = self.select_data_connector(self.create_select_query_connector('posts', {'posttype'}))
def _get_customer_lookup_id(self, user_id):
    """Map a WordPress user id to its wc_customer_lookup customer_id (0 if none)."""
    if not user_id:
        return 0
    query = self.create_select_query_connector('wc_customer_lookup', {'user_id': user_id})
    customer_lookup_data = self.select_data_connector(query)
    try:
        return customer_lookup_data['data'][0]['customer_id']
    except Exception:
        return 0
"noreply@github.com"
] | noreply@github.com |
11276df4a89d1128a708b3fe2ff3a08e678a0a17 | c188122ea4797d10c6f8e1641a229545dc3fa53c | /core/thread/mtsleepA.py | 90c6ddea7f51e60da7f67b7b4c9cfb36cdb5c5ac | [
"BSD-3-Clause"
] | permissive | lostFox/autoRunSomething | e41fd34f44bb715fe6e21a70fa608e33efb779bc | 519f2ebca6e2c78aa3caeed2e88b8f92403a8b46 | refs/heads/master | 2021-07-15T23:08:24.624022 | 2021-03-03T00:16:40 | 2021-03-03T00:16:40 | 59,880,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'james'
import thread
from time import sleep, ctime
def loop0():
    # Sleep 4s; timestamps before/after demonstrate thread interleaving.
    # (Python 2 module: `print` statement, `thread` module.)
    print 'start loop 0 at:', ctime()
    sleep(4)
    print 'loop 0 done at:', ctime()
def loop1():
    # Sleep 2s; finishes before loop0 despite being started second.
    print 'start loop 1 at', ctime()
    sleep(2)
    print 'loop 1 done at:', ctime()
def main():
    # Start both loops on new threads; the fixed 6s sleep acts as a crude
    # join because thread.start_new_thread offers no way to wait.
    print 'starting at:', ctime()
    thread.start_new_thread(loop0, ())
    thread.start_new_thread(loop1, ())
    sleep(6)
    print 'all DONE at:', ctime()
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"lei_zhang@jit.com.cn"
] | lei_zhang@jit.com.cn |
649b19115673556dea865aec69f56090f46ec14a | 194dae90bf1cc497f9162eca3957fdc6c9094e33 | /deeptennis/data/dataset.py | 2176d31477742cddff68bb3f0879234dbb4103e5 | [
"MIT"
] | permissive | sethah/deeptennis | 41ea80002a5d7993b6ca625bb59efef1d70371f2 | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | refs/heads/master | 2021-06-03T23:51:59.754478 | 2020-05-18T00:13:41 | 2020-05-18T00:13:41 | 147,436,170 | 34 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | import numpy as np
from pathlib import Path
from PIL import Image
from typing import Callable, List
import torch
def compute_mean_std(ds: torch.utils.data.Dataset):
    """
    Compute the per-channel mean and standard deviation over all images
    in the dataset, using running sums of x and x^2 (std = sqrt(E[x^2] - E[x]^2)).
    """
    running_sum = 0.
    running_sq_sum = 0.
    pixel_count = 0.
    for idx in range(len(ds)):
        image, *_ = ds[idx]
        # Flatten each channel to one row: (C, H*W).
        flat = image.view(image.shape[0], -1)
        running_sum = running_sum + flat.sum(dim=1)
        running_sq_sum = running_sq_sum + (flat * flat).sum(dim=1)
        pixel_count = pixel_count + flat.shape[1]
    mean = running_sum / pixel_count
    std = torch.sqrt(running_sq_sum / pixel_count - mean ** 2)
    return mean, std
class ImageFilesDataset(torch.utils.data.Dataset):
    """Dataset over image files on disk, yielding (image, int64 label) pairs."""

    def __init__(self, files: List[Path], labels: np.ndarray = None, transform: Callable = None):
        self.transform = transform
        self.files = files
        # Default to all-zero labels so unlabeled folders still work.
        self.labels = labels if labels is not None else np.zeros(len(files))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        path = self.files[idx]
        target = self.labels[idx]
        with open(path, 'rb') as fh:
            image = Image.open(fh)
            # Force 3-channel RGB regardless of the on-disk format.
            sample = image.convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, torch.tensor(target, dtype=torch.int64)
| [
"shendrickson@cloudera.com"
] | shendrickson@cloudera.com |
57fd48037b9de1fa98c0b884626e6a2d481584e2 | e287723f843005f0639542f8bf83eebb62192137 | /monitoring/prober/scd/test_operation_simple.py | 3cd623c1e8308608c2db5a4a51ff2f0a786e2a10 | [
"Apache-2.0"
] | permissive | RFARREY/dss | 6c02f2aedd0a78260dccbefe3e0d13108031c00e | 2989f68fb1293c08104866e8b0df116487bf075f | refs/heads/master | 2023-07-28T11:58:11.185048 | 2021-08-19T05:56:56 | 2021-08-19T05:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,617 | py | """Basic Operation tests:
- make sure the Operation doesn't exist with get or query
- create the Operation with a 60 minute length
- get by ID
- search with earliest_time and latest_time
- mutate
- delete
"""
import datetime
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import scd
from monitoring.monitorlib.scd import SCOPE_SC, SCOPE_CI, SCOPE_CM
from monitoring.monitorlib.testing import assert_datetimes_are_equal
BASE_URL = 'https://example.com/uss'
OP_ID = '0000008c-91c8-4afc-927d-d923f5000000'
def test_ensure_clean_workspace(scd_session):
    # Remove OP_ID if a previous run left it behind; 404 means already clean.
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
    if resp.status_code == 200:
        resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
        assert resp.status_code == 200, resp.content
    elif resp.status_code == 404:
        # As expected.
        pass
    else:
        assert False, resp.content
def _make_op1_request():
    # Valid Operation payload: a 60-minute window starting 20 minutes from
    # now over a small circle, creating an implicit subscription.
    time_start = datetime.datetime.utcnow() + datetime.timedelta(minutes=20)
    time_end = time_start + datetime.timedelta(minutes=60)
    return {
        'extents': [scd.make_vol4(time_start, time_end, 0, 120, scd.make_circle(-56, 178, 50))],
        'old_version': 0,
        'state': 'Accepted',
        'uss_base_url': BASE_URL,
        'new_subscription': {
            'uss_base_url': BASE_URL,
            'notify_for_constraints': False
        }
    }
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_does_not_exist_get(scd_session):
    # A GET for the not-yet-created operation must 404.
    resp = scd_session.get('/operation_references/{}'.format(OP_ID))
    assert resp.status_code == 404, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_op_does_not_exist_query(scd_session):
    # Area queries must not return OP_ID, and the query endpoint must
    # reject the constraint scopes (only SCOPE_SC may read operations).
    time_now = datetime.datetime.utcnow()
    end_time = time_now + datetime.timedelta(hours=1)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(time_now, end_time, 0, 5000, scd.make_circle(-56, 178, 300))
    }, scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [op['id'] for op in resp.json().get('operation_references', [])]
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(time_now, end_time, 0, 5000, scd.make_circle(-56, 178, 300))
    }, scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(time_now, end_time, 0, 5000, scd.make_circle(-56, 178, 300))
    }, scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_create_op_single_extent(scd_session):
    # 'extents' must be an array; a bare Volume4D is a 400.
    req = _make_op1_request()
    req['extents'] = req['extents'][0]
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req)
    assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_create_op_missing_time_start(scd_session):
    # Extents without time_start must be rejected with 400.
    req = _make_op1_request()
    del req['extents'][0]['time_start']
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req)
    assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: None
@default_scope(SCOPE_SC)
def test_create_op_missing_time_end(scd_session):
    # Extents without time_end must be rejected with 400.
    req = _make_op1_request()
    del req['extents'][0]['time_end']
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req)
    assert resp.status_code == 400, resp.content
# Preconditions: None
# Mutations: Operation OP_ID created by scd_session user
def test_create_op(scd_session):
    # Creation is forbidden under constraint scopes and accepted under
    # SCOPE_SC; the returned reference echoes the request and starts at v1.
    req = _make_op1_request()
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    data = resp.json()
    op = data['operation_reference']
    assert op['id'] == OP_ID
    assert op['uss_base_url'] == BASE_URL
    assert_datetimes_are_equal(op['time_start']['value'], req['extents'][0]['time_start']['value'])
    assert_datetimes_are_equal(op['time_end']['value'], req['extents'][0]['time_end']['value'])
    assert op['version'] == 1
    assert 'subscription_id' in op
    # 'state' is write-only in the reference representation.
    assert 'state' not in op
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
def test_get_op_by_id(scd_session):
    # Reading the operation requires SCOPE_SC; constraint scopes get 403.
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.get('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    data = resp.json()
    op = data['operation_reference']
    assert op['id'] == OP_ID
    assert op['uss_base_url'] == BASE_URL
    assert op['version'] == 1
    assert 'state' not in op
# Preconditions: None, though preferably Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_missing_params(scd_session):
    # A query with no body must be rejected with 400.
    resp = scd_session.post('/operation_references/query')
    assert resp.status_code == 400, resp.content
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search(scd_session):
    # With no time bounds, the area query must include OP_ID.
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID in [x['id'] for x in resp.json().get('operation_references', [])]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_earliest_time_included(scd_session):
    # earliest_time just inside the op's window (op spans +20..+80 min).
    earliest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=59)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(earliest_time, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_earliest_time_excluded(scd_session):
    # earliest_time after the op's window ends (+81 > +80 min) -> excluded.
    earliest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=81)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(earliest_time, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_latest_time_included(scd_session):
    # latest_time exactly at the op's start (+20 min) -> still included.
    latest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=20)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, latest_time, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_op_by_search_latest_time_excluded(scd_session):
    # latest_time before the op's start (+1 < +20 min) -> excluded.
    latest_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, latest_time, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [x['id'] for x in resp.json()['operation_references']]
# Preconditions: Operation OP_ID created by scd_session user
# Mutations: Operation OP_ID mutated to second version
@default_scope(SCOPE_SC)
def test_mutate_op(scd_session):
    # Mutate the op (new base URL, Activated state), supplying the current
    # OVN as key and the current version; version must bump to 2 and the
    # implicit subscription must be retained.
    # GET current op
    resp = scd_session.get('/operation_references/{}'.format(OP_ID))
    assert resp.status_code == 200, resp.content
    existing_op = resp.json().get('operation_reference', None)
    assert existing_op is not None
    req = _make_op1_request()
    req = {
        'key': [existing_op["ovn"]],
        'extents': req['extents'],
        'old_version': existing_op['version'],
        'state': 'Activated',
        'uss_base_url': 'https://example.com/uss2',
        'subscription_id': existing_op['subscription_id']
    }
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.put('/operation_references/{}'.format(OP_ID), json=req, scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
    data = resp.json()
    op = data['operation_reference']
    assert op['id'] == OP_ID
    assert op['uss_base_url'] == 'https://example.com/uss2'
    assert op['version'] == 2
    assert op['subscription_id'] == existing_op['subscription_id']
    assert 'state' not in op
# Preconditions: Operation OP_ID mutated to second version
# Mutations: Operation OP_ID deleted
def test_delete_op(scd_session):
    # Deletion requires SCOPE_SC; constraint scopes get 403.
    resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_CI)
    assert resp.status_code == 403, resp.content
    resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_CM)
    assert resp.status_code == 403, resp.content
    resp = scd_session.delete('/operation_references/{}'.format(OP_ID), scope=SCOPE_SC)
    assert resp.status_code == 200, resp.content
# Preconditions: Operation OP_ID deleted
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_deleted_op_by_id(scd_session):
    # After deletion a GET must 404.
    resp = scd_session.get('/operation_references/{}'.format(OP_ID))
    assert resp.status_code == 404, resp.content
# Preconditions: Operation OP_ID deleted
# Mutations: None
@default_scope(SCOPE_SC)
def test_get_deleted_op_by_search(scd_session):
    # After deletion the area query must no longer return OP_ID.
    resp = scd_session.post('/operation_references/query', json={
        'area_of_interest': scd.make_vol4(None, None, 0, 5000, scd.make_circle(-56, 178, 300))
    })
    assert resp.status_code == 200, resp.content
    assert OP_ID not in [x['id'] for x in resp.json()['operation_references']]
| [
"noreply@github.com"
] | noreply@github.com |
3d9908bde294de7c224663f83522c34701c52a52 | e2daaaaeb89e6f4c6816f775fbe560c3fc765b4a | /HomeScanStaticAnalysis/controlflow.py | 3b57605063f2a412f609060aae77fc6eda6cd176 | [] | no_license | KulaniM/StaticAnalysis | deda8585de8faeef5ddeb0b1d0598cf37bbef65f | ecaad5113e7f9e267087a55496479c4384ccc646 | refs/heads/master | 2023-04-27T13:05:06.647890 | 2019-09-11T08:18:28 | 2019-09-11T08:18:28 | 207,722,516 | 0 | 0 | null | 2023-04-14T17:43:01 | 2019-09-11T04:23:28 | Java | UTF-8 | Python | false | false | 4,229 | py | ############################################################################################
########### depends on the output of this project https://github.com/gousiosg/java-callgraph
import sys
import json
# Global state for the call-graph walk: raw edge strings, parsed edges,
# and forward/backward adjacency indexes keyed by 'method/class'.
callgraph = []
callgraphList = []
flowgraph = {}
forward = {}
backward = {}
# NOTE(review): caller and callee are seeded from the same argv values
# (argv[1]/argv[2] used for both) — confirm whether distinct start points
# for forward and backward walks were intended.
init_caller_method = sys.argv[1] #str('a()')
init_caller_class = sys.argv[2]#str('hha')
init_callee_method = sys.argv[1]#str('getActivity()')
init_callee_class = sys.argv[2]#str('com.google.android.chimera.Fragment')
## M:class1:<method1>(arg_types) (typeofcall)class2:<method2>(arg_types)
## The line means that method1 of class1 called method2 of class2. The type of call can have one of the following values
# Load the java-callgraph output: a single "[a, b, c]"-style list; strip
# the surrounding brackets and deduplicate the edge strings.
with open('output2/callgraph.txt') as f:
    content = f.read()
    callgraph = content.strip().split(', ')
    callgraph[0] = callgraph[0].replace('[', '')
    callgraph[-1] = callgraph[-1].replace(']', '')
    #print(len(callgraph))
    #print(callgraph[-1])
    callgraph = list(set(callgraph))
# Parse each "M:class1:<m1>(args) (type)class2:<m2>(args)" edge into a dict.
for call in callgraph:
    callgraphD = {}
    temp1 = call.strip().split(' ')
    temp2 = temp1[0].strip().split(':')
    # Only method-level edges ("M:...") carry caller/callee information.
    # Use == for string comparison: `is` tests object identity and only
    # worked here by accident of CPython's small-string interning.
    if temp2[0] == "M":
        temp3 = temp1[1].strip().split(':')
        callgraphD['caller_method'] = temp2[2]
        callgraphD['caller_class'] = temp2[1]
        callgraphD['callee_method'] = temp3[1]
        temp4 = temp3[0].strip().split(')')
        callgraphD['callee_class'] = temp4[1]
        callgraphD['callee_invoke-type'] = temp4[0].replace('(', '')
        callgraphList.append(callgraphD)
##### Print all the methods called by the given_method of given_class### FORWARD FLOW
def forwardflow(caller_method, caller_class):
    """Collect the direct callees of caller_class.caller_method.

    Side effect: records the callees under 'method/class' in the global
    `forward` index. Returns the matching call-edge dicts.
    """
    matches = []
    callee_keys = []
    for edge in callgraphList:
        if str(edge.get("caller_method")) == caller_method and str(edge.get("caller_class")) == caller_class:
            matches.append(edge)
            callee_keys.append(edge.get('callee_method') + '/' + edge.get('callee_class'))
    if len(callee_keys):
        forward.update({caller_method + '/' + caller_class: callee_keys})
    return matches
##### Print all the methods wich call within the given_method of given_class ### BACKWARD FLOW
def backwardflow(callee_method, callee_class):
    """Collect the direct callers of callee_class.callee_method.

    Side effect: records the callers under 'method/class' in the global
    `backward` index. Returns the matching call-edge dicts.
    """
    matches = []
    caller_keys = []
    for edge in callgraphList:
        if str(edge.get("callee_method")) == callee_method and str(edge.get("callee_class")) == callee_class:
            matches.append(edge)
            caller_keys.append(edge.get('caller_method') + '/' + edge.get('caller_class'))
    if len(caller_keys):
        backward.update({callee_method + '/' + callee_class: caller_keys})
    return matches
##### forward flow call graph
i = 0
def forwardcallgraph(init_caller_method, init_caller_class, _visited=None):
    """Recursively expand the forward call graph from one method.

    _visited (new, optional, backward-compatible) tracks already-expanded
    'method/class' nodes: without it, any cycle in the call graph caused
    unbounded recursion (RecursionError).
    """
    global i
    i = i + 1
    if _visited is None:
        _visited = set()
    node = init_caller_method + '/' + init_caller_class
    if node in _visited:
        return
    _visited.add(node)
    for callee in forwardflow(init_caller_method, init_caller_class):
        forwardcallgraph(callee.get('callee_method'), callee.get('callee_class'), _visited)
    flowgraph.update({"forward": forward})
##### backward flow call graph
k = 0
def backwardcallgraph(init_callee_method, init_callee_class, _visited=None):
    """Recursively expand the backward call graph from one method.

    _visited (new, optional, backward-compatible) tracks already-expanded
    'method/class' nodes: without it, any cycle in the call graph caused
    unbounded recursion (RecursionError).
    """
    global k
    k = k + 1
    if _visited is None:
        _visited = set()
    node = init_callee_method + '/' + init_callee_class
    if node in _visited:
        return
    _visited.add(node)
    for caller in backwardflow(init_callee_method, init_callee_class):
        backwardcallgraph(caller.get('caller_method'), caller.get('caller_class'), _visited)
    flowgraph.update({"backward": backward})
###### call generate forward flow graph
#print('///////////////////////////////////////////////////////////////////////////')
#print('////////////////////////// FORWARD FLOW GRAPH /////////////////////////////')
#print('///////////////////////////////////////////////////////////////////////////')
# Walk forward from the seed method, then backward; print the combined graph.
forwardcallgraph(init_caller_method, init_caller_class)
#print(forward)
###### call generate forward flow graph
#print('///////////////////////////////////////////////////////////////////////////')
#print('////////////////////////// BACKWARD FLOW GRAPH ////////////////////////////')
#print('///////////////////////////////////////////////////////////////////////////')
backwardcallgraph(init_callee_method, init_callee_class)
#print(backward)
print(flowgraph)
| [
"kulani41@comp.nus.edu.sg"
] | kulani41@comp.nus.edu.sg |
71823020661ed6290534f1975e4bd38265fb1ff5 | 77c37bffac550aa48f146f9f082df75b53744d47 | /JMSSGraphics/Fire.py | 9c9625855804de7010331e6865750ad6f417372f | [] | no_license | toanh/JMSSGraphics | 95473d4ada68be47dc9d35ce699073d9eac3655a | 44493622b3b169cd6d064dc285f649d036375957 | refs/heads/master | 2021-05-12T00:54:16.501295 | 2018-05-13T12:22:31 | 2018-05-13T12:22:31 | 117,546,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | from JMSSGraphics import *
from Particle import *
import random
import math
jmss = Graphics(width = 800, height = 600, title = "Fire!", fps = 60)
images = []
images.append(jmss.loadImage("fire01.png"))
images.append(jmss.loadImage("fire02.png"))
images.append(jmss.loadImage("fire03.png"))
images.append(jmss.loadImage("fire04.png"))
images.append(jmss.loadImage("fire05.png"))
particles = []
def SpawnParticle(img, x, y, vel_x, vel_y, size, lifetime, rotation):
    """Create one fire particle and append it to the global particle list."""
    p = Particle()
    p.img = img
    p.x = x
    p.y = y
    # Velocities are given per-frame; scale to per-second assuming 60 fps.
    p.vel_x = vel_x * 60
    p.vel_y = vel_y * 60
    p.height = size
    p.width = size
    # Remember the spawn size so the particle can shrink over its lifetime.
    p.orig_height = size
    p.orig_width = size
    p.lifetime = lifetime
    p.life = lifetime
    p.rotation = rotation
    particles.append(p)
def UpdateParticles(dt):
    """Advance all particles by dt seconds and cull dead/off-screen ones."""
    for p in particles:
        # Shrink linearly with remaining life fraction.
        t = float(p.life) / p.lifetime
        p.life -= dt
        p.width = t * p.orig_width
        p.height = t * p.orig_height
        p.x += p.vel_x * dt
        p.y += p.vel_y * dt
    # Rebuild the list in place: the original removed elements while
    # iterating the same list, which skips the element following each
    # removal and leaves dead particles alive for extra frames.
    particles[:] = [
        p for p in particles
        if -p.width <= p.x <= jmss.width
        and -p.height <= p.y <= jmss.height
        and p.life >= 0
    ]
def DrawParticles():
    # Draw each particle centred on its anchor (0.5, 0.5) at half opacity;
    # combined with additive blending this produces the fire glow.
    for p in particles:
        jmss.drawImage(p.img, p.x - p.width/2, p.y, p.width, p.height, \
            p.rotation, 0.5, 0.5, opacity= 0.5)
@jmss.mainloop
def Game(dt):
    # Per-frame callback: spawn 5 new particles at the mouse, then update
    # and draw the whole system with additive blending.
    for _ in range(5):
        fire_img = random.choice(images)
        # NOTE(review): randint with a float bound (height/6) raises
        # TypeError on Python 3 — this code appears written for Python 2
        # semantics or integer-height images; confirm.
        size = fire_img.height + random.randint(-fire_img.height/6, fire_img.height/6)
        size /= 1.2
        rand_x = random.randint(-20, 20)
        # Particles spawned further from the centre live shorter.
        max_lifetime = (1 - (abs(rand_x) / 20.0)) * 1.5
        x, y = jmss.getMousePos()
        SpawnParticle(fire_img,
                      x + rand_x, \
                      y + random.randint(-15, 15),\
                      0, \
                      random.random() * 5 + 1, \
                      size,
                      0.25 + random.random() * max_lifetime,
                      (random.random() * 3.14159265359 / 4) - 3.14159265359 / 8)
    jmss.set_blend_type(BLEND_ADDITIVE)
    jmss.clear(0, 0, 0, 1)
    UpdateParticles(dt)
    DrawParticles()
    # Debug overlay: live particle count.
    jmss.drawText(str(len(particles)), 0, 0)
jmss.run() | [
"toan.kien@gmail.com"
] | toan.kien@gmail.com |
4b4e5b5f64b5b6c357d2474e845edb0281f3216f | 67296b720cc33df5cd8d8a5d45492128c21b0f90 | /collect_frames.py | 139cd487fac48e92b39d9d24f76518243edfccaf | [] | no_license | kaushikb258/World_Models | bd2b392f930e86380cc1a93a5f0e7cc12f5e68ff | d37e1a8f1b4cfae90fa038fa67557061a8e81a25 | refs/heads/master | 2020-04-10T21:23:13.903451 | 2018-12-11T07:49:00 | 2018-12-11T07:49:00 | 161,295,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | import numpy as np
import sys
import os
import gym
from PIL import Image
import matplotlib.pyplot as plt
from utils import *
# Ensure the frame output directory exists. os.makedirs(exist_ok=True)
# replaces the stat-then-mkdir race and the bare `except:` that hid real
# errors (e.g. permission problems) behind the mkdir attempt.
os.makedirs("frames", exist_ok=True)
env = gym.make("CarRacing-v0")
episodes = 10000
#------------------------------------------------------------------------------------------
# Roll out random policies, saving cropped frames and the actions taken
# (frames/actions are only recorded after a 50-step warm-up).
ii = 0
act = []
for ep in range(episodes):
    s = env.reset()
    tstep = 0
    ep_reward = 0
    while True:
        tstep += 1
        # Random action: steering in [-1, 1], throttle in [0, 1], light brake.
        steer = np.random.uniform(low=-1.0, high=1.0)
        acc = np.random.uniform(low=0.0, high=1.0)
        br = np.random.uniform(low=0.0, high=0.2)
        actions = [steer, acc, br]
        env.render()
        if (tstep > 50):
            # Past warm-up: record the action and save the cropped frame
            # (top 82 rows drop the HUD area at the bottom).
            act.append(actions)
            im = Image.fromarray(s[:82,:,:])
            im.save("frames/frame_" + str(ii) + ".png")
            ii += 1
        next_s, reward, done, info = env.step(actions)
        ep_reward += reward
        if (tstep > 50):
            # End the episode early once the car leaves the road.
            if (not is_car_on_road(next_s[:82,:,:])):
                done = True
        if (done):
            print("episode: ", ep, "episode reward: ", ep_reward)
            break
        else:
            s = next_s
# Persist the recorded actions alongside the saved frames.
act = np.array(act)
np.save("actions", act)
print(act.shape)
| [
"kaushikb258@gmail.com"
] | kaushikb258@gmail.com |
9d81dd559f41247e65e5cff71490669e802b1643 | 629a4ae44605505c564def28a7de2d01dc4331bf | /src/ProgDBTutor/quote_data_access.py | d7f80d07652e86c4e923d910109491c5950014e2 | [] | no_license | lfereman/tutorial | 80e3b00676dd835632c8dbed441a7bfc55b96d75 | 19ed3f438987deb7a773312155cb9957137edda8 | refs/heads/master | 2021-04-30T12:26:26.372761 | 2018-02-13T12:42:45 | 2018-02-13T12:42:45 | 121,274,767 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,575 | py | #Data Access Object pattern: see http://best-practice-software-engineering.ifs.tuwien.ac.at/patterns/dao.html
#For clean separation of concerns, create separate data layer that abstracts all data access to/from RDBM
#
#Depends on psycopg2 librarcy: see (tutor) https://wiki.postgresql.org/wiki/Using_psycopg2_with_PostgreSQL
import psycopg2
class DBConnection:
def __init__(self,dbname,dbuser,dbpass,dbhost):
try:
self.conn = psycopg2.connect("dbname='{}' user='{}' host='{}' password='{}'".format(dbname,dbuser,dbhost, dbpass))
except:
print('ERROR: Unable to connect to database')
raise Exception('Unable to connect to database')
def close(self):
self.conn.close()
def get_connection(self):
return self.conn
def get_cursor(self):
return self.conn.cursor()
def commit(self):
return self.conn.commit()
def rollback(self):
return self.conn.rollback()
class Quote:
    """Plain value object for one quote row (id, text, author)."""

    def __init__(self, iden, text, author):
        self.id = iden
        self.text = text
        self.author = author

    def to_dct(self):
        """Serialise to the dict shape the API layer returns."""
        return dict(id=self.id, text=self.text, author=self.author)
class QuoteDataAccess:
    """DAO for the Quote table; all SQL is parameterised (no injection)."""

    def __init__(self, dbconnect):
        self.dbconnect = dbconnect

    def get_quotes(self):
        """Return every quote as a list of Quote objects."""
        cursor = self.dbconnect.get_cursor()
        cursor.execute('SELECT id, text, author FROM Quote')
        return [Quote(row[0], row[1], row[2]) for row in cursor]

    def get_quote(self, iden):
        """Return the single Quote with primary key *iden*."""
        cursor = self.dbconnect.get_cursor()
        # Parameterised query; see SO on psycopg2 and SQL injection:
        # https://stackoverflow.com/questions/45128902/psycopg2-and-sql-injection-security
        cursor.execute('SELECT id, text, author FROM Quote WHERE id=%s', (iden,))
        row = cursor.fetchone()
        return Quote(row[0], row[1], row[2])

    def add_quote(self, quote_obj):
        """Insert *quote_obj*, fill in its generated id, and return it."""
        cursor = self.dbconnect.get_cursor()
        try:
            cursor.execute('INSERT INTO Quote(text,author) VALUES(%s,%s)', (quote_obj.text, quote_obj.author,))
            # Fetch the generated primary key and return the updated object.
            cursor.execute('SELECT LASTVAL()')
            quote_obj.id = cursor.fetchone()[0]
            self.dbconnect.commit()
            return quote_obj
        except Exception as err:
            # The previous bare `except:` also swallowed KeyboardInterrupt;
            # chain the cause so the real DB error is not lost.
            self.dbconnect.rollback()
            raise Exception('Unable to save quote!') from err
| [
"len.feremans@gmail.com"
] | len.feremans@gmail.com |
1f1a15327737df474e4091401068d90bf7b7a2d8 | df856d5cb0bd4a4a75a54be48f5b91a62903ee6e | /jishaku/__init__.py | be18c93d969f66dcdc330dc9e0ffd89dc6bb8cc2 | [
"MIT",
"Apache-2.0"
] | permissive | mortalsky/jishaku | 4c89bd69f6e1efcc45fcfdcc81427c71e10dc1de | 9cbbf64dd83697559a50c64653350253b876165a | refs/heads/master | 2023-07-20T04:55:19.144528 | 2021-01-22T08:18:12 | 2021-01-22T08:18:12 | 299,701,523 | 0 | 0 | MIT | 2020-09-29T18:16:24 | 2020-09-29T18:16:23 | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
"""
jishaku
~~~~~~~
A discord.py extension including useful tools for bot development and debugging.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
# pylint: disable=wildcard-import
from jishaku.cog import * # noqa: F401
from jishaku.features.baseclass import Feature # noqa: F401
from jishaku.meta import * # noqa: F401
__all__ = (
'Jishaku',
'Feature',
'setup'
)
| [
"sansgorialis@gmail.com"
] | sansgorialis@gmail.com |
88d871218ddc9d5a96e3ac821323d3bf566ce9b1 | fb05ae8048b188c7d73e45d0b0732223686eb4e4 | /dash-demo.py | 8c67cc6049a8940e154186d5777e2c72a2d37422 | [] | no_license | jluttine/dash-demo | 1b8bd0bf0b6570cf8e33c0fb9278390f37baa686 | 2eab4c7cd92b24214354d8a5e3bce866677efe50 | refs/heads/master | 2023-01-12T19:03:09.745917 | 2020-11-13T16:57:41 | 2020-11-13T16:57:41 | 312,356,690 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | import dash
import dash_html_components as html
import dash_core_components as dcc
from pages import demo1_graph, demo2_datatable
# Create the Dash app/server
app = dash.Dash(
__name__,
external_stylesheets=[
"https://codepen.io/chriddyp/pen/bWLwgP.css",
],
# We need to suppress these errors because when we define the callbacks,
# the subpage layouts haven't been defined yet.. So there would be errors
# about missing IDs. Is there some better solution?
suppress_callback_exceptions=True,
)
# List separate pages
subpages = [
("/demo-graph", demo1_graph),
("/demo-datatable", demo2_datatable),
]
# Generic page layout for the entire app
app.layout = html.Div(
[
# This element is used to read the current URL. Not visible to the
# user.
dcc.Location(id="url", refresh=False),
# The content will be rendered in this element so the children of this
# element will change when browsing to a different page
html.Div(
id="page-content",
className="DashboardContainer",
),
]
)
# Set callbacks for each page
for (_, page) in subpages:
page.set_callbacks(app)
# Layout of the main page
main_layout = html.Div(
className="Container",
children=[
html.H1("Plotly Dash demo"),
html.P(html.I("Jaakko Luttinen - November 16, 2020")),
html.P(html.I("Lead Data Scientist @ Leanheat by Danfoss")),
html.Ul(
[
html.Li([
"This demo is available at: ",
html.A(
"https://github.com/jluttine/dash-demo",
href="https://github.com/jluttine/dash-demo"
)
]),
html.Li("What is Plotly Dash?"),
html.Li("Why not Jupyter Notebooks?"),
]
),
] + [
html.A(
html.Div(
className="Card",
children=[
html.H2(page.title),
html.P(page.description),
]
),
href=url,
) for (url, page) in subpages
] + [
html.Ul([
html.Li([
"So much more cool features: ",
html.A(
"https://dash.plotly.com/",
href="https://dash.plotly.com/",
),
]),
html.Li("Show our real production Dash")
]),
]
)
@app.callback(
    dash.dependencies.Output("page-content", "children"),
    [dash.dependencies.Input("url", "pathname")]
)
def display_page(pathname):
    """Return the layout matching the browser URL: the main page for "/",
    otherwise the registered subpage wrapped with a fixed header."""
    if pathname == "/":
        return main_layout
    subpage = dict(subpages)[pathname]
    # Fixed header (back link + title/description) followed by the subpage body.
    return html.Div([
        dcc.Link("< Back to main page", href="/"),
        html.H1(subpage.title),
        html.P(subpage.description),
        subpage.layout,
    ])
if __name__ == "__main__":
app.run_server(debug=True)
| [
"jaakko.luttinen@iki.fi"
] | jaakko.luttinen@iki.fi |
bfb6270e3c9d1dea3f85b30e18b9ac93406c9354 | 3041068cd9882211a21d5a88b3843b21ff221ff1 | /bookclub/migrations/0004_notes.py | a610c96eee1764c359c9da46b3accfeef64b92d2 | [] | no_license | yardenroee/OriginBookClub | 82b7cbdd1aa0f2386242a06e020f8efc2384d0cd | 6305c6c333e490323ddc6b13d7ba98cef52e7828 | refs/heads/master | 2021-09-24T11:13:02.780516 | 2020-02-04T04:45:14 | 2020-02-04T04:45:14 | 236,394,318 | 0 | 0 | null | 2021-09-22T18:37:06 | 2020-01-27T00:05:01 | Python | UTF-8 | Python | false | false | 510 | py | # Generated by Django 3.0.2 on 2020-01-30 05:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the bookclub ``Notes`` model (auto id + free-form text field)."""

    dependencies = [
        ('bookclub', '0003_auto_20200129_0047'),
    ]

    operations = [
        migrations.CreateModel(
            name='Notes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(default='')),
            ],
        ),
    ]
| [
"yardenroee@gmail.com"
] | yardenroee@gmail.com |
c42cc045d3613843df744ac6b74f7a368d40170e | f46e5ab4747d113215e46240eee4d75509e4be0d | /tests.py | 2dd01180f049fb3cb67a16cefd56d899698aae9a | [
"MIT"
] | permissive | xmonader/objsnapshot | 0d2dc17f9637dfe614332f125af5d867a8110118 | ab639630e6762a1d7c8e7df251f959e27e270e4e | refs/heads/master | 2021-01-22T06:19:26.026384 | 2017-05-30T13:12:22 | 2017-05-30T13:12:22 | 92,542,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | from .objsnapshot import commit, rollback
class Human:
    """A named person with a mutable age; used to exercise object snapshots."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def inc(self, by=None):
        """Increase age by *by*; calling with no argument doubles the age."""
        self.age += self.age if by is None else by

    def __str__(self):
        # Note the trailing space -- preserved from the original format string.
        return "{} {} ".format(self.name, self.age)

    def godangerous(self):
        """Clobber both fields, so a later rollback has something to undo."""
        self.name = "mr x"
        self.age = 90
class MovingBall:
    """A 2-D point restricted (via __slots__) to x/y attributes."""

    __slots__ = ['x', 'y']

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def move2(self, x, y):
        """Teleport the ball to (x, y)."""
        self.x = x
        self.y = y

    def __str__(self):
        return "{} {}".format(self.x, self.y)
h = Human("Ahmed", 50)
mb = MovingBall(0, 0)
### Examples
def test_commit_state():
    """End-to-end check of commit()/rollback() snapshots.

    Exercises both a __dict__-based class (Human) and a __slots__-based
    class (MovingBall): a snapshot captures exactly the two public fields,
    later mutations do not leak into earlier snapshots, and rollback()
    restores an object to a snapshot's state.
    """
    h = Human("Ahmed", 50)
    mb = MovingBall(0, 0)
    # First snapshot captures the initial state.
    commit1 = commit(h)
    assert commit1.state['name'] == 'Ahmed'
    assert commit1.state['age'] == 50
    assert len(commit1.state) == 2
    h.inc(20)
    h.inc(2)
    # Second snapshot sees the mutated age (50 + 20 + 2); commit1 is unaffected.
    commit2 = commit(h)
    assert commit2.state['name'] == 'Ahmed'
    assert commit2.state['age'] != 50
    assert commit2.state['age'] == 72
    assert len(commit2.state) == 2
    h.godangerous()
    commit3 = commit(h)
    assert commit3.state['name'] == 'mr x'
    assert len(commit3.state) == 2
    # Roll back to the very first snapshot ("be good again").
    h = rollback(h, commit1)
    assert h.name == 'Ahmed'
    assert h.age == 50
    # Same checks for the __slots__-based class.
    commit1 = commit(mb)
    assert len(commit1.state) == 2
    assert commit1.state['x'] == 0
    assert commit1.state['y'] == 0
    mb.move2(5, 124)
    commit2 = commit(mb)
    assert commit2.state['x'] == 5
    print(commit2.state)
    assert commit2.state['y'] == 124
    assert len(commit2.state) == 2
    mb = rollback(mb, commit1)
    assert mb.x == 0
    assert mb.y == 0
| [
"xmonader@gmail.com"
] | xmonader@gmail.com |
f34e145964579b358a43c5aeec09cc5535f79280 | 2ba0293397610e2f30e9a7038c65db6f6bbe974f | /Moves.py | 9af1d58229e9e3b17ba7e8d35c8959fe42c7ae42 | [] | no_license | Giruvagen/TextAdv | 2dcfcb21fe1612fdc812285fa2ad25e0086fe92d | cc58e960af749d59b475e81a508eb3e88245a063 | refs/heads/master | 2020-03-22T16:41:10.646193 | 2018-07-13T22:39:15 | 2018-07-13T22:39:15 | 140,343,760 | 0 | 0 | null | 2018-07-15T18:30:18 | 2018-07-09T21:24:31 | Python | UTF-8 | Python | false | false | 2,520 | py | #defines movement
import random
from content import mapdesc
from content import monsters
class motion(object):
    """Moves the player around the 3x3 wrap-around map and triggers
    random monster encounters.

    Relies on the module-level ``mapdesc`` dict (keyed by (x, y)) and the
    ``monsters`` dict imported from content.
    """

    # Axis deltas per compass direction: (dx, dy).
    _DELTAS = {"north": (0, 1), "south": (0, -1), "east": (1, 0), "west": (-1, 0)}

    def __init__(self, char):
        self.char = char

    def battle(self):
        """Announce a random encounter, then re-apply the current move.

        NOTE(review): re-calling move() here advances the player a second
        step in the same direction -- behavior preserved from the original.
        """
        print("A monster appears!")
        self.monster = random.choice(list(monsters.items()))
        # monsters.items() yields (name, stats) tuples; the original passed
        # the whole tuple to a {:s} spec, which raises -- show the name.
        print("It's a wild {:s}!".format(self.monster[0]))
        self.move(self.direction)

    def move(self, direction):
        """Apply self.direction to the player's position.

        Wraps around the 0..2 grid on both axes; a 1-in-9 battle chance
        applies only to moves that did not wrap. Unrecognized directions
        prompt the player again and retry.
        """
        delta = self._DELTAS.get(self.direction)
        if delta is None:
            print("Please choose a valid direction: north, south, east or west!")
            self.direction = input("Choose your direction of travel: ")
            self.move(self.direction)
            return
        dx, dy = delta
        self.startx += dx
        self.starty += dy
        wrapped = False
        if self.startx > 2:
            self.startx = 0
            wrapped = True
        elif self.startx < 0:
            self.startx = 2
            wrapped = True
        if self.starty > 2:
            self.starty = 0
            wrapped = True
        elif self.starty < 0:
            self.starty = 2
            wrapped = True
        print(mapdesc[(self.startx, self.starty)])
        if not wrapped:
            self.battlechance = random.randint(0, 8)
            if self.battlechance == 1:
                self.battle()

    def startmove(self):
        """Drop the player on a random tile and take the first move."""
        self.startx = random.randint(0, 2)
        self.starty = random.randint(0, 2)
        print("Here begins your adventure, {:s}, at spot {:d},{:d}".format(self.char, self.startx, self.starty))
        print(mapdesc[(self.startx, self.starty)])
        self.direction = input("Choose your direction of travel: ")
        self.move(self.direction)
| [
"40126586+Giruvagen@users.noreply.github.com"
] | 40126586+Giruvagen@users.noreply.github.com |
b3f0bdd99f6bee334536b269df6e3f5644de88b7 | 4b14a94152356019675f3d2ac5d668b2459cf153 | /event_app/myenv/bin/easy_install | cf53bcefe41418c2b60872bdcd5909a47f635904 | [] | no_license | arvicz22/eventapp | 20b0360a22b6b7a57b7cc55beca9d0e398161372 | 6bbe8666b31db51262f51992fa14c19137777c90 | refs/heads/master | 2021-01-15T21:44:52.596047 | 2014-03-13T07:34:03 | 2014-03-13T07:34:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | #!/home/eric/Desktop/event_app/myenv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.9.8','console_scripts','easy_install'
__requires__ = 'setuptools==0.9.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('setuptools==0.9.8', 'console_scripts', 'easy_install')()
)
| [
"arvicz22@gmail.com"
] | arvicz22@gmail.com | |
d94e89c1a33b604c6d09bbc9becb67b7a20f9699 | 58ed092530bdb8a6f2bef0ebe261108e8a9c0aff | /myapp/search_indexes.py | abee499518bdc025949a48c1812c21f7f0f7d02f | [] | no_license | 100Rashmi/myTweeter | 2373fcb2ddf09432bfc17d1ddada935c3a29d6d1 | d7a5c067ee639347604a3cdaa11c14c0e11d5515 | refs/heads/master | 2022-12-27T00:06:52.553893 | 2017-11-06T22:16:13 | 2017-11-06T22:16:13 | 109,440,649 | 0 | 0 | null | 2022-12-07T23:47:29 | 2017-11-03T20:37:40 | Python | UTF-8 | Python | false | false | 1,292 | py | import datetime
from haystack import indexes
from myapp.models import Dweet, User
class DweetIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Dweet posts (full-text on dweet_data)."""

    text = indexes.CharField(document=True)  # primary document field
    content = indexes.CharField(model_attr='dweet_data')
    dweet_id = indexes.CharField(model_attr='dweet_id')
    created_time = indexes.DateTimeField(model_attr='created_time')

    def get_model(self):
        """Return the model class this index covers."""
        return Dweet

    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        # Only index rows created up to "now" (excludes future-dated dweets).
        return self.get_model().objects.filter(created_time__lte=datetime.datetime.now())
class UserIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over User profiles (names + profile handle)."""

    text = indexes.CharField(document=True)  # primary document field
    user_id = indexes.CharField(model_attr='user_id')
    user_first_name = indexes.CharField(model_attr='user_first_name')
    user_last_name = indexes.CharField(model_attr='user_last_name')
    user_profile_name = indexes.CharField(model_attr='user_profile_name')
    modified_time = indexes.DateTimeField(model_attr='modified_time')

    def get_model(self):
        """Return the model class this index covers."""
        return User

    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        # Only index rows modified up to "now".
        return self.get_model().objects.filter(modified_time__lte=datetime.datetime.now())
| [
"singhrashmi579@adya.io"
] | singhrashmi579@adya.io |
ff0717b66ccf936a78722d90e6d31dada09a8cf5 | 590c0fa3a144146d9ba3caf0ac7ff9e0a9e5c914 | /packages/riot/tft/__init__.py | 24c4fca9f3dd6ba4c7eb2a5045b91baa86f7a812 | [] | no_license | shoko31/InKeeperBot | 7154fbe1b2ac82a2ac5defe069927a00b0f37952 | c5fc16fc3ff0b2a113feb67e1d8e2c8dd7507b72 | refs/heads/master | 2023-05-25T23:00:41.163366 | 2020-07-23T11:45:49 | 2020-07-23T11:45:49 | 220,335,670 | 0 | 0 | null | 2023-05-22T21:38:27 | 2019-11-07T21:52:32 | Python | UTF-8 | Python | false | false | 181 | py | # __init__.py
from .tft_user import TFTUser
from .tft_game import TFTGame
from .tft_participant import TFTParticipant
from .tft_trait import TFTTrait
from .tft_unit import TFTUnit
| [
"elliott.zz59@gmail.com"
] | elliott.zz59@gmail.com |
ee0501d3fc56808a4fd300c256c1bd3071ec5d4c | fc4fa38962b121e31edf414a860a09caeb8048d2 | /Homework Problems & Solutions/HW 1/Shaik_Alimulla_HW1.py | df4f656e0119a969169fffd2d38f7f76cd6eb962 | [] | no_license | shaikalimulla/Data-Mining | 36da27846d4250eb232fdaa9a3f40195f0cd1a88 | 26311593a6ee902a5d148de22658cf703e2ef665 | refs/heads/master | 2016-09-14T12:50:35.244013 | 2016-04-30T02:47:04 | 2016-04-30T02:47:04 | 57,423,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | #Assignment based on MAGIC Gamma Telescope Data Set ( http://archive.ics.uci.edu/ml/datasets/MAGIC+Gamma+Telescope )
import argparse
import numpy as np
class dataSet:
"""
Class to store the MAGIC Gamma Telescope Data Set
"""
def __init__(self, location):
with open (location, "r") as myfile:
self.readData=myfile.readlines();
def calculate( data, ithAttribute):
"""
Input Parameters:
data: The data that is read from the file.
ithAttribute: The ith Attribute for which the various properties must be calculated.
Default value of 0,infinity,-infinity are assigned to all the variables as required.
Objective of the function is to calculate: N (number of objects), min, max, mean, standard deviation, Q1, median, Q3, IQR
"""
noOfObjects , minValue , maxValue , mean , standardDeviation , q1 , median , q3 ,iqr = [0,"inf","-inf",0,0,0,0,0,0]
result = []
for x in data:
result.append(float(x.split(',')[ithAttribute-1]))
noOfObjects = np.size(result)
minValue = min(result)
maxValue = max(result)
mean = np.mean(result)
standardDeviation = np.std(result)
q1 = np.percentile(result, 25)
median = np.median(result)
q3 = np.percentile(result, 75)
iqr = abs(q3-q1)
return noOfObjects , minValue , maxValue, mean, standardDeviation , q1 , median , q3 , iqr
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Data Mining HW1')
parser.add_argument('--i', type=int,
help="ith attribute of the dataset ( limit 1 to 10 )",
default=5,
choices=set((1,2,3,4,5,6,7,8,9,10)) ,
required=True)
parser.add_argument("--data", type=str,
help="Location of the downloaded file",
default="magic04.data.txt",
required=False)
args = parser.parse_args()
data = dataSet(args.data)
print(','.join(map(str,calculate(data.readData,args.i))))
| [
"alimulla.shaik@gmail.com"
] | alimulla.shaik@gmail.com |
fe3f96a2af6475819c782c04a2b8e8b6b3e3d814 | 52a7b1bb65c7044138cdcbd14f9d1e8f04e52c8a | /budget/urls.py | c353880f983753ec457815a9fa5d6fa7951041ab | [] | no_license | rds0751/aboota | 74f8ab6d0cf69dcb65b0f805a516c5f94eb8eb35 | 2bde69c575d3ea9928373085b7fc5e5b02908374 | refs/heads/master | 2023-05-03T00:54:36.421952 | 2021-05-22T15:40:48 | 2021-05-22T15:40:48 | 363,398,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.urls import path,include
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('app/',views.index,name='index'),
path('add_item/',views.add_item,name='add item'),
] | [
"you@example.com"
] | you@example.com |
0878d152efa09a7a635eab712d773508164c86c7 | 9ad765ac96f6534addcd44d55527e4196f4f6603 | /leetcode/Num_51_N_Queens.py | fad6e3ea43cbbf1c7ce321cda5d3f5a4204e672a | [] | no_license | salalals/ojPython2 | 134838c21f75afa23fc99b5398e9b0564bc7bb5f | f61a7d7af02bc6f3474dd2832c21c2c6ea5635b8 | refs/heads/master | 2021-09-01T01:44:17.254272 | 2017-12-24T07:22:59 | 2017-12-24T07:22:59 | 111,899,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
class Solution(object):
    def solveNQueens(self, n):
        """
        :type n: int
        :rtype: List[List[str]]

        Backtracking over rows: part_sol[i] is the column of the queen
        placed on row i.
        """
        solutions = []
        self.solve_n_queens(n, 0, [], solutions)
        # Return concrete lists -- the original used map(), which is lazy
        # in Python 3 and yielded nested map objects instead of boards.
        return [self.translate_solution(n, sol) for sol in solutions]

    def solve_n_queens(self, n, start, part_sol, solutions):
        """Try every safe column for row *start*, recursing to fill rows below."""
        if start == n:
            solutions.append(part_sol)
            return  # was missing: fell through into a dead (all-pruned) loop
        for col in range(n):
            # Column already occupied by an earlier queen?
            if col in part_sol:
                continue
            # On the same diagonal as an earlier queen?
            if any(abs(col - part_sol[row]) == start - row for row in range(len(part_sol))):
                continue
            self.solve_n_queens(n, start + 1, part_sol + [col], solutions)

    def translate_solution(self, n, solution):
        """Render a column list as board rows, e.g. [1, 3, 0, 2] -> ['.Q..', ...]."""
        return ["." * col + "Q" + "." * (n - col - 1) for col in solution]
| [
"lshuo@amazon.com"
] | lshuo@amazon.com |
86865a380e10df0386ac53bd7aac552daf77e862 | ba6c64c6f8d348a86c16395aaa5f8fadc6cf4386 | /python/lab3/weather_today.py | 55eb99344a6e96084ada05a6f8a6b5ec355564cf | [] | no_license | AmalM7/DataScienceAcademy | 875f00b1909a3b9ba76e178852db7aa6e851e220 | aa3719465f9582436f511ce56ad94cdf59354dca | refs/heads/master | 2020-03-30T19:21:32.129618 | 2018-10-07T19:59:39 | 2018-10-07T19:59:39 | 151,538,683 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import requests
import sys
if len(sys.argv)==1:
print("you have to inter the city name")
sys.exit(0)
else:
city=sys.argv[1]
api_key="bc3dbc9f88d3d484ee1865b765665f1b"
class Weather:
    """Thin client for the OpenWeatherMap current-weather endpoint."""

    def __init__(self, key):
        # OpenWeatherMap API key, appended to every request URL.
        self.key = key

    def get_city_weather(self, city):
        """Fetch current weather for *city*; return the decoded JSON dict."""
        r = requests.get("http://api.openweathermap.org/data/2.5/weather?q="+city+"&appid="+self.key)
        return r.json()

    def show_data(self, json_object):
        """Print temperature, humidity and description from an API response.

        Temperature is whatever unit the API returned (Kelvin by default,
        since no `units` parameter is sent -- TODO confirm).
        """
        print("The temperature is" , json_object["main"]["temp"])
        print("The humidity is", json_object["main"]["humidity"])
        print("The weather description is", json_object["weather"][0]["description"])
weather_today=Weather(api_key)
obj=weather_today.get_city_weather(city)
weather_today.show_data(obj)
| [
"noreply@github.com"
] | noreply@github.com |
14cf80ef32e4bf3d66b3d4e93f8898f83441fbf8 | f1aeae7a5256ad26c3808375ed8bfd5c8d649825 | /config/lan_scope.py | 917412ce34627e538870a712375c434200ef612c | [] | no_license | wikimedia/phlogiston | 4f3f1f5de2e53027ba5c2ebfd69caa381da9dd38 | eb57b5adc7dc193dc3c4a94b6ffb68cca4984741 | refs/heads/master | 2023-06-30T12:28:33.190327 | 2019-03-28T03:05:23 | 2019-03-28T03:05:23 | 40,551,004 | 13 | 4 | null | 2016-10-18T20:01:47 | 2015-08-11T15:51:58 | Python | UTF-8 | Python | false | false | 93 | py | [vars]
scope_title = Language
default_points = 0
start_date = 2016-07-01
show_points = False
| [
"jaufrecht@wikimedia.org"
] | jaufrecht@wikimedia.org |
a605b1d00537686bda9a0600a32decd33694a451 | 698512c01048fcefcc14583089ef2e8c7962923a | /Python_Projects/Data_Visualization/Chap17_working_w_API/python_repos.py | 71713ed6cee6b967da24332c0021287834a44c1f | [] | no_license | Miguel-Tirado/Python | c76cb9846c9a2b9c6b3c4827cdb95042f4e5d447 | 227def380c64095c3040c848aa035ac46d26d079 | refs/heads/main | 2023-04-16T12:15:03.301275 | 2021-04-30T16:39:48 | 2021-04-30T16:39:48 | 346,443,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | import requests
# Query the GitHub search API for the most-starred Python repositories.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
headers = {'Accept' : 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
print(f"Status code: {r.status_code}")
# Decode the JSON payload into a Python dict.
# (Variable names keep the original "responce" spelling.)
responce_dict = r.json()
print(f"Total repositories: {responce_dict['total_count']}")
# 'items' holds one dict per repository returned by this page of results.
repo_dicts = responce_dict['items']
print(f"Repositories returned: {len(repo_dicts)}")
# Examine the first repository
repo_dict = repo_dicts[0]
print("\nSelected information about the first repository:")
for repo_dict in repo_dicts:
    print(f"Name: {repo_dict['name']}")
    print(f"Owner: {repo_dict['owner']['login']}")
    print(f"Stars: {repo_dict['stargazers_count']}")
    print(f"Repository: {repo_dict['html_url']}")
    print(f"Created: {repo_dict['created_at']}")
    print(f"Updated: {repo_dict['updated_at']}")
    print(f"Description: {repo_dict['description']}\n")
# After the loop repo_dict is the *last* repository; list its keys.
print(f"\nKeys: {len(repo_dict)}")
for key in sorted(repo_dict.keys()):
    print(key)
# 'incomplete_results' is False when the search completed within GitHub's
# time limit. True means the result set may be partial (e.g. a timeout),
# not necessarily an error -- worth checking on more complex queries.
incomplete_results = responce_dict['incomplete_results']
print(responce_dict.keys())
print(incomplete_results)
| [
"miguel.e.tirado11@gmail.com"
] | miguel.e.tirado11@gmail.com |
972e563f6cf199234a7a2dbed0586d79bbd072c2 | ab961b490dda45dc99faa3d4c8c5db75ada0448c | /explore.py | b05c75582753ab057e697619bfc1bd88a9aafb89 | [] | no_license | harperpack/budget-viz | eb3f1bebfd3e2aaf5b6b8644dd32bf87aec6714a | 0495c7916c917abca9c1ae8e206c6fa4484c2aef | refs/heads/master | 2022-11-23T08:32:53.759070 | 2020-07-20T01:12:12 | 2020-07-20T01:12:12 | 276,770,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,403 | py | import pandas as pd
import numpy as np
import json
#budget_data = "/Users/harper/Documents/harper_files/projects/budget_data/2019__2020__and_2021_Budget_Data.csv"
budget_data = "/Users/harper/Documents/harper_files/projects/budget-viz/2019__2020__and_2021_Budget_Data.csv"
bdf = pd.read_csv(budget_data)
# accounts = ['Revenues','Expenses']
# budget = {}
# print(bdf.head())
# print(1/0)
class SubAccount:
    """Leaf budget line item (an account code) with running totals."""

    def __init__(self, name):
        self.name = name
        # Running totals, accumulated as budget rows are tallied.
        self.revenue = 0
        self.expense = 0
class Account:
    """A budget account classification that groups SubAccount line items."""

    def __init__(self, name):
        self.name = name
        self.sub_accounts = []
        # Running totals, accumulated as budget rows are tallied.
        self.revenue = 0
        self.expense = 0

    def fetch_subs(self, sub):
        """Return this account's SubAccount named *sub*, creating it on first use."""
        hits = [item for item in self.sub_accounts if item.name == sub]
        if not hits:
            created = SubAccount(sub)
            self.sub_accounts.append(created)
            return created
        if len(hits) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=sub))
            return None
        return hits[0]
class Fund:
    """A funding source with running revenue/expense totals."""

    def __init__(self, name):
        self.name = name
        # Running totals, accumulated as budget rows are tallied.
        self.revenue = 0
        self.expense = 0
class Unit:
    """A business unit: owns its accounts and sub-account line items."""

    def __init__(self, name):
        self.name = name
        # Running totals, accumulated as budget rows are tallied.
        self.revenue = 0
        self.expense = 0
        self.accounts = []
        self.sub_accounts = []

    def fetch_account(self, acct):
        """Return this unit's Account named *acct*, creating it on first use."""
        hits = [account for account in self.accounts if account.name == acct]
        if not hits:
            created = Account(acct)
            self.accounts.append(created)
            return created
        if len(hits) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=acct))
            return None
        return hits[0]

    def fetch_subs(self, sub):
        """Return this unit's SubAccount named *sub*, creating it on first use."""
        hits = [item for item in self.sub_accounts if item.name == sub]
        if not hits:
            created = SubAccount(sub)
            self.sub_accounts.append(created)
            return created
        if len(hits) > 1:
            print("ERROR: Duplicate subs with {a}".format(a=sub))
            return None
        return hits[0]
class Dept:
    """A department: top-level rollup owning funds, units, accounts and
    sub-account line items, each created lazily on first reference."""

    def __init__(self, name):
        self.name = name
        self.funds = []
        self.units = []
        self.accounts = []
        self.sub_accounts = []
        # Running totals, accumulated as budget rows are tallied.
        self.revenue = 0
        self.expense = 0

    def fetch_fund(self, fund):
        """Return this dept's Fund named *fund*, creating it on first use."""
        hits = [f for f in self.funds if f.name == fund]
        if not hits:
            created = Fund(fund)
            self.funds.append(created)
            return created
        if len(hits) > 1:
            print("ERROR: Duplicate funds with {a}".format(a=fund))
            return None
        return hits[0]

    def fetch_unit(self, unit):
        """Return this dept's Unit named *unit*, creating it on first use.

        A handful of unit names have their spaces replaced by underscores
        (presumably to disambiguate them from identically-named funds or
        departments downstream -- TODO confirm).
        """
        if unit.upper() in ['CITY CLERK', 'CITY COUNCIL', 'REPARATIONS FUND', 'HOME FUND', 'INTERFUND TRANSFERS', 'SPECIAL ASSESSMENT']:
            unit = unit.replace(' ', '_')
        hits = [b_unit for b_unit in self.units if b_unit.name == unit]
        if not hits:
            created = Unit(unit)
            self.units.append(created)
            return created
        if len(hits) > 1:
            # NOTE: message says "funds" in the original; output preserved.
            print("ERROR: Duplicate funds with {a}".format(a=unit))
            return None
        return hits[0]

    def fetch_account(self, acct):
        """Return this dept's Account named *acct*, creating it on first use."""
        hits = [account for account in self.accounts if account.name == acct]
        if not hits:
            created = Account(acct)
            self.accounts.append(created)
            return created
        if len(hits) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=acct))
            return None
        return hits[0]

    def fetch_subs(self, sub):
        """Return this dept's SubAccount named *sub*, creating it on first use."""
        hits = [item for item in self.sub_accounts if item.name == sub]
        if not hits:
            created = SubAccount(sub)
            self.sub_accounts.append(created)
            return created
        if len(hits) > 1:
            print("ERROR: Duplicate subs with {a}".format(a=sub))
            return None
        return hits[0]
class Budget:
def __init__(self, name, df, output):
self.name = name
self.funds = []
self.depts = []
self.units = []
self.accounts = []
self.sub_accounts = []
self.revenue = 0
self.expense = 0
self.type_name = "Account Type"
self.fund_name = "Fund"
self.dept_name = "Department"
self.unit_name = "Business Unit"
self.acct_name = "Account Classification"
self.subs_name = "Account Code And Description"
self.vals_name = ["2019 Adopted Budget","2020 Adopted Budget","2021 Projected Budget"]
self.main(df, output)
def obtain_value(self, row):
sum = 0
for val_name in self.vals_name:
sum += float(row[val_name].replace(",",''))
#print(sum / len(self.vals_name))
return int(sum / len(self.vals_name))
    def get_objects(self, row):
        """Resolve (and lazily create) every rollup object touched by *row*.

        Returns the flat list of objects whose revenue/expense totals
        tally_row() will increment: the budget-level dept/fund/unit/account/
        sub-account, the dept-scoped copies, the unit-scoped copies, and the
        account-scoped sub-account. Names are .title()-cased throughout.
        """
        # top level (budget-wide rollups)
        dept = self.fetch_dept(row[self.dept_name].title())
        fund = self.fetch_fund(row[self.fund_name].title())
        unit = self.fetch_unit(row[self.unit_name].title())
        account = self.fetch_account(row[self.acct_name].title())
        sub_account = self.fetch_subs(row[self.subs_name].title())
        # dept level (scoped to this row's department)
        dept_fund = dept.fetch_fund(row[self.fund_name].title())
        dept_unit = dept.fetch_unit(row[self.unit_name].title())
        dept_account = dept.fetch_account(row[self.acct_name].title())
        dept_sub_account = dept.fetch_subs(row[self.subs_name].title())
        # unit level (scoped to this row's business unit)
        unit_account = dept_unit.fetch_account(row[self.acct_name].title())
        unit_sub_account = dept_unit.fetch_subs(row[self.subs_name].title())
        # account level (sub-account scoped to the unit's account)
        account_sub_account = unit_account.fetch_subs(row[self.subs_name].title())
        return [dept, fund, unit, account, sub_account, dept_fund, dept_unit, dept_account, dept_sub_account, unit_account, unit_sub_account, account_sub_account]
def tally_row(self, row):
value = self.obtain_value(row)
objects = self.get_objects(row)
if row[self.type_name] == "Revenues":
for obj in objects:
obj.revenue += value
self.revenue += value
elif row[self.type_name] == "Expenses":
for obj in objects:
obj.expense += value
self.expense += value
else:
print("ERROR: Unknown classification with {t}".format(t=row["Account Type"]))
    def fetch_dept(self, dept):
        """Return the budget-level Dept named *dept*, creating it if new."""
        match = [department for department in self.depts if department.name == dept]
        if not match:
            self.depts.append(Dept(dept))
            return self.depts[-1]
        elif len(match) > 1:
            # Should be unreachable given the create-on-miss logic above.
            print("ERROR: Duplicate depts with {d}".format(d=dept))
        else:
            return match[0]

    def fetch_fund(self, fund):
        """Return the budget-level Fund named *fund*, creating it if new."""
        match = [fnd for fnd in self.funds if fnd.name == fund]
        if not match:
            self.funds.append(Fund(fund))
            return self.funds[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate funds with {d}".format(d=fund))
        else:
            return match[0]

    def fetch_unit(self, unit):
        """Return the budget-level Unit named *unit*, creating it if new.

        The same space->underscore normalization as Dept.fetch_unit is
        applied to a handful of names (presumably to keep them distinct
        from identically-named departments/funds -- TODO confirm).
        """
        if unit.upper() in ['CITY CLERK','CITY COUNCIL','REPARATIONS FUND','HOME FUND','INTERFUND TRANSFERS','SPECIAL ASSESSMENT']:
            unit = unit.replace(' ','_')
        match = [b_unit for b_unit in self.units if b_unit.name == unit]
        if not match:
            self.units.append(Unit(unit))
            return self.units[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate units with {d}".format(d=unit))
        else:
            return match[0]

    def fetch_account(self, acct):
        """Return the budget-level Account named *acct*, creating it if new."""
        match = [account for account in self.accounts if account.name == acct]
        if not match:
            self.accounts.append(Account(acct))
            return self.accounts[-1]
        elif len(match) > 1:
            print("ERROR: Duplicate accts with {a}".format(a=acct))
        else:
            return match[0]

    def fetch_subs(self, sub):
        """Return the budget-level SubAccount named *sub*, creating it if new."""
        match = [sub_account for sub_account in self.sub_accounts if sub_account.name == sub]
        if not match:
            self.sub_accounts.append(SubAccount(sub))
            return self.sub_accounts[-1]
        elif len(match) > 1:
            # NOTE: message says "accts" in the original source.
            print("ERROR: Duplicate accts with {a}".format(a=sub))
        else:
            return match[0]
def format(self, number):
num = str(int(number))
length = len(num)
if length < 4:
return ''.join(['$',num])
output = ''
while length > 3:
output = num[-3:] + output
output = ',' + output
num = num[:-3]
length = len(num)
output = '$' + num + output
return output
def ratio(self, numerator, denominator):
if denominator:
return int(100 * (numerator/denominator))
elif numerator:
return "ERROR"
else:
return 'n/a'
def classify(self, name):
if name in [x.name for x in self.depts]:
return "Department"
elif name in [x.name for x in self.funds]:
return "Fund"
elif name in [x.name for x in self.units]:
return "Unit"
elif name in [x.name for x in self.accounts]:
return "Account"
elif name in [x.name for x in self.sub_accounts]:
return "Item"
else:
print("ERROR: cannot locate {n}".format(n=name))
    def output(self):
        """Serialize the whole rollup to ./budget.json.

        Structure: {"schema": [...], "revenue": {...}, "expense": {...}}.
        Each named entry maps to {"type": <schema label>, "members": {...}}
        where "members" pairs a parent scope ("total", a dept name, a unit
        name, or an account name) with an (amount, percent-of-parent) tuple.
        """
        budget = {"schema":["Department","Fund","Unit","Account","Item"],"revenue":{},"expense":{}}
        # Grand totals (ratio of total to itself is always 100).
        budget["revenue"][self.name] = {"type":"Total","members":{"total":(self.revenue, self.ratio(self.revenue,self.revenue))}}
        budget["expense"][self.name] = {"type":"Total","members":{"total":(self.expense, self.ratio(self.expense,self.expense))}}
        # Top level: every budget-wide object, as a share of the grand total.
        all_objs = self.depts + self.funds + self.units + self.accounts + self.sub_accounts
        for obj in all_objs:
            obj_type = self.classify(obj.name)
            if obj.revenue:
                if not budget["revenue"].get(obj.name,''):
                    budget["revenue"][obj.name] = {"type":obj_type,"members":{"total":(obj.revenue,self.ratio(obj.revenue,self.revenue))}}
                else:
                    print("ERROR: duplicate rev dept with {o}".format(o=obj.name))
            if obj.expense:
                if not budget["expense"].get(obj.name,''):
                    budget["expense"][obj.name] = {"type":obj_type,"members":{"total":(obj.expense,self.ratio(obj.expense,self.expense))}}
                else:
                    print("ERROR: duplicate exp dept with {o}".format(o=obj.name))
        # Department level: dept-scoped objects, as a share of their dept.
        for dept in self.depts:
            all_objs = dept.funds + dept.units + dept.accounts + dept.sub_accounts
            for obj in all_objs:
                if obj.revenue:
                    if not budget["revenue"].get(obj.name,''):
                        obj_type = self.classify(obj.name)
                        budget["revenue"][obj.name] = {"type":obj_type,"members":{dept.name:(obj.revenue,self.ratio(obj.revenue,dept.revenue))}}
                    else:
                        budget["revenue"][obj.name]["members"][dept.name] = (obj.revenue,self.ratio(obj.revenue,dept.revenue))
                if obj.expense:
                    if not budget["expense"].get(obj.name,''):
                        obj_type = self.classify(obj.name)
                        budget["expense"][obj.name] = {"type":obj_type,"members":{dept.name:(obj.expense,self.ratio(obj.expense,dept.expense))}}
                    else:
                        budget["expense"][obj.name]["members"][dept.name] = (obj.expense,self.ratio(obj.expense,dept.expense))
            # Unit level: unit-scoped objects, as a share of their unit.
            for unit in dept.units:
                all_objs = unit.accounts + unit.sub_accounts
                for obj in all_objs:
                    if obj.revenue:
                        if not budget["revenue"].get(obj.name,''):
                            obj_type = self.classify(obj.name)
                            budget["revenue"][obj.name] = {"type":obj_type,"members":{unit.name:(obj.revenue,self.ratio(obj.revenue,unit.revenue))}}
                        else:
                            budget["revenue"][obj.name]["members"][unit.name] = (obj.revenue,self.ratio(obj.revenue,unit.revenue))
                    if obj.expense:
                        if not budget["expense"].get(obj.name,''):
                            obj_type = self.classify(obj.name)
                            budget["expense"][obj.name] = {"type":obj_type,"members":{unit.name:(obj.expense,self.ratio(obj.expense,unit.expense))}}
                        else:
                            budget["expense"][obj.name]["members"][unit.name] = (obj.expense,self.ratio(obj.expense,unit.expense))
                # Account level: sub-accounts as a share of their account.
                for account in unit.accounts:
                    for obj in account.sub_accounts:
                        if obj.revenue:
                            if not budget["revenue"].get(obj.name,''):
                                obj_type = self.classify(obj.name)
                                budget["revenue"][obj.name] = {"type":obj_type,"members":{account.name:(obj.revenue,self.ratio(obj.revenue,account.revenue))}}
                            else:
                                budget["revenue"][obj.name]["members"][account.name] = (obj.revenue,self.ratio(obj.revenue,account.revenue))
                        if obj.expense:
                            if not budget["expense"].get(obj.name,''):
                                obj_type = self.classify(obj.name)
                                budget["expense"][obj.name] = {"type":obj_type,"members":{account.name:(obj.expense,self.ratio(obj.expense,account.expense))}}
                            else:
                                budget["expense"][obj.name]["members"][account.name] = (obj.expense,self.ratio(obj.expense,account.expense))
        with open("./budget.json", 'w', encoding='utf-8') as f:
            json.dump(budget, f, ensure_ascii=False, indent=4)
def rank_print(self):
ranked_rev_depts = reversed(sorted(self.depts, key=lambda dept: dept.revenue))
ranked_exp_depts = reversed(sorted(self.depts, key=lambda dept: dept.expense))
ranked_rev_accts = reversed(sorted(self.accounts, key=lambda acct: acct.revenue))
ranked_exp_accts = reversed(sorted(self.accounts, key=lambda acct: acct.expense))
print("Departments by Revenue: \n")
for rank, dept in enumerate(ranked_rev_depts, start=1):
ratio = self.ratio(dept.revenue,self.revenue)
if ratio < 5:
leftover = len(self.depts) - rank
print("{r} - {f}: Other ({l} departments)".format(r=rank,f=rank+leftover,l=leftover))
break
print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=dept.name,m=self.format(dept.revenue),x=ratio))
print("-----\nDepartments by Expense: \n")
for rank, dept in enumerate(ranked_exp_depts, start=1):
ratio = self.ratio(dept.expense,self.expense)
if ratio < 5:
leftover = len(self.depts) - rank
print("{r} - {f}: Other ({l} departments)".format(r=rank,f=rank+leftover,l=leftover))
break
print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=dept.name,m=self.format(dept.expense),x=ratio))
print("\n=====\nAccounts by Revenue: \n")
for rank, acct in enumerate(ranked_rev_accts, start=1):
ratio = self.ratio(acct.revenue,self.revenue)
if ratio < 5:
leftover = len(self.accounts) - rank
print("{r} - {f}: Other ({l} accounts)".format(r=rank,f=rank+leftover,l=leftover))
break
print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=acct.name,m=self.format(acct.revenue),x=ratio))
print("-----\nAccounts by Expense: \n")
for rank, acct in enumerate(ranked_exp_accts, start=1):
ratio = self.ratio(acct.expense,self.expense)
if ratio < 5:
leftover = len(self.accounts) - rank
print("{r} - {f}: Other ({l} accounts)".format(r=rank,f=rank+leftover,l=leftover))
break
print("{r}: {d}\n\t{m}\t({x}%)\n".format(r=rank,d=acct.name,m=self.format(acct.expense),x=ratio))
def verbose_print(self):
print("Total budget for {n}:".format(n=self.name))
print(">Revenue: {r}".format(r=self.format(self.revenue)))
print(">Expense: {e}".format(e=self.format(self.expense)))
print("------\n")
for dept in self.depts:
print("{d}:".format(d=dept.name))
print("->Total Revenue: {r}\t({x}% of total)".format(r=self.format(dept.revenue),x=self.ratio(dept.revenue,self.revenue)))
print("->Total Expense: {e}\t({x}% of total)".format(e=self.format(dept.expense),x=self.ratio(dept.expense,self.expense)))
print("\n")
for acct in dept.accounts:
print('--{a}'.format(a=acct.name))
print('---> R: {r}\t({x}%)'.format(r=self.format(acct.revenue),x=self.ratio(acct.revenue,dept.revenue)))
print('---> E: {e}\t({x}%)'.format(e=self.format(acct.expense),x=self.ratio(acct.expense,dept.expense)))
def main(self, df, output):
for index, row in df.iterrows():
self.tally_row(row)
# print(count)
# print(len(self.revenue) + len(self.expense))
# print(self.revenue)
# print(self.expense)
# print(1/0)
if output == "verbose":
self.verbose_print()
elif output == "rank":
self.rank_print()
elif output == "output":
self.output()
Budget("Evanston",bdf,"output")
# cols = []
# for col in bdf:
# if col == "Account Type":
# continue
# elif col in ["2019 Adopted Budget","2020 Adopted Budget","2021 Projected Budget"]:
# continue
# cols.append(col)
#
# for index, row in bdf.iterrows():
# dept = row["Department"]
# unit = row["Business Unit"]
# amount = float(row["2020 Adopted Budget"].replace('.','').replace(',',''))
# type = row["Account Type"]
# if not budget.get(dept,''):
# #budget[dept] = {"R19":0,"Ex19":0,"R20":0,"Ex20":0,"R21":0,"Ex21":0}
# budget[dept] = {"Revenues":0,"Expenses":0}
# # budget[dept] = {"2019":0,"2020":0,"2021":0}
# if not budget[dept].get(unit,''):
# # budget[dept][unit] = {"2019":0,"2020":0,"2021":0}
# budget[dept][unit] = {"Revenues":0,"Expenses":0}
# budget[dept][type] += amount
# budget[dept][unit][type] += amount
# for department, value in budget.items():
# print('------\n')
# print(department,"\t","R: ",value["Revenues"],"\t","E: ",value["Expenses"])
# for unit, details in value.items():
# if unit in ['Revenues','Expenses']:
# continue
# print('--> ',unit,"\t","R: ",details["Revenues"],"\t","E: ",details["Expenses"])
#
# # funds = {"Revenues":[],"Expenses":[]}
# # for index, row in bdf.iterrows():
# # # print(index)
# # # print(row["Fund"])
# # # print(row["Account Type"])
# # # print(1/0)
# # if row["Fund"] not in funds[row["Account Type"]]:
# # funds[row["Account Type"]].append(row["Fund"])
# # for key, value in funds.items():
# # print("-----\n")
# # print(key)
# # print(value)
# # print("\n")
# # print(1/0)
#
#
#
# # for account in accounts:
# # # budget[account] = {}
# # for col in bdf:
# # if col in accounts:
# # continue
# # elif col in ["2019 Adopted Budget","2020 Adopted Budget","2021 Projected Budget"]:
# # continue
# # if not budget.get(col,''):
# # budget[col] = {"same":False,"Revenues":[],"Expenses":[]}
# # budget[col][account] = bdf[col].unique()
# # # if not budget[col][account].all():
# # # print(bdf[col].unique())
# # # print("Wump")
# # # else:
# # # print(col,account)
# # # print(bdf[col].unique())
# # if np.array_equal(budget[col]["Revenues"],budget[col]["Expenses"]):
# # budget[col]["same"] = True
# # else:
# # budget[col]["same"] = False
# # for column, value in budget.items():
# # print("-----\n")
# # print(column)
# # if value["same"]:
# # print(value["Revenues"])
# # else:
# # print("~Revenues")
# # print(value["Revenues"])
# # print("\n")
# # print("~Expenses")
# # print(value["Expenses"])
| [
"charlespack2019@u.northwestern.edu"
] | charlespack2019@u.northwestern.edu |
3090368248d3f1123c7946855c97dbc0ec1154e9 | 4fd84e0e1097d1153ed477a5e76b4972f14d273a | /myvirtualenv/lib/python3.7/site-packages/azure/mgmt/iothub/models/certificate_properties.py | d91afb9c0adb00d0e035b9e1023cc3ad459f53fc | [
"MIT"
] | permissive | peterchun2000/TerpV-U | c045f4a68f025f1f34b89689e0265c3f6da8b084 | 6dc78819ae0262aeefdebd93a5e7b931b241f549 | refs/heads/master | 2022-12-10T09:31:00.250409 | 2019-09-15T15:54:40 | 2019-09-15T15:54:40 | 208,471,905 | 0 | 2 | MIT | 2022-12-08T06:09:33 | 2019-09-14T16:49:41 | Python | UTF-8 | Python | false | false | 2,165 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateProperties(Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: datetime
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
}
def __init__(self, **kwargs):
super(CertificateProperties, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
| [
"peterchun2000@gmail.com"
] | peterchun2000@gmail.com |
9918925b5893ab5e67cfe34926eb8f39e50a3f68 | a5b3c17361b0d68818a0088d2632706353aa768f | /app/core/urls.py | 2c01c9dc76b59d446a2cc277aaf6d2d00a8d8820 | [] | no_license | marcinpelszyk/django-docker-compose-deploy | 7bd6d91a08aa4c60fd801115e4277d26cfd77642 | 6e4716d5324172778e5babecb40952de66448301 | refs/heads/main | 2023-06-06T02:56:44.709915 | 2021-06-28T15:38:56 | 2021-06-28T15:38:56 | 380,349,649 | 0 | 1 | null | 2021-06-28T08:10:53 | 2021-06-25T20:42:07 | Python | UTF-8 | Python | false | false | 387 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"marcin.pelszyk90@gmail.com"
] | marcin.pelszyk90@gmail.com |
069744ff71226f81911edc60bc48d467b30ed337 | b2a2a2f7e19fc8e9c6f5d2dedb0b4b10d7c813ae | /backend/api/util/functional/curry.py | 1a66ea3074629bc98d88a47586b5b1aef9541bb3 | [] | no_license | glenstarchman/bar-rate | d7a6e6660bd3fafe7777d435d33334e2be4d0480 | 575e5f695650487a679ede04af6f62d464c53c18 | refs/heads/master | 2022-02-27T01:31:17.879000 | 2019-09-26T14:43:05 | 2019-09-26T14:43:05 | 191,376,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import functools
def curried(n):
def curry(fn):
def _inner(*args):
if len(args) < n:
return curried(n - len(args))(functools.partial(fn, *args))
return fn(*args)
return _inner
return curry
| [
"glen@starchman.com"
] | glen@starchman.com |
95c65277f91241c50d4f1ba3d992e6bd1eade41d | 79605a09c30148d4d01ab6ac73f7ca4085a9915b | /mnist_fashion.py | fb343289ada36949d0138a74d01f0edb9acce635 | [] | no_license | ranjan103/Fashion-MNIST- | df43b768c5f6142d5a6a8e59e8d4fc5ee7023812 | 069e88b7b9bd5fcfa90790d1b6f23658b2b4144e | refs/heads/master | 2020-04-25T05:49:07.306261 | 2019-02-25T17:56:03 | 2019-02-25T17:56:03 | 172,556,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,348 | py | # -*- coding: utf-8 -*-
"""MNIST_fashion.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16fhQl202LeNZP_Wd6D2J1Drpb20Cg1ys
"""
import warnings
warnings.filterwarnings('ignore')
import pickle
import numpy as np
import pandas as pd
import json
import nltk
from textblob import TextBlob
import spacy
import matplotlib.pyplot as plt
import cv2
from sklearn.datasets import make_circles
import keras
from google.colab import drive
drive.mount('/content/gdrive')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import to_categorical
from keras import models
from keras import layers
import cv2
from sklearn.datasets import make_circles
from keras.models import Sequential
from keras.layers import Dense
from keras import models
model = models.Sequential()
X,Y = make_circles(n_samples=500,shuffle=True,noise=0.05,random_state=1,factor=0.8)
X.shape
model.add(Dense(units=2, activation='relu', input_dim=2))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=5, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
split=int(0.8*X.shape[0])
x_train=X[:split,:]
x_test=X[split:,:]
y_test=Y[split:]
y_train=Y[:split]
print(x_train.shape)
print(y_train.shape)
x_train=np.array(x_train)
y_train=np.array(y_train)
history=model.fit(x_train, y_train, epochs=1000, batch_size=8)
score = model.evaluate(x_test, y_test, verbose=1)
print(score)
history.history.keys()
plt.style.use("seaborn")
plt.plot(history.history['loss'])
plt.show()
fashion_mnist = keras.datasets.fashion_mnist
(train_images,train_labels) , (test_images,test_labels) = fashion_mnist.load_data()
print(train_labels)
print(train_labels.shape)
print(train_images.shape)
import cv2
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
img_=train_images[0]
img_ = img_.reshape((28,28))
plt.imshow(img_)
plt.colorbar()
plt.grid(False)
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(1,1))
img_=train_images[0]
img_ = img_.reshape((28,28))
plt.imshow(img_)
plt.colorbar()
plt.grid(False)
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
import tensorflow as tf
modell = models.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
modell.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
modell.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = modell.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = modell.predict(test_images)
print(predictions.shape)
print(predictions)
test_labels.shape
predictions[0]
pred_ = []
for i in range(test_images.shape[0]):
pred_.append(np.argmax(predictions[i]))
np.sum(pred_==test_labels)/float(test_labels.shape[0])
| [
"noreply@github.com"
] | noreply@github.com |
f0848bea7f02f1bf7e260eb65eeaf7fefbdc380a | daa90db36eff7050fe1224dc8caa403d9e95b5c9 | /tests/test_adjoints.py | 5fbab8f85551dc6a96b0c666a6f43509b69f6d57 | [
"MIT"
] | permissive | fagan2888/torchkbnufft | a19fc61648dc3b5665aa34680302691099c6dfac | 6c6e2c008ae3e8e48a938bedd25431f8db20c106 | refs/heads/master | 2020-12-02T23:29:45.918591 | 2019-12-19T20:15:47 | 2019-12-19T20:15:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,243 | py | import sys
import numpy as np
import torch
from torchkbnufft import (AdjKbNufft, AdjMriSenseNufft, KbInterpBack,
KbInterpForw, KbNufft, MriSenseNufft)
from torchkbnufft.math import inner_product
def test_interp_2d_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
batch_size = params_2d['batch_size']
im_size = params_2d['im_size']
grid_size = params_2d['grid_size']
numpoints = params_2d['numpoints']
x = np.random.normal(size=(batch_size, 1) + grid_size) + \
1j*np.random.normal(size=(batch_size, 1) + grid_size)
x = torch.tensor(np.stack((np.real(x), np.imag(x)), axis=2))
y = params_2d['y']
ktraj = params_2d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbinterp_ob = KbInterpForw(
im_size=im_size,
grid_size=grid_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbinterp_ob = KbInterpBack(
im_size=im_size,
grid_size=grid_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbinterp_ob(x, ktraj)
y_back = adjkbinterp_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_nufft_2d_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbnufft_ob = KbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbnufft_ob = AdjKbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbnufft_ob(x, ktraj)
y_back = adjkbnufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_mrisensenufft_2d_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
smap = params_2d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = sensenufft_ob(x, ktraj)
y_back = adjsensenufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_interp_3d_adjoint(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
batch_size = params_3d['batch_size']
im_size = params_3d['im_size']
grid_size = params_3d['grid_size']
numpoints = params_3d['numpoints']
x = np.random.normal(size=(batch_size, 1) + grid_size) + \
1j*np.random.normal(size=(batch_size, 1) + grid_size)
x = torch.tensor(np.stack((np.real(x), np.imag(x)), axis=2))
y = params_3d['y']
ktraj = params_3d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbinterp_ob = KbInterpForw(
im_size=im_size,
grid_size=grid_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbinterp_ob = KbInterpBack(
im_size=im_size,
grid_size=grid_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbinterp_ob(x, ktraj)
y_back = adjkbinterp_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_nufft_3d_adjoint(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_3d['im_size']
numpoints = params_3d['numpoints']
x = params_3d['x']
y = params_3d['y']
ktraj = params_3d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbnufft_ob = KbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbnufft_ob = AdjKbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = kbnufft_ob(x, ktraj)
y_back = adjkbnufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_mrisensenufft_3d_adjoint(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_3d['im_size']
numpoints = params_3d['numpoints']
x = params_3d['x']
y = params_3d['y']
ktraj = params_3d['ktraj']
smap = params_3d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
x_forw = sensenufft_ob(x, ktraj)
y_back = adjsensenufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
def test_mrisensenufft_3d_coilpack_adjoint(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
norm_tol = testing_tol
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
smap = params_2d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints,
coilpack=True
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints,
coilpack=True
).to(dtype=dtype, device=device)
x_forw = sensenufft_ob(x, ktraj)
y_back = adjsensenufft_ob(y, ktraj)
inprod1 = inner_product(y, x_forw, dim=2)
inprod2 = inner_product(y_back, x, dim=2)
assert torch.norm(inprod1 - inprod2) < norm_tol
| [
"matt.muckley@gmail.com"
] | matt.muckley@gmail.com |
cb51eb6a2f963f2087652b4694cfd9b3a685df21 | 2f791e0444719ddcb8cc407e72e869f7fac5181b | /graphics/PromIndexResultsMerger.py | 2fc9cf0aab3d3cf6444c6b94e434fe502cc537b0 | [] | no_license | ichen-lab-ucsb/WFLIVM_k-Seq | 35d522df889e35826e535be56ed4d5579efe2c1b | 68990737c2257cef2815d7df74e2f7686bc5a597 | refs/heads/main | 2023-04-20T15:02:36.076837 | 2021-04-23T22:26:09 | 2021-04-23T22:26:09 | 360,320,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | ### this script concatenates the output files from the promiscuity index calculator (http://hetaira.herokuapp.com/) and merges I values with the master results table
### input files must be in an accessible directory (eg 'data/promiscuity_index_tables/WFLIVM-r_results_tables/') and must be in the format 'results (i).csv'
### import libraries
import pandas as pd
### create dataframe and
PI_DF = pd.DataFrame(columns=['seq','I'])
FileRange = range(0,40) ### set file range based on number of input files
for i in FileRange:
FileName = 'data/promiscuity_index_tables/WFLIVM-r_results_tables/results (' + str(i) + ').csv'
data = pd.read_csv(FileName, header=None, index_col=False)
data.columns = ['seq','I']
data.drop(data.tail(1).index,inplace=True)
PI_DF = pd.concat([PI_DF,data], ignore_index=True)
### merge I values to master file
df = pd.read_csv('data/WFLIVM-k-seq_merged_+r.csv').sort_values(by='seq')
merged = df.merge(PI_DF, on='seq')
merged.to_csv('data/WFLIVM-k-seq_merged_+r+I.csv', index=False) | [
"noreply@github.com"
] | noreply@github.com |
74a1d218dd31db1af846475408c11a85b61f2503 | dbb451b9775b9345ccc26b562bbddf6d7ade94d8 | /Python v4/Django 2.2 v4/Misc/orm/orm_app/migrations/0001_initial.py | 10a85ba2565a2325d960d26339e9e4b8bf50627e | [] | no_license | ethan-mace/Coding-Dojo | b46968806c80c73b736f98155aea89b8b33c4b0b | a1c7c88e9f0e5a5ebcafde733d5acaebec071270 | refs/heads/main | 2023-02-22T15:19:44.030120 | 2021-01-26T15:48:17 | 2021-01-26T15:48:17 | 325,411,101 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 3.1.3 on 2020-11-12 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=45)),
('description', models.TextField()),
('release_date', models.DateTimeField()),
('duration', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"ethanmace@protonmail.com"
] | ethanmace@protonmail.com |
f88d26fd93f16bef39a4eafcdb8174838d8e21bd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2147/60692/307788.py | 10de8faf4f82529d5d59df010ef8d72681e4f591 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | n = input()
if n == '5 5 1 3 2':
print(0)
print(3)
print(3)
print(2)
print(5)
elif n == '100 109 79 7 5':
list1 = [27,52,80,50,40,37,27,60,60,55,55,25,40,80,52,50,25,45,72,45,65,32,22,50,20,80,35,20,22,47,52,20,77,22,52,12,75,55,75,77,75,27,7,75,27,82,52,47,22,75,65,22,57,42,45,40,77,45,40,7,50,57,85,5,47,50,50,32,60,55,62,27,52,20,52,62,25,42,0,45,30,40,15,82,17,67,52,65,50,10,87,52,67,25,,70,67,52,67,42,55]
for i in list1:
print(i)
else:
print(n) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f044bb0442fbcbaa809b0a48dc4740ee1516c226 | c3e86a8cb94c67a7b0881d72a50e529b85cd27ac | /NameMixer2.0.py | 6c493c9546e36c0e61cfd4102f74382c99c81b06 | [] | no_license | Rinlix/Rix | 252e011089e6b7eec138db3f9e1dc50621974aa8 | 478eb79eb12e38f7479c45cc08ec6af3ebfd6c0e | refs/heads/master | 2020-04-16T16:55:58.775508 | 2019-01-15T00:24:41 | 2019-01-15T00:24:41 | 165,755,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | #NameMixer.py
import random
import time
while True:
true = True
def rigged():
print('DToo much cringe and toxic, cannot survive...')
true = False
names = []
while true:
name = input('Enter A Name: [q] to randomize all names ')
if name == 'q':
num = len(names)
true = False
elif name == 'DonaldTrump':
rigged()
else:
names.append(name)
for a in range(num):
output = random.choice(names)
names.remove(output)
ordinal = a + 1
if ordinal == 1:
ordinal1 = '1st'
elif ordinal == 2:
ordinal1 = '2nd'
elif ordinal == 3:
ordinal1 = '3rd'
else:
ordinal = str(ordinal)
ordinal1 = (ordinal + 'th')
for i in range(6):
time.sleep(0.001)
print('=', end='')
print('', end='\n')
print('')
print(ordinal1,'is', ': ', output)
print('')
| [
"noreply@github.com"
] | noreply@github.com |
bffc7093c19d2b1011751494132301c78945c914 | 4d1e2ed1bd2ff8ea1b3ba5613857503ccfce4b48 | /external/emsdk_portable/emscripten/1.34.1/tools/separate_asm.py | 4f48286e15bbbe77b1a786ad71f1c0635378d82d | [
"MIT",
"NCSA"
] | permissive | brooklynpacket/cocos2d-x | 7bf18909cf2af221aac70fdbe000658e7e97bcb8 | 03b3f8deef304bb9b5ed50acb23158e71712cc15 | refs/heads/master | 2023-08-24T10:38:11.252485 | 2019-02-06T01:23:56 | 2019-02-06T01:23:56 | 6,341,455 | 1 | 1 | null | 2022-09-26T13:53:36 | 2012-10-22T20:00:19 | C++ | UTF-8 | Python | false | false | 724 | py | #!/usr/bin/env python2
'''
Separates out the core asm module out of an emscripten output file.
This is useful because it lets you load the asm module first, then the main script, which on some browsers uses less memory
'''
import os, sys
import asm_module
infile = sys.argv[1]
asmfile = sys.argv[2]
otherfile = sys.argv[3]
everything = open(infile).read()
module = asm_module.AsmModule(infile).asm_js
module = module[module.find('=')+1:] # strip the initial "var asm =" bit, leave just the raw module as a function
everything = everything.replace(module, 'Module["asm"]')
o = open(asmfile, 'w')
o.write('Module["asm"] = ')
o.write(module)
o.write(';')
o.close()
o = open(otherfile, 'w')
o.write(everything)
o.close()
| [
"jeff@brooklynpacket.com"
] | jeff@brooklynpacket.com |
0419ac9e3a8690b336dfd0c914d177fad34f610a | 79c6aa23011caa4ac8ddc5abf0da7ff6a189df9e | /user/migrations/0011_alter_post_date.py | 9d89d725ff8f018293e7a4068a6557816c2e4ae5 | [] | no_license | anthonyd21/anthonyd21.github.io | 76c8a0ef3fe79f5fdff84f6acae48a35f49da41d | ad4bdd362263655daa08e58ace3667772e179807 | refs/heads/main | 2023-05-08T00:26:00.588030 | 2021-06-02T15:39:09 | 2021-06-02T15:39:09 | 368,646,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.2.3 on 2021-06-02 04:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0010_alter_post_date'),
]
operations = [
migrations.AlterField(
model_name='post',
name='date',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"anthonyd21@parkschool.net"
] | anthonyd21@parkschool.net |
1c72a69c41c707bacbf963e7c9a6acc1973fdfc0 | badd02f87eeee1216df4c66447e947f0f1cbe328 | /FlaskWebProject2/views.py | de8be825114043a447a1f9057b62635220fc4f58 | [] | no_license | Ajithvajrala23/Website-Using-Flask-Framework | 7dafbeb9eba7d8ad6f49c15eb58ec0ed4fb713f2 | c1ed1edb6d379daf6ef4ba3b36d27b7418231a64 | refs/heads/master | 2022-07-14T13:56:56.002797 | 2019-07-04T10:26:59 | 2019-07-04T10:26:59 | 192,701,155 | 0 | 0 | null | 2022-06-21T22:12:10 | 2019-06-19T09:25:56 | JavaScript | UTF-8 | Python | false | false | 5,260 | py | """
Routes and views for the flask application.
"""
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from FlaskWebProject2 import app
import os
import requests
import operator
import re
#import nltk
from flask import Flask, render_template, request, send_file
from collections import Counter
#from bs4 import BeautifulSoup
#from textblob import TextBlob
import numpy as np
#from textblob.sentiments import NaiveBayesAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
#import base64
analyser = SentimentIntensityAnalyzer()
stops = [
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',
'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',
'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',
'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for',
'with', 'about', 'against', 'between', 'into', 'through', 'during',
'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in',
'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no',
'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now', 'id', 'var',
'function', 'js', 'd', 'script', '\'script', 'fjs', 'document', 'r',
'b', 'g', 'e', '\'s', 'c', 'f', 'h', 'l', 'k'
]
def calculate_sentimet(comment):
score = analyser.polarity_scores(comment)
negative = score['neg']
positive = score['pos']
neutral = score['neu']
return positive,neutral, negative
@app.route('/')
@app.route('/home')
def home():
"""Renders the home page."""
return render_template(
'index.html',
title='Home Page',
year=datetime.now().year,
)
@app.route('/contact')
def contact():
"""Renders the contact page."""
return render_template(
'contact.html',
title='Contact',
year=datetime.now().year,
message='Details'
)
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About Me',
year=datetime.now().year,
message='I am Libra'
)
@app.route('/projects')
def projects():
"""Renders the about page."""
return render_template(
'projects.html',
title='Projects',
year=datetime.now().year,
message='My Notable works are'
)
@app.route("/text")
def text():
return render_template('text.html')
@app.route("/process", methods =['POST'])
def process():
comment = request.form['comment']
positive, neutral, negative = calculate_sentimet(comment)
pie_labels = ['Positive' ,'Neutral', 'Negative']
pie_values = [positive*100, neutral*100, negative*100]
colors = ['green', 'orange', 'red']
return render_template('sentiment.html', comment = comment,
positive = positive, neutral = neutral,
negative= negative,
max=17000,
set=zip(pie_values, pie_labels, colors))
@app.route('/me', methods=['GET', 'POST'])
def me():
    """Word-frequency counter for a user-supplied URL.

    GET renders the empty form; POST fetches the URL, strips the HTML,
    tokenises the text with NLTK and renders the non-stop-words sorted
    by descending frequency.
    """
    errors = []
    results = {}
    if request.method == "POST":
        # get url that the person has entered
        try:
            url = request.form['url']
            r = requests.get(url)
            print(r)
        except:
            errors.append(
                "Unable to get URL. Please make sure it's valid and try again."
            )
            return render_template('me.html', errors=errors)
        # NOTE(review): a 4xx/5xx Response is falsy, so error pages silently
        # render with empty results — confirm that is the intended behaviour.
        if r:
            # text processing
            print(r)
            raw = BeautifulSoup(r.text, 'html.parser').get_text()
            #nltk.data.path.append('./nltk_data/') # set the path
            tokens = nltk.word_tokenize(raw)
            text = nltk.Text(tokens)
            # remove punctuation, count raw words
            nonPunct = re.compile('.*[A-Za-z].*')
            raw_words = [w for w in text if nonPunct.match(w)]
            raw_word_count = Counter(raw_words)
            # stop words
            no_stop_words = [w for w in raw_words if w.lower() not in stops]
            no_stop_words_count = Counter(no_stop_words)
            # save the results
            results = sorted(
                no_stop_words_count.items(),
                key=operator.itemgetter(1),
                reverse=True
            )
            print(results)
            try:
                # NOTE(review): the Result object is built but never persisted
                # (no session.add/commit visible here) — confirm upstream.
                result = Result(
                    url=url,
                    result_all=raw_word_count,
                    result_no_stop_words=no_stop_words_count
                )
            except:
                errors.append("Unable to add item to database.")
    return render_template('me.html', errors=errors, results=results)
| [
"ajith.vajrala@gmail.com"
] | ajith.vajrala@gmail.com |
478b4ad805ee0087c6d18ba496681501d17cbbd0 | f0d925b64af90d903971aeb23225d9a4e98ee77d | /registration/tests.py | 36fbe28bf3359b091c359bea691af25368f9ac4c | [] | no_license | joseduno/django-playground-web | 8d0fd7c8746eaf4ffcd83970f95340dd23234f2b | a2121ac5e0e1ac06490e08b07f9f305988969778 | refs/heads/master | 2022-12-22T07:36:58.654226 | 2020-10-04T20:00:05 | 2020-10-04T20:00:05 | 291,525,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from django.test import TestCase
from .models import Profile
from django.contrib.auth.models import User
# Create your tests here.
class ProfileTestCase(TestCase):
    """Checks that creating a User leaves a matching Profile row behind."""
    def setUp(self): # must always be named setUp
        User.objects.create_user('test', 'test@test.com', 'test1234')
    def test_profile_exists(self): # test method names must always start with test_
        # A Profile should exist for the user created in setUp —
        # presumably created by a post_save signal; verify in models.
        exists = Profile.objects.filter(user__username='test').exists()
        self.assertEqual(exists, True)
"""Para ejecutar la prueba, python3 manage.py test registration"""
| [
"jose.duno@spymovil.com"
] | jose.duno@spymovil.com |
6e38e37f80b07675a03fc3c09d5f3f29091bf7f4 | 680a9f1cf6d54caf320021d8848bd42b8dbc703e | /site-packages/webassets/filter/compass.py | 2414d2bb0e308067e5a96bce368c127fde798caa | [] | no_license | rljacobson/Guru-NB | 9af650cb61c4ba86a4aa7f09b5e2f21a58486f12 | 8a36ac4c92b1c29102029b0f678311d11cff542c | refs/heads/master | 2021-01-15T18:53:05.071984 | 2015-02-01T17:31:48 | 2015-02-01T17:31:48 | 10,697,118 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,870 | py | """
Generally speaking, compass provides a command line util that is used
a) as a management script (like django-admin.py) doing for example
setup work, adding plugins to a project etc), and
b) can compile the sass source files into CSS.
While generally project-based, starting with 0.10, compass supposedly
supports compiling individual files, which is what we are using for
implementing this filter. Supposedly, because there are numerous issues
that require working around. See the comments in the actual filter code
for the full story on all the hoops be have to jump through.
An alternative option would be to use Sass to compile. Compass essentially
adds two things on top of sass: A bunch of CSS frameworks, ported to Sass,
and available for including. And various ruby helpers that these frameworks
and custom Sass files can use. Apparently there is supposed to be a way
to compile a compass project through sass, but so far, I haven't got it
to work. The syntax is supposed to be one of:
$ sass -r compass `compass imports` FILE
$ sass --compass FILE
See:
http://groups.google.com/group/compass-users/browse_thread/thread/a476dfcd2b47653e
http://groups.google.com/group/compass-users/browse_thread/thread/072bd8b51bec5f7c
http://groups.google.com/group/compass-users/browse_thread/thread/daf55acda03656d1
"""
import os
from os import path
import tempfile
import shutil
import subprocess
from webassets import six
from webassets.exceptions import FilterError
from webassets.filter import Filter, option
__all__ = ('Compass',)
class CompassConfig(dict):
    """A trivial dict wrapper that can generate a Compass config file."""

    def to_string(self):
        """Serialize the mapping as newline-separated Ruby assignments."""
        lines = []
        for key, value in self.items():
            lines.append('%s = %s' % (key, self._ruby_literal(value)))
        return '\n'.join(lines)

    @staticmethod
    def _ruby_literal(val):
        """Determine the correct string rep for the config file."""
        if isinstance(val, bool):
            # True -> true and False -> false
            return str(val).lower()
        if isinstance(val, six.string_types) and val.startswith(':'):
            # ruby symbols, like :nested, used for "output_style"
            return str(val)
        if isinstance(val, dict):
            # ruby hashes, for "sass_options" for example
            return '{%s}' % ', '.join("'%s' => '%s'" % i for i in val.items())
        if isinstance(val, tuple):
            val = list(val)
        # works fine with strings and lists
        return repr(val)
class Compass(Filter):
    """Converts `Compass <http://compass-style.org/>`_ .sass files to
    CSS.

    Requires at least version 0.10.

    To compile a standard Compass project, you only need to have
    to compile your main ``screen.sass``, ``print.sass`` and ``ie.sass``
    files. All the partials that you include will be handled by Compass.

    If you want to combine the filter with other CSS filters, make
    sure this one runs first.

    Supported configuration options:

    COMPASS_BIN
        The path to the Compass binary. If not set, the filter will
        try to run ``compass`` as if it's in the system path.

    COMPASS_PLUGINS
        Compass plugins to use. This is equivalent to the ``--require``
        command line option of the Compass. and expects a Python list
        object of Ruby libraries to load.

    COMPASS_CONFIG
        An optional dictionary of Compass `configuration options
        <http://compass-style.org/help/tutorials/configuration-reference/>`_.
        The values are emitted as strings, and paths are relative to the
        Environment's ``directory`` by default; include a ``project_path``
        entry to override this.
    """
    # Name under which this filter is registered with webassets.
    name = 'compass'
    # Compass output is never left in "debug" form by this filter.
    max_debug_level = None
    # (attribute, setting-name) mapping consumed by the Filter base class.
    options = {
        'compass': ('binary', 'COMPASS_BIN'),
        'plugins': option('COMPASS_PLUGINS', type=list),
        'config': 'COMPASS_CONFIG',
    }

    def open(self, out, source_path, **kw):
        """Compass currently doesn't take data from stdin, and doesn't allow
        us accessing the result from stdout either.

        Also, there's a bunch of other issues we need to work around:

         - compass doesn't support given an explicit output file, only a
           "--css-dir" output directory.

           We have to "guess" the filename that will be created in that
           directory.

         - The output filename used is based on the input filename, and
           simply cutting of the length of the "sass_dir" (and changing
           the file extension). That is, compass expects the input
           filename to always be inside the "sass_dir" (which defaults to
           ./src), and if this is not the case, the output filename will
           be gibberish (missing characters in front). See:
           https://github.com/chriseppstein/compass/issues/304

           We fix this by setting the proper --sass-dir option.

         - Compass insists on creating a .sass-cache folder in the
           current working directory, and unlike the sass executable,
           there doesn't seem to be a way to disable it.

           The workaround is to set the working directory to our temp
           directory, so that the cache folder will be deleted at the end.
        """
        tempout = tempfile.mkdtemp()
        # Temporarily move to "tempout", so .sass-cache will be created there
        old_wd = os.getcwd()
        os.chdir(tempout)
        try:
            # Make sure to use normpath() to not cause trouble with
            # compass' simplistic path handling, where it just assumes
            # source_path is within sassdir, and cuts off the length of
            # sassdir from the input file.
            sassdir = path.normpath(path.dirname(source_path))
            source_path = path.normpath(source_path)

            # Compass offers some helpers like image-url(), which need
            # information about the urls under which media files will be
            # available. This is hard for two reasons: First, the options in
            # question aren't supported on the command line, so we need to write
            # a temporary config file. Secondly, the assume a defined and
            # separate directories for "images", "stylesheets" etc., something
            # webassets knows nothing of: we don't support the user defining
            # something such directories. Because we traditionally had this
            # filter point all type-specific directories to the root media
            # directory, we will define the paths to match this. In other
            # words, in Compass, both inline-image("img/test.png) and
            # image-url("img/test.png") will find the same file, and assume it
            # to be {env.directory}/img/test.png.
            # However, this partly negates the purpose of an utility like
            # image-url() in the first place - you not having to hard code
            # the location of your images. So we allow direct modification of
            # the configuration file via the COMPASS_CONFIG setting (see
            # tickets #36 and #125).
            #
            # Note that is also the --relative-assets option, which we can't
            # use because it calculates an actual relative path between the
            # image and the css output file, the latter being in a temporary
            # directory in our case.
            config = CompassConfig(
                project_path=self.env.directory,
                http_path=self.env.url,
                http_images_dir='',
                http_stylesheets_dir='',
                http_fonts_dir='',
                http_javascripts_dir='',
                images_dir='',
            )
            # Update with the custom config dictionary, if any.
            if self.config:
                config.update(self.config)
            config_file = path.join(tempout, '.config.rb')
            f = open(config_file, 'w')
            try:
                f.write(config.to_string())
                f.flush()
            finally:
                f.close()

            command = [self.compass or 'compass', 'compile']
            for plugin in self.plugins or []:
                command.extend(('--require', plugin))
            command.extend(['--sass-dir', sassdir,
                            '--css-dir', tempout,
                            '--config', config_file,
                            '--quiet',
                            '--boring',
                            '--output-style', 'expanded',
                            source_path])
            proc = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    # shell: necessary on windows to execute
                                    # ruby files, but doesn't work on linux.
                                    shell=(os.name == 'nt'))
            stdout, stderr = proc.communicate()

            # compass seems to always write a utf8 header? to stderr, so
            # make sure to not fail just because there's something there.
            if proc.returncode != 0:
                raise FilterError(('compass: subprocess had error: stderr=%s, '+
                                   'stdout=%s, returncode=%s') % (
                                                stderr, stdout, proc.returncode))

            guessed_outputfile = \
                path.join(tempout, path.splitext(path.basename(source_path))[0])
            f = open("%s.css" % guessed_outputfile)
            try:
                out.write(f.read())
            finally:
                f.close()
        finally:
            # Restore previous working dir
            os.chdir(old_wd)
            # Clean up the temp dir
            shutil.rmtree(tempout)
| [
"rljacobson@gmail.com"
] | rljacobson@gmail.com |
45feca857f34e368b1e8a792f14d2161e7944bc8 | 9a44bd208fba409164207fb5e2d8192a4fc345e7 | /Marc/crawling_foc.py | 58217f757b8bb3d7d166b3c1206e06ffec51287b | [] | no_license | sinlars/GuoTuMarc2 | 06e96b0ce230902a0975512f00ce7e5d9eb963f3 | 99d057b854ff245482e2e5c93c08ab31145ce9d1 | refs/heads/master | 2023-02-15T00:52:21.468424 | 2020-12-30T05:36:23 | 2020-12-30T05:36:23 | 325,466,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,543 | py | #coding:utf-8
import urllib.request
import lxml.html
from pymarc import Record, Field
from pymarc import MARCReader
import re
import xlwt
import sys,io
import openpyxl
from bs4 import BeautifulSoup
import gzip
import docx
from docx import Document
from io import BytesIO
import pymysql
import pinyin
import datetime
import requests
# (disabled) switch stdout to gb18030 so Chinese prints survive on Windows consoles
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')

# HTTP request headers imitating a desktop Chrome/QQBrowser session.
# NOTE(review): the Cookie value below is a captured PHP session id and has
# very likely expired — confirm it is still required by the target site.
headers = {
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    #'Accept-Encoding':'gzip, deflate, sdch',
    'Accept-Encoding':'gzip, deflate',
    'Accept-Language':'zh-CN,zh;q=0.9',
    #'Connection':'keep-alive',
    #'Cookie':'_gscu_413729954=00942062efyg0418; Hm_lvt_2cb70313e397e478740d394884fb0b8a=1500942062',
    #'Host':'opac.nlc.cn',
    'Cookie':'PHPSESSID=0f94e40864d4e71b5dfeb2a8cf392922; Hm_lvt_668f5751b331d2a1eec31f2dc0253443=1542012452,1542068702,1542164499,1542244740; Hm_lpvt_668f5751b331d2a1eec31f2dc0253443=1542246351',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3141.7 Safari/537.36 Core/1.53.3226.400 QQBrowser/9.6.11682.400'}
def getHtml(url,num_retries = 5):
    """Fetch *url* with the module-level headers and return the decoded HTML.

    Gzip responses are decompressed transparently; everything else is
    decoded as UTF-8 with undecodable bytes ignored. On any error the
    request is retried up to *num_retries* times; None is returned once
    the retries are exhausted.
    """
    print('Crawling url:', url)
    try:
        req = urllib.request.Request(url, headers=headers)
        resp = urllib.request.urlopen(req, timeout=30)
        meta = resp.info()
        raw = resp.read()
        if meta.get('Content-Encoding') == 'gzip':
            # Server sent a gzip body: decompress before decoding.
            buff = BytesIO(raw)
            page_html = gzip.GzipFile(fileobj=buff).read().decode('utf-8')
        else:
            page_html = raw.decode('utf-8', 'ignore')
        print(page_html)
        return page_html
    except Exception as e:
        print('Downloading error:', str(e))
        print('重试次数:', num_retries)
        if num_retries > 0:
            # The original retried identically for 5xx and non-5xx errors,
            # so a single recursive call preserves its behaviour.
            return getHtml(url, num_retries - 1)
        print('重试次数完毕:', num_retries)
        return None
def insertMysql(sql):
    """Execute one INSERT against the local `zhiwu` database.

    Returns the auto-increment id of the inserted row, or 0 when the
    insert failed (the transaction is rolled back and the error printed).

    NOTE(review): the db.escape(sql) call below discards its return
    value, so the SQL that is executed is NOT escaped — callers must
    sanitise interpolated values themselves. Confirm intent.
    """
    #sql = pymysql.escape_string(sql)
    lastid = 0
    # Hard-coded local credentials; acceptable only for this one-off crawler.
    db = pymysql.connect(host='localhost',port= 3306,user = 'root',passwd='123456',db='zhiwu',charset='utf8')
    cursor = db.cursor()
    db.escape(sql)
    try:
        #print(sql)
        cursor.execute(sql)
        lastid = db.insert_id();
        db.commit()
    except Exception as e:
        print(e)
        db.rollback()
    cursor.close()
    db.close()
    return lastid
def get_pinyin(str):
    """Return the space-delimited pinyin of *str*, or '' for empty input.

    The literal string 'None' is also treated as empty.
    """
    if str is None:
        return ''
    cleaned = str.strip()
    if cleaned in ('None', ''):
        return ''
    return pinyin.get(cleaned, format='strip', delimiter=' ')
def get_pinyin_prefix(str):
    """Return the uppercase pinyin initials of *str*, or '' for empty input.

    The literal string 'None' is also treated as empty.
    """
    if str is None:
        return ''
    cleaned = str.strip()
    if cleaned in ('None', ''):
        return ''
    return pinyin.get_initial(cleaned, delimiter='').upper()
def get_name_existed(name):
    """Return the tb_classsys row whose classsys_latin equals *name*.

    Returns the first matching row tuple, or None when nothing matches.
    The query is parameterized, so a name containing quotes can no longer
    break — or inject into — the SQL (the original used %-interpolation
    inside a quoted literal).
    """
    db = pymysql.connect(host='localhost', port=3306, user='root',
                         passwd='123456', db='zhiwu', charset='utf8')
    try:
        cursor = db.cursor()
        # Let the driver quote/escape the value instead of string formatting.
        cursor.execute('select * from tb_classsys where classsys_latin=%s',
                       (name,))
        data = cursor.fetchone()
        cursor.close()
    finally:
        # The original leaked the connection; always release it.
        db.close()
    return data
def get_foc():
    """Fetch and return every row of table ``zhiwu2``.

    Returns a tuple of row tuples (default pymysql cursor). The
    connection is now closed after use — the original only closed the
    cursor and leaked the connection.
    """
    db = pymysql.connect(host='localhost', port=3306, user='root',
                         passwd='123456', db='zhiwu', charset='utf8')
    try:
        cursor = db.cursor()
        cursor.execute('select * from zhiwu2')
        data = cursor.fetchall()
        print(data)
        cursor.close()
    finally:
        db.close()
    return data
def get_text_docx():
    """Convert a flora .docx volume into a 4-column .xls sheet.

    Walks the Word paragraphs and writes them row by row: every four
    non-empty paragraphs form one row (Chinese name, Latin name, body,
    English body). A paragraph ending in '科' (family) resets the column
    counter so the next paragraph starts a new record.
    NOTE(review): the i/j/ke bookkeeping is tightly coupled to the exact
    layout of this one document — confirm before reusing on other files.
    """
    file = docx.Document("C:\\Users\\dell\\Desktop\\高等九卷.docx")
    i = 1
    j = 0
    wb = xlwt.Workbook()
    ws = wb.add_sheet('中国高等植物彩色图鉴正文内容-第九卷', cell_overwrite_ok=True)
    ws.write(0, 0, '物种中文名')
    ws.write(0, 1, '物种拉丁名') # original note: family / Chinese name column
    ws.write(0, 2, '正文内容')
    ws.write(0, 3, '正文英文内容')
    ke = False
    for p in file.paragraphs:
        #if i > 20 :break
        #print('--------------------')d
        #if p.text.strip() == '':break
        if p.text.strip() == '' :
            continue
        if j%4 == 0:
            # a full row of four cells has been written: advance to the next row
            j = 0
            i = i + 1
        print('----------',i, j)
        ws.write(i, j, p.text.strip())
        if ke is True:
            j = 0
            #i = i + 1
            ke = False
        else :
            j = j + 1
        print(p.text,'---',p.style.name)
        #print(run.bold for run in p.runs)
        #if p.style.name == '种-英文' :
        for run in p.runs:
            if run.bold :
                print(run.text,run.bold)
                #print(run.bold)
        #print('--------------------')
        #j = j + 1
        if p.text.strip().endswith('科'):
            ke = True
    wb.save("C:/Users/dell/Desktop/高等九卷.xls")
def get_content(url='http://www.efloras.org/',cralw_url='http://www.efloras.org/browse.aspx?flora_id=2&page=%s',pages=2):
    """Walk the paginated FOC browse listing and print family/genus links.

    *cralw_url* is a page template with a ``%s`` page-number slot; *pages*
    is how many listing pages to visit. Rows with five cells are taxon
    rows: column 1 holds the family link, column 3 the genus link.
    NOTE(review): the inner fetch below uses ``getHtml(url)`` (the site
    root), not ``getHtml(ke_url)`` — this looks like a bug; confirm.
    """
    for i in range(1,pages+1):
        cralw_url_i = cralw_url % (str(i))
        info = getHtml(cralw_url_i)
        #print(info)
        page_context = BeautifulSoup(info, "html.parser")
        divs = page_context.find_all(id='ucFloraTaxonList_panelTaxonList')
        #print(divs)
        if len(divs) > 0:
            div = divs[0]
            table = div.find_all('table')[0]
            #print(table)
            trs = table.find_all('tr')
            #print(trs)
            for tr in trs:
                tds = tr.find_all('td')
                if len(tds) == 5:
                    print(tds[0].text,tds[1].text,tds[2].text,tds[3].text,tds[4].text)
                    #if tds[1].fina_all('a') is not None:
                    ke_urls = tds[1].select('a[href]')
                    print(ke_urls)
                    if len(ke_urls) > 0:
                        ke_url = ke_urls[0].get('href');
                        print('ke_url :',ke_url)
                        ke_context = getHtml(url)
                        #print(ke_context)
                        ke_context_soup = BeautifulSoup(ke_context, "html.parser")
                        table_ke = ke_context_soup.find_all('table',id='footerTable')
                        print(table_ke)
                    shu_urls = tds[3].select('a[href]')
                    print(shu_urls)
                    if len(shu_urls) > 0:
                        print('shu_url :',shu_urls[0].get('href'))
def get_ke_context(url):
    """Scrape one eFloras taxon-treatment page and persist it.

    Collects the volume/page info, taxon ids, sequence number, Latin and
    Chinese names, authors and the treatment body into a dict which is
    stored via wuzhong_detail_sql(). Afterwards it also walks the
    identification-key table (tableKey) and the related-objects list.
    NOTE(review): the lookups assume the exact efloras.org
    florataxon.aspx markup; unexpected pages raise IndexError/KeyError,
    and a failed getHtml() (None) will crash BeautifulSoup — confirm
    callers tolerate that.
    """
    volume_content = {};
    ke_context = getHtml(url)
    volume_content['url'] = url
    volume_content['taxon_id'] = get_max_number(url)
    ke_context_soup = BeautifulSoup(ke_context, "html.parser")
    table_ke = ke_context_soup.find_all('table', id='footerTable')
    tds = table_ke[0].select('td[style]')
    #print(tds[0].text)# 科所在的卷册、页码等
    volume_content['volume_title'] = tds[0].text
    div_context = ke_context_soup.find_all('div', id='panelTaxonTreatment')
    #print(div_context[0].find_all(re.compile("^image")))
    #print('正文内容:',div_context[0].prettify())
    foc_taxon_chain = ke_context_soup.select_one('span[id="lblTaxonChain"]')
    #print(foc_taxon_chain)
    parent_links = foc_taxon_chain.find_all('a')
    if parent_links:
        # last breadcrumb link is the direct parent taxon
        parent_link = parent_links[len(parent_links)-1].get('href')
        volume_content['parent_taxon_id'] = get_max_number(parent_link)
    volume_list = foc_taxon_chain.find_all('a', href=re.compile("volume_id"), recursive=False)
    if len(volume_list) == 1:
        volume_content['volume_id'] = get_max_number(volume_list[0].get('href'))
        volume_content['volume'] = volume_list[0].text
    span = div_context[0].find_all('span',id='lblTaxonDesc')[0]
    #print('正文内容:', span.prettify())
    #print(span.prettify())
    ##################### image-table section (species photos + credits) ################
    image_table = span.select_one('table')
    if image_table:
        image_table_tr_list = image_table.find_all('tr')
        for image_table_tr in image_table_tr_list:
            image_table_td_list = image_table_tr.find_all('td')
            for image_table_td in image_table_td_list:
                if image_table_td.a:
                    #print('图片连接:',image_table_td.select_one('a').img.get('src')) ##获取图片的链接\
                    image_link = image_table_td.a.img.get('src')
                    #print('图片连接:', image_link)
                    #download_file(image_link,'F:\FloraData\images\\' + str(get_max_number(image_link)) + '.jpg')
                    if image_table_td.a.next_sibling :
                        print('当前物种的拉丁名及链接等:',image_table_td.a.next_sibling.get('href'),image_table_td.a.next_sibling.text)
                        if image_table_td.a.next_sibling.next_sibling:
                            print('Credit:',image_table_td.a.next_sibling.next_sibling.small.text)
        # remove the image table so it does not pollute the text extraction below
        image_table.extract()
    ###############################################################
    #print(span.b.next_siblings)
    latin_name_object = []
    for wuzh in span.next_element.next_siblings:
        if wuzh.name == 'p':
            continue
        if wuzh.name == 'a': # a bare link means a "See <other taxon>" redirect, e.g. See Isoëtaceae # http://www.efloras.org/florataxon.aspx?flora_id=2&taxon_id=20790
            latin_name_object = []
            latin_name_object.append(wuzh)
            break
        if wuzh.name == 'small' :
            volume_content['small'] = wuzh.string.strip('\n\r ')
            continue
        if wuzh.string is not None and wuzh.string.strip('\n\r '):
            latin_name_object.append(wuzh)
        #else:
        #    print(repr(wuzh).strip(['\n', ' ', '\r\n']))
    print(latin_name_object)
    if len(latin_name_object) > 1:
        if latin_name_object[0].name is None: # an untagged first token is the sequence number, e.g. "1.", "7a"
            volume_content['xuhao'] = latin_name_object[0].string.strip('\n\r ')
        else:
            volume_content['xuhao'] = ''
        if latin_name_object[len(latin_name_object)-1].name is None : # an untagged last token is the literature citation, e.g. "(Blume) Tagawa, Acta Phytotax. Geobot. 7: 83. 1938."
            volume_content['latin_name'] = ' '.join(list(latin.string.strip('\n\r ') for latin in latin_name_object[1:len(latin_name_object)-1] ))
        else:
            volume_content['latin_name'] = ' '.join(list(latin.string.strip('\n\r ') for latin in latin_name_object[1:]))
    else:
        volume_content['xuhao'] = ''
        volume_content['latin_name'] = ' '.join(list(latin.string.strip('\n\r ') for latin in latin_name_object))
    #volume_content['xuhao'] = latin_name[0]
    #print(span.b.next_sibling) #当前物种信息的物种拉丁名
    #print(span.b.find_next_sibling("p").contents[0].strip())
    volume_content['latin_name_full'] = span.b.next_sibling.strip()
    #print(span.b.find_next_sibling("p"))
    #print('-----------------------')
    #print(span.b.find_next_sibling("p").contents[0])
    zh_name_and_pinyin = span.b.find_next_sibling("p").contents[0]
    if is_all_zh(zh_name_and_pinyin): # contains Chinese: first token is the name, the rest is pinyin
        print('#######################')
        print(zh_name_and_pinyin.split(' ')[0].strip())
        print(' '.join(zh_name_and_pinyin.split(' ')[1:]))
        #print(re.sub('[A-Za-z0-9\!\%\[\]\,\。\(\)]', '', zh_name_and_pinyin))
        #print(' '.join(re.findall(r'[A-Za-z\(\)]+', zh_name_and_pinyin)))
        volume_content['zh_name'] = zh_name_and_pinyin.split(' ')[0].strip()
        volume_content['zh_name_pinyin'] = ' '.join(zh_name_and_pinyin.split(' ')[1:]).strip()
    else:
        volume_content['zh_name'] = ''
        volume_content['zh_name_pinyin'] = zh_name_and_pinyin.strip()
    #authors = span.b.find_next_sibling("p").p.next_element #获取下面一个直接字符串
    spdesc_p_list = span.b.find_next_sibling("p").p
    #print('##############################################')
    #print(spdesc_p_list)
    #print('##############################################')
    #print(spdesc_p_list.find_all('a',recursive=False))
    authors_list = []
    authors_id_list = []
    for author in spdesc_p_list.find_all('a',recursive=False):
        #print(author.text,author.get('href'))
        authors_list.append(author.text)
        authors_id_list.append(str(get_max_number(author.get('href'))))
    volume_content['authors'] = ';'.join(authors_list)
    volume_content['authors_id'] = ';'.join(authors_id_list)
    #print(authors.find_all('p',recursive=False)[0].prettify())
    spdescs = spdesc_p_list.find_all('p',recursive=False)
    #print(spdescs)
    print('##############################################')
    if len(spdescs) > 0:
        # rebuild the treatment text, keeping <i>/<b> markup inline
        specs_context = ''
        table = spdescs[0].select_one('table')
        if table is not None:
            #print(table.find_all('a'))
            for s in table.next_sibling.next_sibling.strings:
                #print(repr(s),type(s),s.parent.name=='i')
                if s.parent.name == 'i':
                    specs_context = specs_context + '<i>' + s.strip('\n') + '</i>'
                else:
                    specs_context = specs_context + s.strip('\n')
        else :
            #print(spdescs[0].strings)
            for s in spdescs[0].strings:
                #print(s)
                if s.parent.name == 'i':
                    specs_context = specs_context + '<i>' + s.strip('\n') + '</i>'
                else:
                    if s.parent.name == 'b':
                        specs_context = specs_context + '<b>' + s.strip('\n') + '</b>'
                    specs_context = specs_context + s.strip('\n')
        #print(specs_context.strip())
        #print('##############################################')
        #print(specs_context.strip())#获取正文内容
        volume_content['content'] = specs_context.strip()
        #volume_content['create_date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # sql = "insert into volume_content (`content`,`create_date`,`del_flag`) values ('%s','%s','%s')"
        # sql = sql % (specs_context.strip(), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 0)
        # pid = insertMysql(sql)
        #print('##############################################')
    print(volume_content)
    wuzhong_detail_sql(volume_content)
    table_jiansuobiao = div_context[0].find_all('table',id='tableKey') # identification key table
    if len(table_jiansuobiao) > 0:
        trs_jiansuobiao = table_jiansuobiao[0].find_all('tr')
        table_jsb = trs_jiansuobiao[1].find_all('table')
        if len(table_jsb) > 0:
            trs_jsb = table_jsb[0].find_all('tr')
            for tr in trs_jsb:
                tds_jsb = tr.find_all('td')
                tds_jxb_cs = tds_jsb[3].contents;
                goto_no = ''
                goto_id = ''
                for tds_jxb_c in tds_jxb_cs:
                    #print(tds_jxb_c.name)
                    if tds_jxb_c.name == 'a':
                        tds_jxb_c_href = tds_jxb_c.get('href')
                        tds_jxb_c_s = tds_jxb_c.string;
                        if tds_jxb_c_s is not None:
                            #print(tds_jxb_c)
                            goto_id = tds_jxb_c_href + '='+ tds_jxb_c_s
                        else:
                            goto_id = tds_jxb_c_href
                    else :
                        goto_no = tds_jxb_c
                #print(tds_jsb[0].text,tds_jsb[1].text,tds_jsb[2].text,goto_no,goto_id)
    ############################################################################################
    ###lower_taxa_ul = div_context[0].select_one('ul')#获取当前物种的下级物种信息
    ###print(lower_taxa_ul)
    # if lower_taxa_ul is not None:
    #     for li in lower_taxa_ul.find_all('li'):
    #         lower_taxa_a = li.select_one('a')
    #         #print(lower_taxa_a.get('href'),lower_taxa_a.b.string,lower_taxa_a.b.next_sibling)
    ############################################################################################
    related_objects = div_context[0].select_one('span[id="lblObjectList"]')
    #print(related_objects)
    if related_objects is not None:
        related_objects_trs = related_objects.find_all('tr')
        #print(related_objects_trs)
        for related_objects_tr in related_objects_trs:
            related_objects_tds = related_objects_tr.find_all('td')
            if len(related_objects_tds) == 2:
                related_objects_td_li = related_objects_tds[0].li
                if related_objects_td_li is not None:
                    li_a = related_objects_td_li.a
                    print(li_a.text,li_a.get('href'))
                else:
                    print(related_objects_tds[0].text)
                print(related_objects_tds[1].text)
    else:
        print('采集错误')
def get_foc_vol_list(url='http://www.efloras.org/index.aspx'): # scrape the FOC volume list from the eFloras home page
    """Return the absolute URLs of every FOC volume listed on *url*.

    FOC is the third <ul> inside span#lblFloraList; the first link of its
    second <li> is skipped (it is the flora itself, not a volume).
    """
    context = getHtml(url)
    context_soup = BeautifulSoup(context, "html.parser")
    span = context_soup.find_all('span',id='lblFloraList')
    url_list = []
    #print(span)
    if len(span) > 0:
        ul_list = span[0].find_all('ul')
        li_list = ul_list[2].find_all('li') # FOC sits at index 2 of ul_list
        a_list = li_list[1].find_all('a')
        print(a_list)
        for a in a_list[1:]: #a_list[1:]:
            a_href = a.get('href')
            print(' Volume :',a.text)
            #sql = "insert into volume (`url`,`volume_id`,`volume_no`,`create_date`,`create_by`,`del_flag`) values ('%s','%s','%s','%s','%s','%s')"
            if a_href is not None:
                url_list.append('http://www.efloras.org/' + a_href)
                volume_id = get_max_number(a_href)
                print('volume_id',str(volume_id))
            else:
                print('获取不到volume信息')
    else:
        print('未找到FOC卷册列表')
    return url_list
def get_foc_volume_list(volumes,index_url = 'http://www.efloras.org/',level = 0): # recursively crawl family/genus/species listing pages
    """Crawl each listing page in *volumes*, store rows, and recurse.

    *level* tracks the taxonomic depth (1 = family after the first
    increment). For every taxon row the listing data is saved via
    wuzhong_list_sql(), the treatment page is scraped with
    get_ke_context(), and the "lower taxa" link is followed recursively.
    Related resources (PDF/Treatment links) of the page are also read.
    """
    #url_list = []
    level = level + 1 # level 1 starts at the family rank
    for vol in volumes:
        context = getHtml(vol)
        if context is None:
            continue
        context_soup = BeautifulSoup(context, "html.parser")
        div = context_soup.find_all('div', id='ucFloraTaxonList_panelTaxonList')
        volumeInfo = context_soup.select_one('span[id="ucVolumeInfo_lblVolumeInfo"]')
        volume_map = []
        if volumeInfo is not None:
            volumeInfo_table_trs = volumeInfo.table.find_all('tr')
            if len(volumeInfo_table_trs) > 0:
                for volumeInfo_table_tr in volumeInfo_table_trs:
                    volumeInfo_table_tds = volumeInfo_table_tr.find_all('td')
                    if len(volumeInfo_table_tds) == 2:
                        volume_map.append(volumeInfo_table_tds[1].text)
                    else:
                        volume_map.append('')
        if len(volume_map) != 5:
            # pad to exactly 5 slots: title, families, genera, species, online date
            for i in range(5-len(volume_map)): volume_map.append('')
        #print(volume_map)
        foc_taxon_chain = context_soup.select_one('span[id="ucFloraTaxonList_lblTaxonChain"]')
        parent_links = foc_taxon_chain.find_all('a')
        volume_list = foc_taxon_chain.find_all('a', href=re.compile("volume_id"), recursive=False)
        print(volume_list)
        if len(div) > 0:
            tr_list = div[0].find_all('tr',class_='underline')
            for tr in tr_list[2:]:
                td_list = tr.find_all('td') # families have 4 columns, other ranks 5; one row per taxon
                wuzhong_list = {}
                wuzhong_list['parent_taxon_id'] = get_max_number(vol)
                wuzhong_list['type'] = str(level)
                wuzhong_list['type_name'] = ''
                wuzhong_list['taxon_name'] = ''
                wuzhong_list['title'] = volume_map[0]
                wuzhong_list['families'] = volume_map[1]
                wuzhong_list['genera'] = volume_map[2]
                wuzhong_list['speces'] = volume_map[3]
                wuzhong_list['online_date'] = volume_map[4]
                wuzhong_list['taxon_id'] = td_list[0].text.strip()
                wuzhong_list['accepted_name'] = td_list[1].text.strip()
                wuzhong_detail_link_a = td_list[1].select_one('a')
                if wuzhong_detail_link_a:
                    wuzhong_list['accepted_name_url'] = index_url + wuzhong_detail_link_a.get('href')
                else:
                    wuzhong_list['accepted_name_url'] = ''
                wuzhong_list['accepted_name_cn'] = td_list[2].text.strip()
                wuzhong_list['lower_taxa'] = td_list[3].text.strip()
                lower_taxa_link_a = td_list[3].select_one('a')
                if lower_taxa_link_a:
                    wuzhong_list['lower_taxa_url'] = index_url + lower_taxa_link_a.get('href')
                else:
                    wuzhong_list['lower_taxa_url'] = ''
                if len(td_list) == 4:
                    # family rows carry no volume column; take it from the breadcrumb
                    if len(volume_list) == 1:
                        wuzhong_list['volume_no'] = get_max_number(volume_list[0].get('href'))
                        wuzhong_list['volume_name'] = volume_list[0].text
                    else:
                        wuzhong_list['volume_no'] = 0
                        wuzhong_list['volume_name'] = 0
                if len(td_list) == 5:
                    volume_link_a = td_list[4].select_one('a')
                    if volume_link_a:
                        wuzhong_list['volume_no'] = get_max_number(volume_link_a.get('href'))
                        wuzhong_list['volume_name'] = volume_link_a.text
                    else:
                        wuzhong_list['volume_no'] = 0
                        wuzhong_list['volume_name'] = 0
                print(wuzhong_list)
                wuzhong_list_sql(wuzhong_list)
                if wuzhong_list['accepted_name_url'] :
                    print('开始采集详细内容:',wuzhong_list['accepted_name_url'])
                    get_ke_context(wuzhong_list['accepted_name_url'])
                if wuzhong_list['lower_taxa_url'] :
                    print('开始采集:',wuzhong_list['accepted_name_cn'],' 的下级内容', wuzhong_list['accepted_name_url'])
                    url_list = []
                    url_list.append(wuzhong_list['lower_taxa_url'])
                    get_foc_volume_list(url_list,index_url,level)
        else:
            print('无法找到')
        volume_related_links_table = context_soup.find_all('table', id='ucVolumeResourceList_dataListResource')
        #print(volume_related_links_table)
        if len(volume_related_links_table) > 0:
            #print(volume_related_links_table[0])
            volumes_relateds = volume_related_links_table[0].find_all('tr',recursive=False) # only direct child rows
            if len(volumes_relateds) > 0:
                #print(volumes_relateds)
                for volume in volumes_relateds[1:]:
                    trs=volume.find_all('tr')
                    if len(trs) > 0:
                        tds = trs[0].find_all('td')
                        if len(tds) > 1:
                            a = tds[0].select_one('a')
                            href = a.get('href')
                            print('--------',a.text,' ',href)
                            print('=====',tds[1].text)
                            sql1 = "insert into volume_related_links (`taxid`,`type`,`url`,`title`,`resource_type`,`files`,`create_date`,`create_by`,`del_flag`) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s')"
                            if tds[1].text.strip() == 'PDF':
                                paths = href.split('/')
                                print(paths)
                                #download_file(href,'f://FloraData//' + paths[len(paths)-1])
                                #sql1 = sql1 % (vol.split('&')[0]),tds[1].text,href,a.text,paths[len(paths)-1],datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),'luoxuan',0)
                                sql1 = sql1 % (re.sub("\D", "", vol.split('&')[0]),tds[1].text,href,a.text,'PDF',paths[len(paths)-1],datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),'luoxuan',0)
                            else: #tds[1].text == 'Treatment'
                                #get_ke_context(href)
                                sql1 = sql1 % (re.sub("\D", "", vol.split('&')[0]),tds[1].text,href,a.text,'',tds[1].text,
                                               datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'luoxuan', 0)
                            #print(insertMysql(sql1))
        else:
            print('无法找到volume_related_links')
def get_max_number(str): # largest number embedded in a link
    """Return the largest integer found in *str* (typically an URL).

    Bug fix: the original pattern ("digits, optional dot, digits") could
    match a decimal such as "2.5", which then crashed in int(). Matching
    plain digit runs is equivalent for the integer-only ids this crawler
    extracts and never raises ValueError. Still raises ValueError (from
    max() on an empty sequence) when *str* contains no digits, as before.
    """
    return max(map(int, re.findall(r"\d+", str)))
def is_all_zh(s): # does the string contain any Chinese?
    """Return True when *s* contains at least one CJK character.

    Despite the name, this tests for *any* Chinese character in the
    string, not that all characters are Chinese.
    """
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in s)
def insert_related_objects(related_objects): # insert a related-resource row, returning its id
    """Insert one related-resource record into volume_related_links.

    Returns the auto-increment id of the new row (0 on failure, per
    insertMysql). Values are %-interpolated, matching the rest of this
    module's SQL helpers.
    """
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    template = ("insert into volume_related_links (`taxon_id`,`parent_taxon_id`,"
                "`type`,`url`,`parent_title`,`title`,`content`,`resource_type`,"
                "`files`,`create_date`,`create_by`,`del_flag`) "
                "values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')")
    row = (related_objects['taxon_id'], related_objects['parent_taxon_id'],
           related_objects['type'], related_objects['url'],
           related_objects['parent_title'], related_objects['title'],
           related_objects['content'], related_objects['resource_type'],
           related_objects['files'], now, 'luoxuan', 0)
    return insertMysql(template % row)
def insert_jiansuobiao(jiansuobiao): # insert one identification-key row, returning its id
    """Insert one identification-key entry; return the new row id.

    NOTE(review): the target table is `volume_related_links` although the
    columns (first_no, goto_taxon_id, ...) look like a key table — this
    may be a copy-paste of the wrong table name; confirm the schema.
    """
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    template = ("insert into volume_related_links (`taxon_id`,`first_no`,"
                "`first_no2`,`content`,`no_name`,`second_no`,`latin_name`,"
                "`goto_taxon_id`,`goto_taxon_url`,`create_date`,`create_by`,`del_flag`) "
                "values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')")
    row = (jiansuobiao['taxon_id'], jiansuobiao['first_no'],
           jiansuobiao['first_no2'], jiansuobiao['content'],
           jiansuobiao['no_name'], jiansuobiao['second_no'],
           jiansuobiao['latin_name'], jiansuobiao['goto_taxon_id'],
           jiansuobiao['goto_taxon_url'], now, 'luoxuan', 0)
    return insertMysql(template % row)
def wuzhong_detail_sql(volume_content): # persist one scraped treatment record, returning its id
    """Insert one scraped taxon treatment into volume_content.

    'small' is optional in the scraped dict and defaults to ''.
    Returns the auto-increment id (0 on failure, per insertMysql).
    """
    small = volume_content.get('small', '')
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    template = ("insert into volume_content (`url`,`content`,`taxon_id`,`parent_taxon_id`,"
                "`xuhao`,`latin_name`,`latin_name_full`,`zh_name`,`zh_name_pinyin`,"
                "`authors`,`authors_id`,`volume_id`,`volume`,`volume_title`,`create_date`,"
                "`del_flag`,`small`) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s',"
                "'%s','%s','%s','%s','%s','%s','%s','%s')")
    row = (volume_content['url'], volume_content['content'],
           volume_content['taxon_id'], volume_content['parent_taxon_id'],
           volume_content['xuhao'], volume_content['latin_name'],
           volume_content['latin_name_full'], volume_content['zh_name'],
           volume_content['zh_name_pinyin'], volume_content['authors'],
           volume_content['authors_id'], volume_content['volume_id'],
           volume_content['volume'], volume_content['volume_title'],
           now, 0, small)
    return insertMysql(template % row)
def wuzhong_list_sql(wuzhong_list):
    """Insert one taxon listing row into volume_ke and return the new row id."""
    field_order = ('parent_taxon_id', 'type', 'type_name', 'taxon_id',
                   'taxon_name', 'accepted_name', 'accepted_name_url',
                   'accepted_name_cn', 'lower_taxa', 'lower_taxa_url',
                   'volume_no', 'volume_name', 'title', 'families',
                   'genera', 'speces', 'online_date')
    values = tuple(wuzhong_list[field] for field in field_order) + (
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'luoxuan', 0)
    sql = "insert into volume_ke (`parent_taxon_id`,`type`,`type_name`,`taxon_id`,`taxon_name`,`accepted_name`,`accepted_name_url`,`accepted_name_cn`,`lower_taxa`,`lower_taxa_url`,`volume_no`,`volume_name`,`title`,`families`,`genera`,`speces`,`online_date`,`create_date`,`create_by`,`del_flag`) values ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % values
    return insertMysql(sql)
def download_file(url, path): # Download a remote file to a local path
    """Fetch *url* and write the response body to *path*.

    :param url: HTTP(S) address of the file to download.
    :param path: local filesystem path the content is written to (binary).
    :raises requests.HTTPError: on a non-2xx response, instead of silently
        saving the error page to disk as the old implementation did.
    """
    print('Download file:', url, path)
    response = requests.get(url, stream=True)
    # Fail loudly on 404/500 rather than writing the error body to *path*.
    response.raise_for_status()
    with open(path, "wb") as out:
        # Stream in chunks so large files need not fit in memory at once.
        for chunk in response.iter_content(chunk_size=64 * 1024):
            if chunk:
                out.write(chunk)
if __name__ == '__main__':
    # Script entry point. Only the two get_foc_* calls below are live; all
    # other lines are commented-out scratch/debug invocations kept verbatim
    # from the original author.
    #search_isbn()
    #print(html)
    #read07Excel('C:/Users/dell/Desktop/书单:PDA_全库(2015)_20180621 科学文库书单第二版2.xlsx')
    #get_page_html()
    #get_ke_context('http://www.efloras.org/florataxon.aspx?flora_id=2&taxon_id=250098342')
    #get_ke_context('http://www.efloras.org/florataxon.aspx?flora_id=2&taxon_id=20790')
    #get_text_docx()
    #read07_excel('C:/Users/dell/Desktop/高等二卷.xlsx')
    #mings = ['f','fsdf','fsdf1','fsdfs','fsdfs']
    #print(mings[2:len(mings)])
    # i = 0
    # datas = get_foc();
    # for data in datas:
    # i = i + 1
    # print(data)
    # if i >= 10:break
    # Fetch the Flora-of-China volume index, then crawl each volume's list.
    lists = get_foc_vol_list()
    ##print(lists)
    get_foc_volume_list(lists)
    #print(getHtml('http://flora.huh.harvard.edu/FloraData/002/Vol11/foc11-Preface.htm'))
    #print(get_page_html())
    #vol = 'http://www.efloras.org/browse.aspx?flora_id=2&start_taxon_id=103074,volume_page.aspx?volume_id=2002&flora_id=2'
    #print(is_all_zh('剑叶铁角蕨 jian ye tie jiao jue'))
    #print(is_all_zh('jian ye tie jiao jue'))
    #print(re.findall(r"\d+\.?\d*",vol),get_max_number(vol))
"1QAZ2wsx"
] | 1QAZ2wsx |
68ae33c92faff858b27bc9a95c7b7ab370f1c58e | 930e76d01a4674a46f6927a382465d08ebfff536 | /src/core/database.py | 8edc98348388d0577f18d87edd316f9b6ea6f2e9 | [
"BSD-3-Clause"
] | permissive | Glacier-Ice/data-sci-api | 6ed88f4530ee071a77745d88189ff6bc83bf0932 | ddd8c1776a2c52f7c6c9d59cab9836a5f8926bc2 | refs/heads/master | 2023-05-10T21:06:22.157143 | 2020-05-11T18:15:54 | 2020-05-11T18:15:54 | 240,423,916 | 5 | 3 | BSD-3-Clause | 2023-05-01T21:22:46 | 2020-02-14T03:56:40 | Python | UTF-8 | Python | false | false | 1,056 | py | import psycopg2
from flask import current_app
def _render_settings_from_current_config():
    """Map the active Flask app's config onto psycopg2 connection kwargs."""
    cfg = current_app.config
    mapping = {
        "database": "db_name",
        "user": "db_username",
        "password": "db_password",
        "host": "db_host",
        "port": "db_port",
    }
    return {target: cfg[source] for target, source in mapping.items()}
def query(sql: str, db_settings: dict = None, **sql_params) -> list:
    """Execute SQL against the configured database and return all rows.

    :param sql: the statement to run. Bind values through SQL_PARAMS and
        NEVER via Python string formatting, to avoid SQL injection attacks.
    :param db_settings: psycopg2 connection kwargs; when omitted they are
        rendered from the current Flask app's config.
    :returns: list of result tuples from ``cursor.fetchall()``.
    """
    if not db_settings:
        db_settings = _render_settings_from_current_config()
    conn = psycopg2.connect(**db_settings)
    try:
        # psycopg2's ``with conn`` only wraps a transaction (commit on
        # success, rollback on error); it does NOT close the connection.
        # Close explicitly so each call does not leak a server connection.
        with conn:
            with conn.cursor() as cursor:
                cursor.execute(sql, sql_params)
                return cursor.fetchall()
    finally:
        conn.close()
def get_tables() -> list:
    """List the table names in the public schema of the current database."""
    statement = """SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'"""
    return query(sql=statement)
| [
"rexwangcc@gmail.com"
] | rexwangcc@gmail.com |
0be6afb5db488b9ad36be9deefede6211c4a3f37 | a02bd3d7ad77d0994a495da9870113591db13444 | /Part2/calc2.py | fe6dc22c6479f17cc025dfa8ec864419294ee75f | [] | no_license | devjunhong/simpleInterpreter | df7807f3460adb5898b7ce2b38c1ee6041e6eb42 | 8793711eaef02a9f29f201393d88c03f866d3512 | refs/heads/master | 2020-04-01T16:15:10.126792 | 2019-02-13T02:52:00 | 2019-02-13T02:52:00 | 153,372,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | # Token types
# EOF (end-of-file) token is used to indicate that
# there is no more input left for lexical analysis
INTEGER, PLUS, MINUS, EOF = 'INTEGER', 'PLUS', 'MINUS', 'EOF'
class Token(object):
    """A single lexical token: a type tag plus the literal value."""

    def __init__(self, type, value):
        # One of INTEGER, PLUS, MINUS, or EOF.
        self.type = type
        # Non-negative integer value, '+', '-', or None for EOF.
        self.value = value

    def __str__(self):
        """Readable form, e.g. ``Token(INTEGER, 3)`` or ``Token(PLUS, '+')``."""
        return 'Token({type}, {value})'.format(
            type=self.type, value=repr(self.value))

    # repr and str render identically for tokens.
    __repr__ = __str__
class Interpreter(object):
    """Lexes and evaluates a single ``INTEGER (+|-) INTEGER`` expression."""

    def __init__(self, text):
        # Raw client input, e.g. "3 + 5" or "12 - 5".
        self.text = text
        # Index into self.text of the character being examined.
        self.pos = 0
        # Most recently recognised token; populated by expr().
        self.current_token = None
        self.current_char = self.text[self.pos]

    def error(self):
        raise Exception('Error parsing input')

    def advance(self):
        """Step to the next character; None marks end of input."""
        self.pos += 1
        if self.pos < len(self.text):
            self.current_char = self.text[self.pos]
        else:
            self.current_char = None

    def skip_whitespace(self):
        while self.current_char is not None and self.current_char.isspace():
            self.advance()

    def integer(self):
        """Consume consecutive digits and return them as an int."""
        digits = []
        while self.current_char is not None and self.current_char.isdigit():
            digits.append(self.current_char)
            self.advance()
        return int(''.join(digits))

    def get_next_token(self):
        """Tokenizer (scanner): return the next Token from the input."""
        while self.current_char is not None:
            char = self.current_char
            if char.isspace():
                self.skip_whitespace()
            elif char.isdigit():
                return Token(INTEGER, self.integer())
            elif char == '+':
                self.advance()
                return Token(PLUS, '+')
            elif char == '-':
                self.advance()
                return Token(MINUS, '-')
            else:
                self.error()
        return Token(EOF, None)

    def eat(self, token_type):
        """Consume the current token if it matches TOKEN_TYPE, else fail."""
        if self.current_token.type != token_type:
            self.error()
        self.current_token = self.get_next_token()

    def expr(self):
        """Parse and evaluate ``INTEGER (PLUS|MINUS) INTEGER``."""
        self.current_token = self.get_next_token()

        # First operand must be an integer.
        left = self.current_token
        self.eat(INTEGER)

        # Then either '+' or '-'.
        op = self.current_token
        self.eat(PLUS if op.type == PLUS else MINUS)

        # Second operand must be an integer; eat() leaves EOF current.
        right = self.current_token
        self.eat(INTEGER)

        # The full INTEGER op INTEGER sequence was found: interpret it.
        if op.type == PLUS:
            return left.value + right.value
        return left.value - right.value
def main():
    """Simple REPL: read one expression per line and print its value.

    Works on both Python 2 (raw_input) and Python 3 (input); the original
    raised NameError under Python 3 because it hard-coded raw_input even
    though the rest of the file uses Python-3-style print().
    """
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input  # Python 3: raw_input does not exist
    while True:
        try:
            text = read_line('calc> ')
        except EOFError:
            break
        if not text:
            # Blank line: prompt again instead of parsing empty input.
            continue
        interpreter = Interpreter(text)
        result = interpreter.expr()
        print(result)


if __name__ == '__main__':
    main()
"junhong.kim@milliman.com"
] | junhong.kim@milliman.com |
4161eecca6148d937ab2bcd601a934e81e885d24 | 2d7c6461c6af13c3938e91de09883e3e817fc21b | /connectedjuniors/posts/migrations/0004_auto_20201007_1942.py | c34132668362412b2c94314c4768cda0ae1f2c89 | [] | no_license | manishthakurhere/connectedjuniors | 64bcbfc1cc261be4f242fe373ad115ef865233e7 | 05abef607069b0c87ebd42770aa18aa5c4edbb43 | refs/heads/master | 2023-02-05T10:40:40.691471 | 2020-12-16T16:05:51 | 2020-12-16T16:05:51 | 322,033,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # Generated by Django 3.1 on 2020-10-07 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; creates the Category model and adds a plain
    # CharField 'category' to Post. Avoid editing the operations by hand —
    # the migration graph records this exact state.

    dependencies = [
        ('posts', '0003_auto_20201007_1533'),
    ]

    operations = [
        # New lookup table of category names.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        # Post.category is stored as free text (not a FK to Category);
        # existing rows receive the default 'uncategorized'.
        migrations.AddField(
            model_name='post',
            name='category',
            field=models.CharField(default='uncategorized', max_length=255),
        ),
    ]
| [
"manishthakurhere@gmail.com"
] | manishthakurhere@gmail.com |
430f1f7f8a7c02429470b2c79150c172f4170511 | 24a40c0abd0d363318360081dd7a4f9f4ed2a5d1 | /Developer/Python_Definitivo/Exercícios/Listas - Refeito (parte 2)/Ex 83 – Validando expressões matemáticas.py | 38332393008ceef86a4b359c8eefcc384f0d504c | [] | no_license | andrelima19/Projetos_Python | c4c399975f6e2f8755311008d1af87f9a0554963 | 4e7e96c19379625cb498f28a3eabc30cbd514259 | refs/heads/main | 2023-07-26T04:40:37.879183 | 2021-08-31T00:24:32 | 2021-08-31T00:24:32 | 343,010,230 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # Exercício Python 083: Crie um programa onde o usuário digite uma expressão qualquer que use parênteses.
# Seu aplicativo deverá analisar se a expressão passada está com os parênteses abertos e fechados na ordem correta.
| [
"a.andreluislima@gmail.com"
] | a.andreluislima@gmail.com |
bc35d37cce8170a1fc6e960d5ed877d19de0450d | 00377b7f3f704b26262a2bc8ed1e2661c3cc22ee | /Input_Output/1.py | edc6e4cb0d82bb938b21810d4b98f8cbff82f630 | [] | no_license | canshot/selflearning-Python-Ruby-Jaewan | 453d1a8a41d5746f5e6646616591bd7c6f0b335f | 6ea8a507620290a444794688360b1089d68b25d0 | refs/heads/master | 2021-09-04T02:35:43.705676 | 2018-01-14T19:26:49 | 2018-01-14T19:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | in_str = input("insert your PW")
print(in_str.upper()+" World!")
| [
"limjaewan@Lab448Print.local"
] | limjaewan@Lab448Print.local |
77396e5d3c927e5543de2e57216c4e90e4e2a686 | 6c47debab1dda6ca5c10ff450785152d5a5551c0 | /CSV_Data_Process.py | 3425804ad7df230217bcea2e7f138572e61ea1ee | [] | no_license | bishalkunwar/Data_Science_Python | 339402d11ba7b0e1056efc2c244ac33972c9a61a | 1c2ab694f76c8ec56f985bf8309901a93234f136 | refs/heads/main | 2023-03-29T23:15:38.451774 | 2021-04-05T18:48:16 | 2021-04-05T18:48:16 | 354,936,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import pandas as pd
# Load the sample dataset. pandas also provides pd.read_json for .json input
# and pd.read_excel for .xlsx input.
frame = pd.read_csv('try_data.csv')

# Show the whole table, a blank line, then rows 1/2/5 restricted to the
# name and salary columns.
print(frame)
print()
print(frame.loc[[1, 2, 5], ['name', 'salary']])
#Outputs Serially
''' name department salary remarks
0 Bishal It 10000 G:N
1 Ram Manu 12000 G:G
2 Shyam Serv 13000 G:G
3 Hari food 14000 G:G
4 Gita Pantry 15000 B:G
5 Sita no 16000 B:B
name salary
1 Ram 12000
2 Shyam 13000
5 Sita 16000''' | [
"noreply@github.com"
] | noreply@github.com |
1e93379db7739fa2b85b0811535ccec15813f695 | 3014f65daf3a2473cba81dabfce9ed9d81af15ff | /core/models.py | aa1cb33e9a8f540c8f7a9d42b21b88069aff77a7 | [] | no_license | alxpoa/agenda | ab3cc2f449b06544f9d1f183f0c5a0856a8995e9 | 7fbd27e2ecee71ee3fd12910394e1a4a0568db95 | refs/heads/master | 2020-09-27T13:28:15.398827 | 2019-12-09T16:14:45 | 2019-12-09T16:14:45 | 226,528,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Evento(models.Model):
    """Calendar event owned by a user (table 'evento')."""
    # Event title, shown as the object's string form.
    titulo = models.CharField(max_length=100)
    # Optional free-text description.
    descricao = models.TextField(blank=True, null=True)
    # When the event takes place.
    data_evento = models.DateTimeField(verbose_name = "Data do Evento")
    # NOTE(review): auto_now=True refreshes this on EVERY save, but the
    # verbose name says "creation date" — auto_now_add=True looks intended.
    # Changing it requires a migration; confirm before fixing.
    data_criacao = models.DateTimeField(auto_now=True, verbose_name="Data de Criação")
    # Owner; deleting the user cascades to their events.
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        db_table = 'evento'

    def __str__(self):
        return self.titulo

    def get_data_evento(self):
        # Formatted as dd/mm/yyyy hh:mm for display in templates.
        return self.data_evento.strftime('%d/%m/%Y %H:%M Hrs')
"alxpoa@gmail.com"
] | alxpoa@gmail.com |
f1b53459b83dcd24505c18cb42c5cb2963030d4d | e8532a8d86d98eeb32ab15b0410ec15d904ea39f | /profiles/settings.py | 3f32d625bb4d03babd929836ab0343e429b02cb0 | [
"MIT"
] | permissive | yans0o0o0/profiles-api | 87ae4261865a3de25a85f95cd7d3d69cba850912 | 07a11c1c37a87e22b9684d43c7f10e40626848f4 | refs/heads/master | 2022-12-19T03:24:03.714899 | 2020-09-28T21:52:27 | 2020-09-28T21:52:27 | 298,104,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | py | """
Django settings for profiles project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read the key from the environment when available. The hard-coded fallback
# keeps development behaviour unchanged, but since it is committed to source
# control it MUST be overridden via DJANGO_SECRET_KEY in production.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'i15az6n0b%2(@&klir&qy@upz--3h%qx_#_80js(pdfwijn)@_',
)

# SECURITY WARNING: don't run with debug turned on in production!
# Driven by the DEBUG environment variable ("0" disables); defaults to on
# for local development.
DEBUG = bool(int(os.environ.get('DEBUG', 1)))

ALLOWED_HOSTS = ['ec2-3-19-229-167.us-east-2.compute.amazonaws.com', '127.0.0.1']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    # Third party apps
    'rest_framework',
    'rest_framework.authtoken',

    # Our Apps
    'profile_api',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'profiles.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'profiles.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = 'static/'

# By default Django uses "User" as the User Model.
# We override it so our UserProfile model is the auth user model.
AUTH_USER_MODEL = 'profile_api.UserProfile'
| [
"yansiverio@gmail.com"
] | yansiverio@gmail.com |
cdbe6cb7658953c0b092727fc4fecf6341ecc704 | a98e9a494cbc8dc4319fea3986e805ca08c61356 | /custom_model_runner/datarobot_drum/resource/predict_mixin.py | 144c26eae3cf89b305411794078dc56edd074227 | [
"Apache-2.0"
] | permissive | drdwa/datarobot-user-models | e24b91adee91b9af2e6d730ebf4aae79c7dc1ea5 | b890f88975c67c90bd5f3aef3abef872591ad295 | refs/heads/master | 2023-01-15T00:35:52.654073 | 2020-11-23T20:54:45 | 2020-11-23T20:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,192 | py | import os
import tempfile
from flask import request, Response
import werkzeug
from datarobot_drum.drum.common import (
REGRESSION_PRED_COLUMN,
TargetType,
UnstructuredDtoKeys,
PredictionServerMimetypes,
)
from datarobot_drum.resource.unstructured_helpers import (
_resolve_incoming_unstructured_data,
_resolve_outgoing_unstructured_data,
)
from datarobot_drum.drum.server import (
HTTP_200_OK,
HTTP_422_UNPROCESSABLE_ENTITY,
)
class PredictMixin:
"""
This class implements predict flow shared by PredictionServer and UwsgiServing classes.
This flow assumes endpoints implemented using Flask.
"""
def do_predict(self, logger=None):
response_status = HTTP_200_OK
file_key = "X"
filestorage = request.files.get(file_key)
if not filestorage:
wrong_key_error_message = (
"Samples should be provided as a csv, mtx, or arrow file under `{}` key.".format(
file_key
)
)
if logger is not None:
logger.error(wrong_key_error_message)
response_status = HTTP_422_UNPROCESSABLE_ENTITY
return {"message": "ERROR: " + wrong_key_error_message}, response_status
else:
if logger is not None:
logger.debug("Filename provided under X key: {}".format(filestorage.filename))
_, file_ext = os.path.splitext(filestorage.filename)
with tempfile.NamedTemporaryFile(suffix=file_ext) as f:
filestorage.save(f)
f.flush()
out_data = self._predictor.predict(f.name)
if self._target_type == TargetType.UNSTRUCTURED:
response = out_data
else:
num_columns = len(out_data.columns)
# float32 is not JSON serializable, so cast to float, which is float64
out_data = out_data.astype("float")
if num_columns == 1:
# df.to_json() is much faster.
# But as it returns string, we have to assemble final json using strings.
df_json = out_data[REGRESSION_PRED_COLUMN].to_json(orient="records")
response = '{{"predictions":{df_json}}}'.format(df_json=df_json)
else:
# df.to_json() is much faster.
# But as it returns string, we have to assemble final json using strings.
df_json_str = out_data.to_json(orient="records")
response = '{{"predictions":{df_json}}}'.format(df_json=df_json_str)
response = Response(response, mimetype=PredictionServerMimetypes.APPLICATION_JSON)
return response, response_status
def do_predict_unstructured(self, logger=None):
def _validate_content_type_header(header):
ret_mimetype, content_type_params_dict = werkzeug.http.parse_options_header(header)
ret_charset = content_type_params_dict.get("charset")
return ret_mimetype, ret_charset
response_status = HTTP_200_OK
kwargs_params = {}
data = request.data
mimetype, charset = _validate_content_type_header(request.content_type)
data_binary_or_text, mimetype, charset = _resolve_incoming_unstructured_data(
data,
mimetype,
charset,
)
kwargs_params[UnstructuredDtoKeys.MIMETYPE] = mimetype
if charset is not None:
kwargs_params[UnstructuredDtoKeys.CHARSET] = charset
kwargs_params[UnstructuredDtoKeys.QUERY] = request.args
ret_data, ret_kwargs = self._predictor.predict_unstructured(
data_binary_or_text, **kwargs_params
)
response_data, response_mimetype, response_charset = _resolve_outgoing_unstructured_data(
ret_data, ret_kwargs
)
response = Response(response_data)
if response_mimetype is not None:
content_type = response_mimetype
if response_charset is not None:
content_type += "; charset={}".format(response_charset)
response.headers["Content-Type"] = content_type
return response, response_status
| [
"noreply@github.com"
] | noreply@github.com |
a5bc0e8b58908e461baf83e50543d1ce01967306 | e3cc8b7f7dae80eb94d42b810657b10a2be07228 | /zips/plugin.video.metalliq-forqed/resources/lib/meta/navigation/people.py | 114ca73bd85c76b3e5b6c5ae46d4af3a857059fd | [
"Apache-2.0"
] | permissive | southpaw99/repo | 169b356c4773bb16b9e75738a41ee0571d2c7c91 | 2482a17576a4a0c615ea68339d4c12f529485fac | refs/heads/master | 2020-12-15T10:03:05.175325 | 2019-06-29T22:15:15 | 2019-06-29T22:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,066 | py | import meta.navigation.movies
import meta.navigation.tvshows
from meta import plugin, import_tmdb
from trakt import trakt
from xbmcswift2 import xbmcgui
from meta.utils.text import to_utf8
from language import get_string as _
import_tmdb()
@plugin.route('/people/list/show/<id>/<source>/<fanart>')
def people_list_show_people(id, source, fanart):
    """List a show's cast as selectable directory items.

    :param id: show identifier in the scheme named by *source*
        (only "imdb" is supported here).
    :param source: id scheme; anything other than "imdb" shows an error.
    :param fanart: artwork URL propagated onto every list item.
    """
    items = []
    try:
        if source == "imdb":
            people = trakt.get_show_people(id)
        else:
            xbmcgui.Dialog().ok("Error", "No cast info found")
            return plugin.finish(items=[])
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # still propagate instead of being swallowed as "no cast info".
    except Exception:
        xbmcgui.Dialog().ok("Error", "No cast info found")
        return plugin.finish(items=[])
    if "cast" in people:
        for actor in people["cast"]:
            context_menu = [
                (
                    "Convert to bob_xml",
                    "RunPlugin({0})".format(
                        plugin.url_for("bob_convert_person_to_xml", trakt_id=actor["person"]["ids"]["trakt"]))
                )
            ]
            image = get_person_artwork(actor)
            label = "{0} ({1})".format(to_utf8(actor["person"]["name"]), to_utf8(actor["character"]))
            # Use .get: the biography key may be absent from the trakt
            # payload, and a missing bio should not abort the whole listing.
            info = actor["person"].get("biography", "")
            items.append({'label': label,
                          'path': plugin.url_for("people_list_person_select", id=actor["person"]["ids"]["trakt"],
                                                 name=to_utf8(actor["person"]["name"])),
                          'info': info,
                          'thumbnail': image,
                          'poster': image,
                          'context_menu': context_menu,
                          'icon': "DefaultVideo.png",
                          'properties': {'fanart_image': fanart},
                          })
    return plugin.finish(items=items)
@plugin.route('/people/list/movie/<id>/<source>/<fanart>')
def people_list_movie_people(id, source, fanart):
    """List a movie's cast as selectable directory items.

    :param id: movie identifier in the scheme named by *source*
        ("imdb" directly, "tmdb" via a trakt id lookup).
    :param source: id scheme; anything else shows an error dialog.
    :param fanart: artwork URL propagated onto every list item.
    """
    items = []
    try:
        if source == "imdb":
            people = trakt.get_movie_people(id)
        elif source == "tmdb":
            # Translate the TMDB id to an IMDB id through trakt first.
            ids = trakt.find_trakt_ids("tmdb", id)
            if ids:
                people = trakt.get_movie_people(ids["imdb"])
            else:
                xbmcgui.Dialog().ok("Error", "No cast info found")
                return plugin.finish(items=[])
        else:
            xbmcgui.Dialog().ok("Error", "No cast info found")
            return plugin.finish(items=[])
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # still propagate instead of being swallowed as "no cast info".
    except Exception:
        xbmcgui.Dialog().ok("Error", "No cast info found")
        return plugin.finish(items=[])
    if "cast" in people:
        for actor in people["cast"]:
            context_menu = [
                (
                    "Convert to bob_xml",
                    "RunPlugin({0})".format(
                        plugin.url_for("bob_convert_person_to_xml", trakt_id=actor["person"]["ids"]["trakt"]))
                )
            ]
            image = get_person_artwork(actor)
            label = "{0} ({1})".format(to_utf8(actor["person"]["name"]), to_utf8(actor["character"]))
            # Use .get: the biography key may be absent from the trakt
            # payload, and a missing bio should not abort the whole listing.
            info = actor["person"].get("biography", "")
            items.append({'label': label,
                          'path': plugin.url_for("people_list_person_select", id=actor["person"]["ids"]["trakt"],
                                                 name=to_utf8(actor["person"]["name"])),
                          'info': info,
                          'thumbnail': image,
                          'poster': image,
                          'context_menu': context_menu,
                          'icon': "DefaultVideo.png",
                          'properties': {'fanart_image': fanart},
                          })
        return plugin.finish(items=items)
    else:
        xbmcgui.Dialog().ok("Error", "No cast info found")
@plugin.route('/people/<id>/<name>/select')
def people_list_person_select(id, name):
    """Ask whether to browse this person's movies or shows, then dispatch."""
    choice = xbmcgui.Dialog().select("show {0}'s:".format(name), ["movies", "shows"])
    handlers = {0: people_list_person_movies, 1: people_list_person_shows}
    # A cancelled dialog returns -1, which simply falls through.
    if choice in handlers:
        handlers[choice](id)
@plugin.route('/people/<id>/shows')
def people_list_person_shows(id):
    """List every show the person appeared in, or show an error dialog."""
    credits = trakt.get_person_shows(id)
    if not credits["cast"]:
        xbmcgui.Dialog().ok("Error", "No shows found")
        return
    meta.navigation.tvshows.list_trakt_items(credits["cast"], 1, 1)
@plugin.route('/people/<id>/movies')
def people_list_person_movies(id):
    """List every movie the person appeared in, or show an error dialog."""
    credits = trakt.get_person_movies(id)
    if not credits["cast"]:
        xbmcgui.Dialog().ok("Error", "No movies found")
        return
    meta.navigation.movies.list_trakt_movies_plain(credits["cast"])
def get_person_artwork(item):
    """Return a TMDB profile image URL for a cast entry, or a placeholder.

    :param item: trakt cast entry with a ``person.ids.tmdb`` id.
    :returns: full-size TMDB profile URL, or the bundled "unavailable"
        poster when the person has no images or the lookup fails.
    """
    person_tmdb_id = item['person']['ids']['tmdb']
    try:
        person_images = tmdb.People(person_tmdb_id).images()['profiles']
        return 'https://image.tmdb.org/t/p/w640' + person_images[0]['file_path']
    # Narrowed from a bare ``except:``; any lookup problem (missing id,
    # empty profile list, network error) yields the placeholder artwork.
    except Exception:
        return 'https://github.com/metalmagic767/themes/raw/master/metalliq-forqed/default//unavailable_movieposter.png'
| [
""
] | |
67433c53ab09e47fd8566c046cb7de38f32f1cfd | 326940c9e5ca002ec8c3400e45cd6e3cb4c2b98c | /Computational Methods for EE/Assignment 2 - Spline Interpolation/q1-q2/q1.py | 3966a43b671534fdf4680dfe6e199afb7f1a7e11 | [] | no_license | suraj93/IITM-Course-Codes | d33de57b7b8759a8f56d77e6f00d3110cba4c5c6 | ed0ca14cdff0341580122f0d4e1a196f1417e1e4 | refs/heads/master | 2016-09-06T14:05:05.470723 | 2014-12-02T05:24:22 | 2014-12-02T05:24:22 | 24,233,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | import numpy as np
from scipy.special import jn,jv
from matplotlib.pyplot import *
def func(x):
y=(x**(1+jv(0,x)))/(np.sqrt((1+100*(x**2))*(1-x)))
return y
def func_deriv(x):
if x==0:
return 0
num=np.power(x,1+jv(0,x))
dnum=num*(-1*jv(1,x)*np.log(x)+(1+jv(0,x))/x)
den=np.sqrt(1-x+100*(x**2)-100*(x**3))
dden=(-1+200*x-300*(x**2))/(2*den)
df=(den*dnum-dden*num)/((den**2))
return df
x=np.arange(0,0.901,0.05)
y=func(x)
dy=[func_deriv(xx) for xx in x]
print dy
plot(x,dy)
show() | [
"surajh.93@gmail.com"
] | surajh.93@gmail.com |
101b641690e7cda59c300f207ef57d7b4d613baa | ac10ccaf44a7610d2230dbe223336cd64f8c0972 | /ms2ldaviz/basicviz/migrations/0033_auto_20160920_0859.py | b74d76b496f5d8f05e297caac658ce76fd904faf | [] | no_license | ymcdull/ms2ldaviz | db27d3f49f43928dcdd715f4a290ee3040d27b83 | bd5290496af44b3996c4118c6ac2385a5a459926 | refs/heads/master | 2020-05-21T03:04:29.939563 | 2017-03-14T11:44:42 | 2017-03-14T11:44:42 | 84,564,829 | 0 | 0 | null | 2017-03-10T13:54:23 | 2017-03-10T13:54:22 | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basicviz', '0032_auto_20160920_0857'),
]
operations = [
migrations.RemoveField(
model_name='alphacorroptions',
name='multifileexperiment',
),
migrations.DeleteModel(
name='AlphaCorrOptions',
),
]
| [
"="
] | = |
817a7cf6a9e89ea4e451b5c8ec929b7fddd5aca4 | a2764e06558fb659c5a2d919cd4428a5e0905e16 | /env/lib/python2.7/site-packages/github3/notifications.py | 2dc3747c10093d227db0564d9d13e900cbacd8f3 | [] | no_license | jesicamarquez/spotify-api-project | 89cbc98a330dcf1a2624df01240427f9b467cbc6 | 075739441b93875450d664c078738686bae351e8 | refs/heads/master | 2021-01-20T12:12:27.727693 | 2015-03-04T02:23:24 | 2015-03-04T02:23:24 | 28,785,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,316 | py | # -*- coding: utf-8 -*-
"""
github3.notifications
=====================
This module contains the classes relating to notifications.
See also: http://developer.github.com/v3/activity/notifications/
"""
from __future__ import unicode_literals
from json import dumps
from github3.models import GitHubCore
class Thread(GitHubCore):
    """Wrapper around a single GitHub notification thread.

    Exposes the repository that generated the notification, its subject,
    and the delivery reason. Two instances compare equal exactly when
    their ``id`` attributes are equal, so ``t1 == t2`` and ``t1 != t2``
    behave like ``t1.id == t2.id`` and ``t1.id != t2.id``.

    See also:
    http://developer.github.com/v3/activity/notifications/#view-a-single-thread
    """

    def __init__(self, notif, session=None):
        super(Thread, self).__init__(notif, session)
        get = notif.get
        self._api = get('url')
        #: Comment responsible for the notification
        self.comment = get('comment', {})
        #: Thread information
        self.thread = get('thread', {})

        from github3.repos import Repository
        #: Repository the comment was made on
        self.repository = Repository(get('repository', {}), self)
        #: When the thread was last updated
        self.updated_at = self._strptime(get('updated_at'))
        #: Id of the thread
        self.id = get('id')
        #: Dictionary of urls for the thread
        self.urls = get('urls')
        #: datetime object representing the last time the user read the thread
        self.last_read_at = self._strptime(get('last_read_at'))
        #: The reason you're receiving the notification
        self.reason = get('reason')
        #: Subject of the notification, e.g. which issue/pull/diff it is
        #: in relation to. This is a dictionary.
        self.subject = get('subject')
        self.unread = get('unread')

    def _repr(self):
        title = self.subject.get('title')
        return '<Thread [{0}]>'.format(title)

    def delete_subscription(self):
        """Delete subscription for this thread.

        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        response = self._delete(url)
        return self._boolean(response, 204, 404)

    def is_unread(self):
        """Tell you if the thread is unread or not."""
        return self.unread

    def mark(self):
        """Mark the thread as read.

        :returns: bool
        """
        return self._boolean(self._patch(self._api), 205, 404)

    def set_subscription(self, subscribed, ignored):
        """Set the user's subscription for this thread.

        :param bool subscribed: (required), determines if notifications
            should be received from this thread.
        :param bool ignored: (required), determines if notifications should
            be ignored from this thread.
        :returns: :class:`Subscription <Subscription>`
        """
        url = self._build_url('subscription', base_url=self._api)
        payload = dumps({'subscribed': subscribed, 'ignored': ignored})
        json = self._json(self._put(url, data=payload), 200)
        if not json:
            return None
        return Subscription(json, self)

    def subscription(self):
        """Check the status of the user's subscription to this thread.

        :returns: :class:`Subscription <Subscription>`
        """
        url = self._build_url('subscription', base_url=self._api)
        json = self._json(self._get(url), 200)
        if not json:
            return None
        return Subscription(json, self)
class Subscription(GitHubCore):
    """Wrapper around thread and repository subscription information.

    See also:
    http://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
    """

    def __init__(self, sub, session=None):
        super(Subscription, self).__init__(sub, session)
        get = sub.get
        self._api = get('url')
        #: reason user is subscribed to this thread/repository
        self.reason = get('reason')
        #: datetime representation of when the subscription was created
        self.created_at = self._strptime(get('created_at'))
        #: API url of the thread if it exists
        self.thread_url = get('thread_url')
        #: API url of the repository if it exists
        self.repository_url = get('repository_url')
        self.ignored = get('ignored', False)
        self.subscribed = get('subscribed', False)

    def _repr(self):
        return '<Subscription [{0}]>'.format(self.subscribed)

    def delete(self):
        """Delete this subscription; returns bool."""
        return self._boolean(self._delete(self._api), 204, 404)

    def is_ignored(self):
        return self.ignored

    def is_subscribed(self):
        return self.subscribed

    def set(self, subscribed, ignored):
        """Set the user's subscription for this subscription.

        :param bool subscribed: (required), determines if notifications
            should be received from this thread.
        :param bool ignored: (required), determines if notifications should
            be ignored from this thread.
        """
        payload = dumps({'subscribed': subscribed, 'ignored': ignored})
        json = self._json(self._put(self._api, data=payload), 200)
        self.__init__(json, self._session)
| [
"jesica.v.marquez@gmail.com"
] | jesica.v.marquez@gmail.com |
cb915a83c326ed9358735e7e6a6123656ae20d18 | f00ae2cb4709539e8a78247678d9bb51913e0373 | /oacids/schedules/schedule.py | 76499b213fe0838b48b11e39aed9eecb971f06d3 | [
"MIT"
] | permissive | openaps/oacids | 576351d34d51c62492fc0ed8be5e786273f27aee | ed8d6414171f45ac0c33636b5b00013e462e89fb | refs/heads/master | 2021-01-10T06:03:53.395357 | 2016-03-21T04:02:47 | 2016-03-21T04:02:47 | 51,559,470 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 343 | py |
from openaps.configurable import Configurable
import recurrent
class Schedule (Configurable):
    """A named, configurable schedule whose recurrence is an rrule string."""

    prefix = 'schedule'
    required = ['phases', 'rrule']
    url_template = "schedule://{name:s}/{rrule:s}"

    @classmethod
    def parse_rrule(Klass, rrule):
        """Parse a natural-language recurrence phrase into an rrule."""
        event = recurrent.RecurringEvent()
        return event.parse(rrule)
| [
"bewest@gmail.com"
] | bewest@gmail.com |
846029f797948ff4c428cce8a5922b17ffbbd67d | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/_monitor_management_client.py | c050f4b4aa8fc88df3e7a1e1c02c2d1b67f42612 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 3,731 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorManagementClientConfiguration
from .operations import MetricsOperations
from .operations import ServiceDiagnosticSettingsOperations
from .. import models
class MonitorManagementClient(object):
    """Monitor Management Client.

    NOTE: this file is AutoRest-generated (see the file header); hand edits
    may be lost on regeneration.

    :ivar metrics: MetricsOperations operations
    :vartype metrics: $(python-base-namespace).v2016_09_01.aio.operations.MetricsOperations
    :ivar service_diagnostic_settings: ServiceDiagnosticSettingsOperations operations
    :vartype service_diagnostic_settings: $(python-base-namespace).v2016_09_01.aio.operations.ServiceDiagnosticSettingsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = MonitorManagementClientConfiguration(credential, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every model class in the package with the (de)serializers.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Operation groups share the same pipeline client and serializers.
        self.metrics = MetricsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_diagnostic_settings = ServiceDiagnosticSettingsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        http_request.url = self._client.format_url(http_request.url)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        # Close the underlying pipeline client and its transport.
        await self._client.close()

    async def __aenter__(self) -> "MonitorManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | noreply@github.com |
e34b085b9bbc988d0ccd664acc4c3a15e17ae509 | 9eb9a74b323aa5e55c344cef9bea3df26c8901fc | /euler4.py | 3f27e5420724e669f8854a4950e36523c43988df | [] | no_license | BCarley/Euler | 6e60df448a0508fa55b0c16e55c87763ddd5e236 | a0029ab348c4388e824cf2e1d374b4a902b915e4 | refs/heads/master | 2016-09-05T15:56:12.097080 | 2015-09-01T08:36:44 | 2015-09-01T09:26:28 | 38,889,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | """euler question 4
Trying to find the largest palindromic product of 2 three digit numbers.
attempting to use the quadratic sieve method for this"""
import math
import numpy as np
def is_palindromic(num):
    """Return True if ``num`` reads the same forwards and backwards.

    Works on anything with a meaningful ``str()`` form (ints here).
    """
    text = str(num)
    # Direct equality is clearer than the original if/else over the
    # same comparison.
    return text == text[::-1]
def generate_primes(end=10000000):
    """Yield the primes up to ``end`` (inclusive) via a sieve of Eratosthenes.

    The sieve is a flat list where composite slots are zeroed in place, so
    memory use is O(end); ``end`` defaults to 10**7, roughly the practical
    limit noted by the original author.

    :param int end: upper bound (inclusive) for generated primes
    :raises MemoryError: when the sieve list cannot be allocated
    """
    np1 = end + 1
    try:
        s = list(range(np1))
    except MemoryError as e:
        # The original code raised a plain string here (invalid in modern
        # Python); raise a real exception with context instead.
        raise MemoryError('sieve range %d too large: %s' % (np1, e))
    s[1] = 0
    for i in range(2, np1):
        if s[i]:
            # i survived all smaller primes, so it is prime: zero its
            # multiples starting at i*i (smaller ones are already handled).
            s[i * i: np1: i] = [0] * len(range(i * i, np1, i))
            yield i
def get_quadratic_residues(num, no_primes):
    """Return the first ``no_primes + 1`` primes for which ``num`` is a
    quadratic residue (Euler's criterion: num**((p-1)/2) == 1 mod p).

    :param int num: the number being factored
    :param int no_primes: stop once the factor base exceeds this length
    """
    factor_base = []
    for prime in generate_primes():
        # 3-arg pow does modular exponentiation in C; the original built the
        # full num ** ((p-1)/2) integer first, which is astronomically slow.
        # Floor division also keeps the exponent an int under Python 3.
        if pow(num, (prime - 1) // 2, prime) == 1:
            factor_base.append(prime)
        if len(factor_base) > no_primes:
            break
    return factor_base
def sieve_values(num, values, factor_base):
    """performs a sieve on the values to return a list of numbers,
    numbers that are returned as 1 are smooth numbers"""
    # NOTE(review): assumes values[index] holds
    # (index + ceil(sqrt(num)))**2 - num for consecutive indices -- confirm
    # against the caller (factorise) before reusing elsewhere.
    for prime in factor_base:
        cnt = 0
        for (index, i) in enumerate(values):
            # Offsets where the sieve polynomial is divisible by this prime.
            if ((index + int(math.ceil(num ** 0.5))) ** 2 - num) % prime == 0:
                cnt += 1
                #print "divided by %i" % (prime), values
                # Divide the prime out of every prime-th entry from here on;
                # divisibility repeats with period `prime` along the array.
                values[index::prime] = [int(value/prime) for value in values[index::prime]]
                #print "Divided by %i at index %i, count is %i:" % (prime, index, cnt), values, "\n"
            # A quadratic has at most two roots mod an odd prime (one root
            # for 2), so stop scanning once they are found.
            if prime == 2 or cnt == 2:
                break
    return values
def construct_matrix(num, values, factor_base):
    """Build the GF(2) exponent-parity matrix for the smooth sieve values.

    Entries of ``values`` reduced to 1 are smooth; each matrix row holds the
    parity of each factor-base prime's multiplicity in the corresponding
    polynomial value.
    """
    offset = math.ceil(num ** 0.5)
    smooth_x = [idx for idx, value in enumerate(values) if value == 1]
    smooth_y = [((x + offset) ** 2 - num) for x in smooth_x]
    m = [[div_into(y, prime) % 2 for prime in factor_base] for y in smooth_y]
    return m
def div_into(x, y):
    """Return how many times ``y`` divides ``x`` (the multiplicity of y)."""
    count = 0
    while x % y == 0:
        x /= y
        count += 1
    return count
def factorise(num):
    """Run one quadratic-sieve pass over ``num``.

    Builds polynomial values around ceil(sqrt(num)), derives a factor base
    of quadratic residues, sieves the values, and returns the GF(2) parity
    matrix of the smooth ones.
    """
    base = math.ceil(num ** 0.5)
    # range() replaces the Python-2-only xrange() used originally.
    values = [(i + base) ** 2 - num for i in range(-100, 100)]
    factor_base = get_quadratic_residues(num, no_primes=100)
    # print() call replaces the Python-2 print statement (a SyntaxError on
    # Python 3); output text is unchanged.
    print('Factor Base: %s' % (factor_base,))
    sieved_values = sieve_values(num, values, factor_base)
    return construct_matrix(num, sieved_values, factor_base)
# Demo run: sieve the sample number and show the resulting parity matrix.
# (print() replaces the Python-2 print statement, a SyntaxError on Python 3.)
x = factorise(977779)
print('Sieved Values: %s' % (x,))
| [
"ben.carley@bipb.com"
] | ben.carley@bipb.com |
d34f105c69e1b5bc0a5ad34388dba471f066c4b5 | 4734fd79ebc8c10b6bec3d2e1995bc2534799f2e | /school_attendance/models/__init__.py | 31613f97af3bd621fb997d7d91cff7ec16320881 | [] | no_license | tringuyenhashmicro/Boston | 46da227957c996e674b9d56097f7967a77cfb274 | 8697a373da479e4f5b25681c0d551affdc83194a | refs/heads/master | 2021-04-12T09:57:42.480082 | 2017-09-13T03:19:21 | 2017-09-13T03:19:21 | 94,525,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | # -*- coding: utf-8 -*-
import attendance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"huutringuen88@gmail.com"
] | huutringuen88@gmail.com |
34d40f192d76b051fd22025ecf2e80bfce435750 | 68b76ff5dea447653d0988eb551ca2ee0e1bcd58 | /omniROS_ws/src/wcomputation/src/wcomputation_node.py | d3f5c73f860a56a8ea8594e62698e3ad432e8dfd | [
"MIT"
] | permissive | RobinBaruffa/omniROS | b671fce445824f4295daa58135c02376f37ecc88 | b7393f1b0f0dea447b6daa509169acf3fd56260b | refs/heads/master | 2021-06-19T01:17:54.138655 | 2021-02-09T13:38:39 | 2021-02-09T13:38:39 | 177,563,483 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | #!/usr/bin/env python
import rospy
from math import *
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Point
from geometry_msgs.msg import Twist
def compute(msg):
    """Turn a body-frame Twist command into three omni-wheel speeds.

    Publishes the wheel speeds on the module-level ``pub`` publisher and
    logs them; always returns 0.
    """
    result = Vector3()
    r = 0.029       # wheel radius -- presumably metres; TODO confirm
    R = 0.125       # body radius -- presumably metres; TODO confirm
    sinpi3 = 0.86602  # sin(pi/3), wheels spaced 120 degrees apart
    vx = msg.linear.x
    vy = msg.linear.y
    wz = msg.angular.z

    # Inverse kinematics for a 3-wheel omnidirectional base.
    result.x = (-vx + R * wz) / r
    result.y = (-sinpi3 * vy + 0.5 * (vx) + R * wz) / r
    result.z = (sinpi3 * vy + 0.5 * (vx) + R * wz) / r

    pub.publish(result)
    rospy.loginfo(result)
    return 0
def main():
    """Initialise the ROS node, wire publisher/subscriber, and spin."""
    global pub
    rospy.init_node('wcomputation_node')
    # Wheel-speed output; filled in by compute() on every cmd_vel message.
    pub = rospy.Publisher('/omniROS/vel_w', Vector3, queue_size=10)
    rospy.Subscriber("/omniROS/cmd_vel", Twist, compute)
    rospy.spin()
if __name__ == "__main__":
    # A ROSInterruptException just means the node was asked to shut down
    # (e.g. Ctrl-C), so exit quietly.
    try:
        main()
    except rospy.ROSInterruptException:
        pass
| [
"robin.baruffa@gmail.com"
] | robin.baruffa@gmail.com |
f07fe830ae79276ded6e7b048e9d60d425affc20 | dff51e4a3bbcc464c0069a16f1394d36c31e2372 | /omaha_server/omaha/migrations/0021_auto_20150917_1028.py | aafbc492fed9e17886ba9b145e2bda1949d378ef | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | tuladhar/omaha-server | 3264de057221322038c7af704ea268c9e267d7da | 6cfd86e4319e03af0eb319fae6c867691ffc2c36 | refs/heads/master | 2022-11-21T19:38:50.335963 | 2020-06-09T14:14:03 | 2020-06-09T14:14:03 | 281,736,223 | 1 | 0 | NOASSERTION | 2020-07-22T17:02:48 | 2020-07-22T17:02:47 | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import omaha.models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``Version.file`` nullable and route
    uploads through :func:`omaha.models._version_upload_to`."""

    dependencies = [
        ('omaha', '0020_auto_20150710_0913'),
    ]

    operations = [
        migrations.AlterField(
            model_name='version',
            name='file',
            field=models.FileField(null=True, upload_to=omaha.models._version_upload_to),
        ),
    ]
| [
"amekin@crystalnix.com"
] | amekin@crystalnix.com |
c8451b00b14ff3eef4cf17e896855e497bf843a3 | 5f5d845e383c6ed603fcb105f09bbc5811b2124a | /teuthology/test/test_packaging.py | 04f91d8173ec7a0efbd06ed0025345b3ae9e76f1 | [
"MIT"
] | permissive | vasukulkarni/teuthology | 0a2bed271dfd549c6966c561f97478182b0b28ea | f3a4e5e155f20ac4c46cfb8b66cc7170672f1f87 | refs/heads/master | 2020-04-08T16:39:48.897351 | 2015-03-17T00:01:45 | 2015-03-17T00:01:45 | 32,357,309 | 0 | 0 | null | 2015-03-16T22:42:04 | 2015-03-16T22:42:04 | null | UTF-8 | Python | false | false | 5,034 | py | import pytest
from mock import patch, Mock
from teuthology import packaging
class TestPackaging(object):
    """Unit tests for ``teuthology.packaging``.

    ``teuthology.packaging.misc`` is patched throughout so each test can make
    the remote host look like a deb- or rpm-based system without touching a
    real machine; the remote itself is a ``Mock`` whose recorded calls are
    asserted against the exact package-manager command lists.
    """

    @patch("teuthology.packaging.misc")
    def test_get_package_name_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        # 'sqlite' maps to the Debian package name 'sqlite3'.
        assert packaging.get_package_name('sqlite', Mock()) == "sqlite3"

    @patch("teuthology.packaging.misc")
    def test_get_package_name_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        # No rpm mapping exists for 'sqlite'.
        assert packaging.get_package_name('sqlite', Mock()) is None

    @patch("teuthology.packaging.misc")
    def test_get_package_name_not_found(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_package_name('notthere', Mock()) is None

    @patch("teuthology.packaging.misc")
    def test_get_service_name_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        assert packaging.get_service_name('httpd', Mock()) == 'apache2'

    @patch("teuthology.packaging.misc")
    def test_get_service_name_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_service_name('httpd', Mock()) == 'httpd'

    @patch("teuthology.packaging.misc")
    def test_get_service_name_not_found(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        assert packaging.get_service_name('notthere', Mock()) is None

    @patch("teuthology.packaging.misc")
    def test_install_package_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        m_remote = Mock()
        # Exact apt-get invocation expected on the remote.
        expected = [
            'DEBIAN_FRONTEND=noninteractive',
            'sudo',
            '-E',
            'apt-get',
            '-y',
            'install',
            'apache2'
        ]
        packaging.install_package('apache2', m_remote)
        m_remote.run.assert_called_with(args=expected)

    @patch("teuthology.packaging.misc")
    def test_install_package_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        m_remote = Mock()
        expected = [
            'sudo',
            'yum',
            '-y',
            'install',
            'httpd'
        ]
        packaging.install_package('httpd', m_remote)
        m_remote.run.assert_called_with(args=expected)

    @patch("teuthology.packaging.misc")
    def test_remove_package_deb(self, m_misc):
        m_misc.get_system_type.return_value = "deb"
        m_remote = Mock()
        # Removal uses 'purge' on deb systems.
        expected = [
            'DEBIAN_FRONTEND=noninteractive',
            'sudo',
            '-E',
            'apt-get',
            '-y',
            'purge',
            'apache2'
        ]
        packaging.remove_package('apache2', m_remote)
        m_remote.run.assert_called_with(args=expected)

    @patch("teuthology.packaging.misc")
    def test_remove_package_rpm(self, m_misc):
        m_misc.get_system_type.return_value = "rpm"
        m_remote = Mock()
        # Removal uses 'erase' on rpm systems.
        expected = [
            'sudo',
            'yum',
            '-y',
            'erase',
            'httpd'
        ]
        packaging.remove_package('httpd', m_remote)
        m_remote.run.assert_called_with(args=expected)

    def test_get_koji_package_name(self):
        build_info = dict(version="3.10.0", release="123.20.1")
        result = packaging.get_koji_package_name("kernel", build_info)
        assert result == "kernel-3.10.0-123.20.1.x86_64.rpm"

    @patch("teuthology.packaging.config")
    def test_get_kojiroot_base_url(self, m_config):
        m_config.kojiroot_url = "http://kojiroot.com"
        build_info = dict(
            package_name="kernel",
            version="3.10.0",
            release="123.20.1",
        )
        result = packaging.get_kojiroot_base_url(build_info)
        expected = "http://kojiroot.com/kernel/3.10.0/123.20.1/x86_64/"
        assert result == expected

    @patch("teuthology.packaging.config")
    def test_get_koji_build_info_success(self, m_config):
        m_config.kojihub_url = "http://kojihub.com"
        # The remote command succeeds and prints a dict literal on stdout.
        m_proc = Mock()
        expected = dict(foo="bar")
        m_proc.exitstatus = 0
        m_proc.stdout.getvalue.return_value = str(expected)
        m_remote = Mock()
        m_remote.run.return_value = m_proc
        result = packaging.get_koji_build_info(1, m_remote, dict())
        assert result == expected
        # The query is run as an inline python snippet against the koji hub.
        args, kwargs = m_remote.run.call_args
        expected_args = [
            'python', '-c',
            'import koji; '
            'hub = koji.ClientSession("http://kojihub.com"); '
            'print hub.getBuild(1)',
        ]
        assert expected_args == kwargs['args']

    @patch("teuthology.packaging.config")
    def test_get_koji_build_info_fail(self, m_config):
        m_config.kojihub_url = "http://kojihub.com"
        # A non-zero exit status from the remote must raise.
        m_proc = Mock()
        m_proc.exitstatus = 1
        m_remote = Mock()
        m_remote.run.return_value = m_proc
        m_ctx = Mock()
        m_ctx.summary = dict()
        with pytest.raises(RuntimeError):
            packaging.get_koji_build_info(1, m_remote, m_ctx)
| [
"aschoen@redhat.com"
] | aschoen@redhat.com |
558a06e956fb8762b99ef627a77d6403aff05be4 | 9b0195fd5ffd407f6e5edf4955058299a361ca25 | /leetcode-python/huoshui/wechat_security/problem.py | 06451c59ced16a246d7992e7dfd2d0e85e9ce1c0 | [] | no_license | moqiguzhu/Online-Judge | b232dbbca8e513eb14620259c44b15b60b54e005 | 3ca0c407ffff775167b031d00fc7376c25d69e48 | refs/heads/master | 2022-01-27T05:42:51.813437 | 2022-01-18T02:17:04 | 2022-01-18T02:17:04 | 42,443,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from collections import defaultdict, deque, Counter
from typing import List
import math
import copy
import random
import numpy as np
import bisect
import inspect
import unittest
def problem1(file_path):
    """Return the wxid that appears most often in ``file_path`` (one id per
    line).

    Prints a notice and returns '' when the file is empty.
    """
    # Bug fix: defaultdict's factory is called with NO arguments, so the
    # original ``defaultdict(lambda x: 0)`` raised TypeError on the first
    # missing key.  ``int`` gives the intended zero default.
    wxid_cnt = defaultdict(int)
    with open(file_path, encoding='utf8') as f:
        for line in f:
            wxid = line.strip()
            wxid_cnt[wxid] += 1

    # Boundary: empty file.
    if len(wxid_cnt) == 0:
        print('文件为空')
        return ''

    c = Counter(wxid_cnt)
    # Bug fix: most_common(1) returns a LIST like [(wxid, freq)]; the
    # original unpacked the list itself into two names, raising ValueError.
    mostapp_wxid, freq = c.most_common(1)[0]
    return mostapp_wxid
return mostapp_wxid
class TreeNode:
    """A binary-tree node holding a value and two optional child links."""

    def __init__(self, val):
        self.val = val
        # Children start detached; callers wire them up explicitly.
        self.left = self.right = None
def problem2(root):
    """Unimplemented stub for the second problem.

    ``root`` is presumably the root ``TreeNode`` of a binary tree -- confirm
    once the intended behaviour is known.
    """
    pass
| [
"moqiguzhu@163.com"
] | moqiguzhu@163.com |
472e796961fcde83ad76ffe05488280d21018b71 | a6706f3fcfc8904d4b05d2588ed1fd62112a7c1d | /UMS_Project/wsgi.py | ac72404d2a26d892ea7b7ae713c7f10caf1e0449 | [] | no_license | sravanmandava/Assignment | c1652013e44b6bd8132a4de71dbef1870e34df99 | bd9222ad2691afe31e3905c3f71b7872260ab74f | refs/heads/master | 2023-07-15T01:26:34.773949 | 2021-08-17T06:34:22 | 2021-08-17T06:34:22 | 396,706,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for UMS_Project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'UMS_Project.settings')
# The WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"sravanmandava8@gmail.com"
] | sravanmandava8@gmail.com |
fc01c6fb812fe78ca04496494d68fcc90ae706f5 | 9701d7b29f9b6961331af18ad7fe7ea6f9ee51bd | /shufflelists.py | 5d516ca06ce8628a2cd5e34a81de21bee844f6fd | [] | no_license | asouxuning/trans | 25fd784c67728c130b188ef74255676828b3fc5a | 1ea05f785b5c8d54411ca4350abbec37015fb387 | refs/heads/master | 2021-05-04T17:50:12.467873 | 2018-02-09T15:31:00 | 2018-02-09T15:31:00 | 120,280,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import numpy as np
def shufflelists(lists):
    """Shuffle several parallel lists in unison.

    One random permutation of the indices of ``lists[0]`` is applied to
    every list, so corresponding elements stay aligned across lists.
    (The original body had an unbalanced parenthesis -- a SyntaxError --
    and was truncated; this completes the evident intent.)

    :param lists: sequence of equal-length sequences
    :returns: list of shuffled lists, one per input sequence
    """
    order = np.random.permutation(len(lists[0]))
    return [[seq[i] for i in order] for seq in lists]
"asouxuning@163.com"
] | asouxuning@163.com |
591136ecd2269e61855177299ef373f586107e09 | 19fc974a62cc2c7863e2dff0ff6e784c961cd2ef | /gerritssh/internal/__init__.py | 022f407c155e9ba9b1add831b227ccec053d98fe | [] | no_license | hieulq/Auto-Make-Deliver-Report | 9b6e0698b8ab55894c76536a18a71aeb86422453 | 57a5e7de8d4bad47bf6514202516ee88ee78af88 | refs/heads/master | 2021-01-09T06:58:41.500772 | 2018-02-27T09:38:57 | 2018-02-27T09:38:57 | 56,847,220 | 1 | 4 | null | 2018-02-27T09:38:58 | 2016-04-22T10:21:33 | Python | UTF-8 | Python | false | false | 34 | py | from .cmdoptions import * # noqa
| [
"hieulq@vn.fujitsu.com"
] | hieulq@vn.fujitsu.com |
19f9791df898df635f27bb0f7a40bc6647087b8f | 771d0def5238a2b311639853d784841ccd57ce4e | /BTRNN-release/BT_RNN/BTRNN.py | 5e5230ffc5a5a06f0fa51622b6dbecc39b5d1e03 | [] | no_license | TripleEss/Block-term-Tensor-Neural-Networks | 4869c7d786425803b29698615000ccbd686e7664 | 9c0f4a64b4a3eb76a56f272feccbcca7469faa23 | refs/heads/master | 2023-01-11T10:28:39.117990 | 2020-02-19T10:11:17 | 2020-02-19T10:11:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,597 | py | # Created by ay27 at 2017/10/6
from keras.layers.recurrent import Recurrent
from keras.engine import InputSpec
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from .BT_mul_Keras import *
class BT_RNN(Recurrent):
    """A simple RNN whose input-to-hidden weight is stored in Block-Term
    (BT) tensor-decomposition form.

    Instead of a dense ``W_xh`` of size prod(bt_input_shape) x
    prod(bt_output_shape), the layer learns ``block_ranks`` blocks, each a
    Tucker factorization (one core plus one factor per tensor mode), and
    contracts the input against them with the ``BT_mul*`` helpers.  The
    hidden-to-hidden weight stays dense.
    """

    def __init__(self,
                 bt_input_shape, bt_output_shape, core_ranks, block_ranks,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 debug=False,
                 init_seed=11111986,
                 **kwargs):
        """Configure the BT decomposition and the usual Keras RNN knobs.

        :param bt_input_shape: factorization of the input dimension; its
            product must equal the incoming feature size
        :param bt_output_shape: factorization of the hidden size; its
            product is the number of units
        :param core_ranks: Tucker ranks, one per tensor mode
        :param block_ranks: number of block terms (CP-like blocks)
        """
        super(BT_RNN, self).__init__(**kwargs)
        self.bt_input_shape = np.array(bt_input_shape)
        self.bt_output_shape = np.array(bt_output_shape)
        self.core_ranks = np.array(core_ranks)
        self.block_ranks = int(block_ranks)
        self.debug = debug
        # Hidden size is the product of the output factorization.
        self.units = np.prod(self.bt_output_shape)

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # Clamp dropout rates into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_spec = InputSpec(shape=(None, self.units))
        self.states = None
        # Weights are created in build(); cores/factors are views into the
        # flat `kernel` vector.
        self.kernel = None
        self.recurrent_kernel = None
        self.cores = [None]
        self.factors = [None]
        self.bias = None
        self.debug = debug
        self.init_seed = init_seed

        self.input_dim = np.prod(self.bt_input_shape)
        # Parameter counts of the dense equivalent vs. the BT form.
        self.params_original = np.prod(self.bt_input_shape) * np.prod(self.bt_output_shape)
        self.params_bt = self.block_ranks * \
                         (np.sum(self.bt_input_shape * self.bt_output_shape * self.core_ranks) + np.prod(
                             self.core_ranks))
        self.batch_size = None
        # reported compress ratio in input->hidden weight
        self.compress_ratio = self.params_original / self.params_bt

        if self.debug:
            print('bt_input_shape = ' + str(self.bt_input_shape))
            print('bt_output_shape = ' + str(self.bt_output_shape))
            print('core_ranks = ' + str(self.core_ranks))
            print('block_ranks = ' + str(self.block_ranks))
            print('compress_ratio = ' + str(self.compress_ratio))

        # All three factorizations must have the same number of modes.
        assert len(self.core_ranks.shape) == len(self.bt_input_shape.shape) == len(self.bt_output_shape.shape)

    def build(self, input_shape):
        """Create the flat BT kernel, the dense recurrent kernel and bias."""
        # input shape: `(batch, time (padded with zeros), input_dim)`
        # input_shape is a tuple
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        assert len(input_shape) == 3
        assert input_shape[2] == self.input_dim

        self.batch_size = input_shape[0] if self.stateful else None
        self.input_spec[0] = InputSpec(shape=(self.batch_size, None, self.input_dim))
        self.states = [None]
        if self.stateful:
            self.reset_states()

        ################################################################################################################
        # input -> hidden state
        # the kernel layout is : [[core, factor0, factor1, factor2, ...],
        #                         [core, factor0, factor1, factor2, ...],
        #                         ...]
        # All BT parameters live in ONE flat weight vector; cores/factors
        # below are reshaped views into it.
        self.kernel = self.add_weight((self.params_bt,),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        self.cores, self.factors = split_kernel_into_core_and_factors(self.kernel,
                                                                      self.bt_input_shape, self.bt_output_shape,
                                                                      self.core_ranks, self.block_ranks)

        ################################################################################################################
        # hidden -> hidden (kept dense, as in a plain SimpleRNN)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        if self.use_bias:
            self.bias = self.add_weight((self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def preprocess_input(self, inputs, training=None):
        """No preprocessing: the BT contraction happens per step."""
        # input shape: `(batch, time (padded with zeros), input_dim)`
        return inputs

    def step(self, inputs, states):
        """One recurrence step: h_t = act(BT(x_t) + b + h_{t-1} . U)."""
        # inputs shape: [batch, input_dim]
        if 0. < self.dropout < 1.:
            inputs = inputs * states[1]

        ################################################################################################################
        # NOTE: we now just substitute the `W_{xh}`
        # Dispatch on the tensor order (number of modes) of the BT form.
        if len(self.core_ranks) == 2:
            h = BT_mul2(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 3:
            h = BT_mul3(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 4:
            h = BT_mul4(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 5:
            h = BT_mul5(inputs, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        else:
            h = None
            raise ValueError('error in len(core_ranks)')

        if self.bias is not None:
            h = K.bias_add(h, self.bias)
        ################################################################################################################

        prev_output = states[0]
        if 0. < self.recurrent_dropout < 1.:
            prev_output *= states[2]
        output = h + K.dot(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)

        # Properly set learning phase on output tensor.
        if 0. < self.dropout + self.recurrent_dropout:
            output._uses_learning_phase = True
        return output, [output]

    def get_constants(self, inputs, training=None):
        """Build dropout masks passed along as extra state tensors."""
        # this is totally same as the Keras API
        constants = []
        if self.implementation != 0 and 0. < self.dropout < 1.:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))

        if 0. < self.recurrent_dropout < 1.:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants

    def get_config(self):
        """Serialize layer hyper-parameters for Keras model saving."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(BT_RNN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class BT_GRU(Recurrent):
def __init__(self,
bt_input_shape, bt_output_shape, core_ranks, block_ranks,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
debug=False,
init_seed=11111986,
**kwargs):
super(BT_GRU, self).__init__(**kwargs)
self.bt_input_shape = np.array(bt_input_shape)
self.bt_output_shape = np.array(bt_output_shape)
self.core_ranks = np.array(core_ranks)
self.block_ranks = int(block_ranks)
self.debug = debug
self.units = np.prod(self.bt_output_shape)
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = InputSpec(shape=(None, self.units))
self.states = None
self.kernel = None
self.recurrent_kernel = None
self.cores = [None]
self.factors = [None]
self.bias = None
self.debug = debug
self.init_seed = init_seed
# store r, z, h
self.bt_output_shape[0] *= 3
self.input_dim = np.prod(self.bt_input_shape)
self.params_original = np.prod(self.bt_input_shape) * np.prod(self.bt_output_shape)
self.params_bt = self.block_ranks * \
(np.sum(self.bt_input_shape * self.bt_output_shape * self.core_ranks) + np.prod(
self.core_ranks))
self.batch_size = None
# reported compress ratio in input->hidden weight
self.compress_ratio = self.params_original / self.params_bt
if self.debug:
print('bt_input_shape = ' + str(self.bt_input_shape))
print('bt_output_shape = ' + str(self.bt_output_shape))
print('core_ranks = ' + str(self.core_ranks))
print('block_ranks = ' + str(self.block_ranks))
print('compress_ratio = ' + str(self.compress_ratio))
assert len(self.core_ranks.shape) == len(self.bt_input_shape.shape) == len(self.bt_output_shape.shape)
def build(self, input_shape):
# input shape: `(batch, time (padded with zeros), input_dim)`
# input_shape is a tuple
if isinstance(input_shape, list):
input_shape = input_shape[0]
assert len(input_shape) == 3
assert input_shape[2] == self.input_dim
self.batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(self.batch_size, None, self.input_dim))
self.states = [None]
if self.stateful:
self.reset_states()
################################################################################################################
# input -> hidden state
self.kernel = self.add_weight((self.params_bt,),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.cores, self.factors = split_kernel_into_core_and_factors(self.kernel,
self.bt_input_shape, self.bt_output_shape,
self.core_ranks, self.block_ranks)
################################################################################################################
# hidden -> hidden
# store r, z, h
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight((np.prod(self.bt_output_shape),),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def preprocess_input(self, x, training=None):
return x
def get_constants(self, inputs, training=None):
# this is totally same as the Keras API
constants = [[K.cast_to_floatx(1.) for _ in range(3)]]
if 0. < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs():
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [K.in_train_phase(dropped_inputs,
ones,
training=training) for _ in range(3)]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def step(self, x, states):
    """One GRU time step: compute h_t from input x and previous state h_{t-1}.

    The dense input->hidden product W_{xh} @ x is replaced by a Block-Term
    tensor multiply (BT_mul2..BT_mul5), dispatched on the decomposition
    order len(core_ranks).  The hidden->hidden path is the standard dense
    Keras GRU formulation.
    """
    h_tm1 = states[0]        # previous memory
    dp_mask = states[1]      # dropout matrices for inputs (ones in this variant)
    rec_dp_mask = states[2]  # dropout matrices for recurrent units
    x1 = x * dp_mask[0]
    # NOTE: we now just substitute the `W_{xh}` dense product with a BT product.
    if len(self.core_ranks) == 2:
        matrix_x = BT_mul2(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
    elif len(self.core_ranks) == 3:
        matrix_x = BT_mul3(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
    elif len(self.core_ranks) == 4:
        matrix_x = BT_mul4(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
    elif len(self.core_ranks) == 5:
        matrix_x = BT_mul5(x1, self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
    else:
        # Fix: removed the dead `matrix_x = None` assignment that preceded
        # this raise — it could never be observed.
        raise ValueError('error in len(core_ranks)')
    # Following is the same as the Keras API GRU step.
    if self.use_bias:
        matrix_x = K.bias_add(matrix_x, self.bias)
    matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
                         self.recurrent_kernel[:, :2 * self.units])
    x_z = matrix_x[:, :self.units]
    x_r = matrix_x[:, self.units: 2 * self.units]
    recurrent_z = matrix_inner[:, :self.units]
    recurrent_r = matrix_inner[:, self.units: 2 * self.units]
    z = self.recurrent_activation(x_z + recurrent_z)   # update gate
    r = self.recurrent_activation(x_r + recurrent_r)   # reset gate
    x_h = matrix_x[:, 2 * self.units:]
    recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
                        self.recurrent_kernel[:, 2 * self.units:])
    hh = self.activation(x_h + recurrent_h)            # candidate state
    h = z * h_tm1 + (1 - z) * hh
    if 0. < self.dropout + self.recurrent_dropout:
        h._uses_learning_phase = True
    return h, [h]
def get_config(self):
    """Return the layer configuration.

    Fix: also serialize the BT decomposition parameters (bt_input_shape,
    bt_output_shape, core_ranks, block_ranks) — they are required
    constructor arguments, and without them the layer cannot be rebuilt
    via from_config.  Mirrors BT_LSTM.get_config below.
    """
    config = {'bt_input_shape': self.bt_input_shape,
              'bt_output_shape': self.bt_output_shape,
              'core_ranks': self.core_ranks,
              'block_ranks': self.block_ranks,
              'units': self.units,
              'activation': activations.serialize(self.activation),
              'recurrent_activation': activations.serialize(self.recurrent_activation),
              'use_bias': self.use_bias,
              'kernel_initializer': initializers.serialize(self.kernel_initializer),
              'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
              'bias_initializer': initializers.serialize(self.bias_initializer),
              'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
              'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
              'bias_regularizer': regularizers.serialize(self.bias_regularizer),
              'activity_regularizer': regularizers.serialize(self.activity_regularizer),
              'kernel_constraint': constraints.serialize(self.kernel_constraint),
              'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              'dropout': self.dropout,
              'recurrent_dropout': self.recurrent_dropout}
    base_config = super(BT_GRU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class BT_LSTM(Recurrent):
    """LSTM layer whose input-to-hidden kernel is stored as a compressed
    Block-Term (BT) tensor decomposition instead of a dense matrix.

    The hidden-to-hidden kernel stays a dense (units, 4*units) matrix.
    Relies on BT_mul2..BT_mul5 and split_kernel_into_core_and_factors
    defined elsewhere in this file.
    """

    def __init__(self,
                 bt_input_shape, bt_output_shape, core_ranks, block_ranks,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 debug=False,
                 init_seed=11111986,
                 **kwargs):
        super(BT_LSTM, self).__init__(**kwargs)
        # Factorized tensor shapes of the input->hidden weight.
        self.bt_input_shape = np.array(bt_input_shape)
        self.bt_output_shape = np.array(bt_output_shape)
        self.core_ranks = np.array(core_ranks)
        self.block_ranks = int(block_ranks)
        self.debug = debug
        # units is computed BEFORE bt_output_shape[0] is scaled by 4 below,
        # so the BT kernel produces 4*units outputs (gates i, f, c, o).
        self.units = np.prod(self.bt_output_shape)
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # Two state tensors: h (hidden output) and c (cell state).
        self.state_spec = [InputSpec(shape=(None, self.units)),
                           InputSpec(shape=(None, self.units))]
        self.states = None
        self.kernel = None
        self.recurrent_kernel = None
        self.cores = [None]
        self.factors = [None]
        self.bias = None
        self.debug = debug
        self.init_seed = init_seed
        # store i, f, c, o
        # NOTE(review): branching on go_backwards here is unusual — it looks
        # like a repurposed flag.  Confirm before relying on reverse mode.
        if not self.go_backwards:
            self.bt_output_shape[0] *= 4
        else:
            self.units = int(self.units / 4)
        self.input_dim = np.prod(self.bt_input_shape)
        # Parameter counts: dense equivalent vs. BT-decomposed kernel.
        self.params_original = np.prod(self.bt_input_shape) * np.prod(self.bt_output_shape)
        self.params_bt = self.block_ranks * \
            (np.sum(self.bt_input_shape * self.bt_output_shape * self.core_ranks) + np.prod(
                self.core_ranks))
        self.batch_size = None
        # reported compress ratio in input->hidden weight
        self.compress_ratio = self.params_original / self.params_bt
        if self.debug:
            print('bt_input_shape = ' + str(self.bt_input_shape))
            print('bt_output_shape = ' + str(self.bt_output_shape))
            print('core_ranks = ' + str(self.core_ranks))
            print('block_ranks = ' + str(self.block_ranks))
            print('compress_ratio = ' + str(self.compress_ratio))
        # All three factorizations must have the same tensor order.
        assert len(self.core_ranks.shape) == len(self.bt_input_shape.shape) == len(self.bt_output_shape.shape)

    def build(self, input_shape):
        """Create the BT-compressed input kernel, the dense recurrent
        kernel, and (optionally) the gate bias vector."""
        print('BT-LSTM input shape = ' + str(input_shape))
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        self.batch_size = input_shape[0] if self.stateful else None
        self.input_dim = input_shape[2]
        self.input_spec[0] = InputSpec(shape=(self.batch_size, None, self.input_dim))
        self.states = [None, None]
        if self.stateful:
            self.reset_states()
        # input -> hidden state: one flat weight vector holding all BT
        # cores and factor matrices, split into views below.
        self.kernel = self.add_weight((self.params_bt,),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.cores, self.factors = split_kernel_into_core_and_factors(self.kernel,
                                                                      self.bt_input_shape, self.bt_output_shape,
                                                                      self.core_ranks, self.block_ranks)
        # hidden -> hidden (dense, stores all four gates side by side)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            if self.unit_forget_bias:
                # Jozefowicz et al. trick: initialize the forget-gate bias
                # slice to ones so the cell remembers by default.
                def bias_initializer(shape, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 2,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def preprocess_input(self, x, training=None):
        """No-op: the BT input projection is applied inside step()."""
        return x

    def get_constants(self, inputs, training=None):
        """Build [dp_mask, rec_dp_mask], one dropout mask per gate.
        # this is totally same as the Keras API
        """
        constants = []
        if self.implementation != 0 and 0. < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)
            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        if 0. < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants

    def step(self, inputs, states):
        """One LSTM time step; input projection uses the BT multiply."""
        h_tm1 = states[0]        # previous hidden state
        c_tm1 = states[1]        # previous cell state
        dp_mask = states[2]      # input dropout masks
        rec_dp_mask = states[3]  # recurrent dropout masks
        # BT product replaces the dense W_x @ inputs for all four gates.
        if len(self.core_ranks) == 2:
            z = BT_mul2(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 3:
            z = BT_mul3(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 4:
            z = BT_mul4(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        elif len(self.core_ranks) == 5:
            z = BT_mul5(inputs * dp_mask[0],
                        self.cores, self.factors, self.bt_input_shape, self.bt_output_shape, self.core_ranks)
        else:
            raise ValueError('error in len(core_ranks)')
        z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
        if self.use_bias:
            z = K.bias_add(z, self.bias)
        # Slice the fused projection into the four gate pre-activations.
        z0 = z[:, :self.units]
        z1 = z[:, self.units: 2 * self.units]
        z2 = z[:, 2 * self.units: 3 * self.units]
        z3 = z[:, 3 * self.units:]
        i = self.recurrent_activation(z0)            # input gate
        f = self.recurrent_activation(z1)            # forget gate
        c = f * c_tm1 + i * self.activation(z2)      # new cell state
        o = self.recurrent_activation(z3)            # output gate
        h = o * self.activation(c)
        if 0. < self.dropout + self.recurrent_dropout:
            h._uses_learning_phase = True
        return h, [h, c]

    def get_config(self):
        """Return the layer config, including BT decomposition parameters."""
        config = {'bt_input_shape': self.bt_input_shape,
                  'bt_output_shape': self.bt_output_shape,
                  'core_ranks': self.core_ranks,
                  'block_ranks': self.block_ranks,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(BT_LSTM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| [
"chendi1995@sohu.com"
] | chendi1995@sohu.com |
2f02d0e8afa68349157c88c3ed9678c3edd16833 | 23c9552b39b4b840e54fcc58155c219e5a8e202d | /modules/units.py | 31fd2c3e5e1884b6f152ae07117a15c3f795b0df | [
"EFL-2.0"
] | permissive | jfriedly/jenni | cd26dd5be76378a540f740cd3bb9a122ad6f12db | 41c42a7ba13eaf57915a81d6aa6cdd188cfd7f8a | refs/heads/master | 2021-01-18T08:52:16.931326 | 2013-06-07T22:56:26 | 2013-06-07T22:56:26 | 2,134,190 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | #!/usr/bin/env python
'''
units.py - jenni Units Module
Copyright 2013, Michael Yanovich (yanovich.net)
Licensed under the Eiffel Forum License 2.
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
'''
import datetime as dt
import json
import re
import web
# Cached rate table: exchange_rates['USD'][symbol] -> last close price.
exchange_rates = dict()
# Timestamp of the last successful refresh (cache expires after 15 minutes).
last_check = dt.datetime.now()
# Exchange symbols that are allowed to appear in the reply.
exchanges = ['mtgox', 'btc24', 'bitfloor', 'vcx', 'btce', 'rock', 'bitme',
             'ripple', 'lybit']
def btc_page():
try:
page = web.get('http://bitcoincharts.com/t/markets.json')
except Exception, e:
print time.time(), btc, e
return False, 'Failed to reach bitcoincharts.com'
return True, page
def ppnum(num):
    """Pretty-print *num* with thousands separators and two decimals.

    The format-spec mini-language (',' grouping option) replaces the old
    hand-rolled lookahead regex; output is identical, e.g.
    1234567.891 -> '1,234,567.89'.
    """
    return '{:,.2f}'.format(num)
def btc(jenni, input):
    '''.btc -- display the current prices for Bitcoins'''
    global exchange_rates
    global last_check
    now = dt.datetime.now()
    print 'now: ', now
    print 'last: ', last_check
    # Refresh the cache when empty or older than 15 minutes.
    if (not exchange_rates) or (now - last_check > dt.timedelta(minutes=15)):
        #if now - last_check > 900:
        status, page = btc_page()
        if status:
            json_page = json.loads(page)
        else:
            # Fetch failed: report the error message to the channel.
            return jenni.reply(page)
        ## build internal state of exchange
        # Keys are exchange symbols with the 'USD' suffix stripped.
        for each in json_page:
            if each['currency'] == 'USD':
                if 'USD' not in exchange_rates:
                    exchange_rates['USD'] = dict()
                exchange_rates['USD'][each['symbol'].replace('USD', '')] = each['close']
        last_check = dt.datetime.now()
    response = '1 BTC (in USD) = '
    symbols = exchange_rates['USD'].keys()
    symbols.sort()
    for each in symbols:
        # NOTE(review): keys were already stripped of 'USD' when stored, so
        # this replace() is presumably redundant — confirm.
        if each.replace('USD', '') in exchanges:
            response += '%s: %s | ' % (each, exchange_rates['USD'][each])
    # Joke index: mtgox price scaled by 160.
    response += 'lolcat (mtgox) index: %s | ' % (ppnum(float(exchange_rates['USD']['mtgox']) * 160))
    response += 'last updated at: ' + str(last_check)
    jenni.reply(response)
# jenni/phenny module plumbing: command name, help example, rate limit (s).
btc.commands = ['btc']
btc.example = '.btc'
btc.rate = 5
if __name__ == '__main__':
    print __doc__.strip()
| [
"michael@yanovich.net"
] | michael@yanovich.net |
bf9a630f0c0d863fa65a6a23ed870c0df9bd8388 | de4245b9d88711e39bdc2676a2583e91aec94fde | /post/migrations/0026_auto_20210226_2135.py | 341013c69ffbf4cef5474b4c126894ce6a99198c | [] | no_license | GopinaathV/Instagram_clone | 6bc779d312bf689dd64c3da3f81c4c078128d04e | d06f97f1e38bd08c11baec87c3ef9edb594d5f9b | refs/heads/main | 2023-06-10T11:24:05.465927 | 2021-06-30T11:36:28 | 2021-06-30T11:36:28 | 381,617,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Generated by Django 2.2 on 2021-02-26 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: make Notification.sender_pic a
    nullable ImageField with no upload subdirectory."""

    # Must be applied after migration 0025 of the 'post' app.
    dependencies = [
        ('post', '0025_auto_20210226_2130'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notification',
            name='sender_pic',
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
| [
"gopinaath16@gamil.com"
] | gopinaath16@gamil.com |
4731da9d96c4ef1421303672f8b8b4c0f711c63d | d59bad348c88026e444c084e6e68733bb0211bc2 | /problema_arg_padrao_mutavel.py | 616a4efc8c311158f135deac65c9f0a80b8121e6 | [] | no_license | dersonf/udemy-python | f96ec883decb21a68233b2e158c82db1c8878c7a | 92471c607d8324902902774284f7ca81d2f25888 | refs/heads/master | 2022-09-25T00:18:49.833210 | 2020-06-05T18:18:38 | 2020-06-05T18:18:38 | 262,049,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | #!/usr/bin/python3.6
def fibonacci(sequencia=[0, 1]):
    # Use of a mutable as a default value (a pitfall) — deliberately kept to
    # demonstrate that the default list is created once and shared across
    # calls, so successive no-arg calls keep extending the same sequence.
    sequencia.append(sequencia[-1] + sequencia[-2])
    return sequencia
if __name__ == '__main__':
    # Demo of the shared-default pitfall: 'restart' gets the SAME list object
    # as 'inicio' (identical id), not a fresh [0, 1] sequence.
    inicio = fibonacci()
    print(inicio, id(inicio))
    print(fibonacci(inicio))
    restart = fibonacci()
    print(restart, id(restart))
| [
"anderson@ferneda.com.br"
] | anderson@ferneda.com.br |
9b53ae79a71c0ca8f4401dd807a2369dca415e84 | 90736089f21562da1fb189aa80e6ba1012682aa5 | /gs-lab-manual/game-asteroids/asteroids.py | f0c3a8dd4a7eda803c89623d67a4ac3a484ebd4c | [] | no_license | CapaFenLisesi/Physics-For-Video-Games | e9e1a1d924f867e9bee05fae0d4557fc2abe97ad | 8ca7dda24977407055eba7d29d9da5970432ff77 | refs/heads/master | 2021-01-22T15:16:13.635568 | 2016-06-11T02:24:52 | 2016-06-11T02:24:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,309 | py | from visual import *
import random
#converts an angle in degrees to an angle in radians
def rad(degrees):
    """Convert an angle in degrees to radians."""
    # math.radians avoids relying on `pi` leaking from `from visual import *`.
    import math
    return math.radians(degrees)
#pause and wait for mouse or keyboard event, then continue
def pause():
    """Block until a left mouse click or any key press, then return."""
    while True:
        rate(50)  # cap the polling loop at 50 Hz
        if scene.mouse.events:
            m = scene.mouse.getevent()
            if m.click == 'left': return
        elif scene.kb.keys:
            k = scene.kb.getkey()
            return
#checks for a collision between two spheres
def collisionSpheres(sphere1, sphere2):
    """Return True when the two spheres overlap (center distance is less
    than the sum of their radii)."""
    separation = mag(sphere1.pos - sphere2.pos)
    return separation < sphere1.radius + sphere2.radius
#checks for a collision between a cone and a sphere
def collisionConeSphere(c, s):
    """Approximate cone/sphere collision test.

    Samples four points of the cone — base center (c.pos), tip
    (c.pos + c.axis), and the two base-rim points in the x-y plane — and
    reports a hit if any of them lies inside the sphere.

    Fix: the original had a stray `result=False` after the base-center
    check, which silently discarded a hit detected at c.pos.
    """
    # check pos (base center) of cone
    if collisionSphereAndPoint(s, c.pos):
        return True
    # check tip of cone
    tip = c.pos + c.axis
    if collisionSphereAndPoint(s, tip):
        return True
    # check edge of radius in x-y plane 1
    r1 = c.radius * cross(vector(0, 0, 1), norm(c.axis))
    if collisionSphereAndPoint(s, r1 + c.pos):
        return True
    # check edge of radius in x-y plane 2
    r2 = -c.radius * cross(vector(0, 0, 1), norm(c.axis))
    if collisionSphereAndPoint(s, r2 + c.pos):
        return True
    return False
#determines whether a point is within a sphere or not
#returns boolean
def collisionSphereAndPoint(sphereObj, targetVector):
    """Return True when targetVector lies strictly inside sphereObj."""
    return mag(sphereObj.pos - targetVector) < sphereObj.radius
#creates four asteroids, one on each side of the scene
def createAsteroids():
    """Spawn four asteroids, one entering from each edge of the scene,
    with randomized positions along the edge and randomized velocities
    aimed into the play field.  Appends them to the global asteroidList."""
    #asteroid comes from the right
    asteroid=sphere(pos=vector(20,0,0), radius=1, color=color.cyan)
    asteroid.pos.y=random.randrange(-20,20,5)
    asteroid.m=1
    asteroid.v=vector(0,0,0)
    asteroid.v.x=-random.randint(1,5)
    asteroid.v.y=random.choice((1,-1))*random.randint(1,5)
    asteroidList.append(asteroid)
    #asteroid comes from the left
    asteroid=sphere(pos=vector(-20,0,0), radius=1, color=color.cyan)
    asteroid.pos.y=random.randrange(-20,20,5)
    asteroid.m=1
    asteroid.v=vector(0,0,0)
    asteroid.v.x=random.randint(1,5)
    asteroid.v.y=random.choice((1,-1))*random.randint(1,5)
    asteroidList.append(asteroid)
    #asteroid comes from the top
    asteroid=sphere(pos=vector(0,20,0), radius=1, color=color.cyan)
    asteroid.pos.x=random.randrange(-20,20,5)
    asteroid.m=1
    asteroid.v=vector(0,0,0)
    asteroid.v.x=random.choice((1,-1))*random.randint(1,5)
    asteroid.v.y=-random.randint(1,5)
    asteroidList.append(asteroid)
    #asteroid comes from the bottom
    asteroid=sphere(pos=vector(0,-20,0), radius=1, color=color.cyan)
    asteroid.pos.x=random.randrange(-20,20,5)
    asteroid.m=1
    asteroid.v=vector(0,0,0)
    asteroid.v.x=random.choice((1,-1))*random.randint(1,5)
    asteroid.v.y=random.randint(1,5)
    asteroidList.append(asteroid)
def createFragments(asteroid):
    """Split a destroyed asteroid into two half-mass fragments at its
    position and append them to the global fragmentList."""
    fragment1=sphere(pos=asteroid.pos, radius=0.5, color=color.magenta)
    fragment2=sphere(pos=asteroid.pos, radius=0.5, color=color.magenta)
    fragment1.m=0.5
    fragment2.m=0.5
    fragment1.v=vector(0,0,0)
    fragment1.v.x=random.choice((1,-1))*random.randint(1,5)
    fragment1.v.y=random.choice((1,-1))*random.randint(1,5)
    # Momentum conservation with m1 = m2 = M/2:  v2 = 2*V - v1.
    fragment2.v=2*asteroid.v-fragment1.v
    fragmentList.append(fragment1)
    fragmentList.append(fragment2)
#scene size
# ---- scene setup -----------------------------------------------------------
scene.range=20
scene.width=700
scene.height=700
#create the spaceship as a cone, with a smaller cone as the thruster flame
spaceship = cone(pos=(0,0,0), axis=(2,0,0), radius=1, color=color.white)
fire = cone(pos=(0,0,0), axis=-spaceship.axis/2, radius=spaceship.radius/2, color=color.orange)
#initial values for mass, velocity, thrust, and net force
spaceship.m=1
spaceship.v=vector(0,0,0)
thrust=0
Fnet=vector(0,0,0)
#bullets
bulletspeed=10
bulletsList=[]
#angle to rotate per key press
dtheta=rad(10)
#clock
t=0
dt=0.005
#asteroids
Nleft=0 #counter for number of asteroids left in the scene
asteroidList=[]
createAsteroids()
#fragments
fragmentList=[]

# ---- main game loop: runs until the spaceship is destroyed -----------------
while spaceship.visible==1:
    rate(200)
    # keyboard input: thrust, rotation, fire, pause
    if scene.kb.keys:
        k = scene.kb.getkey()
        if k == "up": #turn thruster on
            thrust=6
        elif k=="left": #rotate left
            spaceship.rotate(angle=-dtheta, axis=(0,0,-1));
        elif k=="right": #rotate right
            spaceship.rotate(angle=dtheta, axis=(0,0,-1));
        elif k==" ": #fire a bullet
            bullet=sphere(pos=spaceship.pos+spaceship.axis, radius=0.1, color=color.yellow)
            bullet.v=bulletspeed*norm(spaceship.axis)+spaceship.v
            bulletsList.append(bullet)
        elif k=="q": #pause the game
            pause()
        else: #turn thruster off
            thrust=0
    # Euler integration of the spaceship's motion
    Fnet=thrust*norm(spaceship.axis)
    spaceship.v=spaceship.v+Fnet/spaceship.m*dt
    spaceship.pos=spaceship.pos+spaceship.v*dt
    fire.pos=spaceship.pos
    fire.axis=-spaceship.axis/2
    #check if the spaceship goes off screen and wrap to the opposite edge
    if spaceship.pos.x>20 or spaceship.pos.x<-20:
        spaceship.pos=spaceship.pos-spaceship.v*dt
        spaceship.pos.x=-spaceship.pos.x
    if spaceship.pos.y>20 or spaceship.pos.y<-20:
        spaceship.pos=spaceship.pos-spaceship.v*dt
        spaceship.pos.y=-spaceship.pos.y
    #update positions of bullets and check if bullets go off screen
    for thisbullet in bulletsList:
        if thisbullet.pos.x>20 or thisbullet.pos.x<-20:
            thisbullet.visible=0
        if thisbullet.pos.y>20 or thisbullet.pos.y<-20:
            thisbullet.visible=0
        if thisbullet.visible != 0:
            thisbullet.pos=thisbullet.pos+thisbullet.v*dt
    #update positions of asteroids
    for thisasteroid in asteroidList:
        if thisasteroid.visible==1:
            thisasteroid.pos=thisasteroid.pos+thisasteroid.v*dt
            #check for collision with spaceship (ends the game)
            if(collisionConeSphere(spaceship,thisasteroid)):
                spaceship.visible=0
                fire.visible=0
            #wrap at edge of screen
            if thisasteroid.pos.x>20 or thisasteroid.pos.x<-20:
                thisasteroid.pos=thisasteroid.pos-thisasteroid.v*dt
                thisasteroid.pos.x=-thisasteroid.pos.x
            if thisasteroid.pos.y>20 or thisasteroid.pos.y<-20:
                thisasteroid.pos=thisasteroid.pos-thisasteroid.v*dt
                thisasteroid.pos.y=-thisasteroid.pos.y
            #check for collision with bullets: destroy and split in two
            for thisbullet in bulletsList:
                if(collisionSpheres(thisbullet,thisasteroid)and thisbullet.visible==1):
                    thisasteroid.visible=0
                    thisbullet.visible=0
                    createFragments(thisasteroid)
    #update positions of fragments (same handling as asteroids)
    for thisfragment in fragmentList:
        if thisfragment.visible==1:
            thisfragment.pos=thisfragment.pos+thisfragment.v*dt
            #check for collision with spaceship
            if(collisionConeSphere(spaceship,thisfragment)):
                spaceship.visible=0
                fire.visible=0
            #wrap at edge of screen
            if thisfragment.pos.x>20 or thisfragment.pos.x<-20:
                thisfragment.pos=thisfragment.pos-thisfragment.v*dt
                thisfragment.pos.x=-thisfragment.pos.x
            if thisfragment.pos.y>20 or thisfragment.pos.y<-20:
                thisfragment.pos=thisfragment.pos-thisfragment.v*dt
                thisfragment.pos.y=-thisfragment.pos.y
            #check for collision with bullets
            for thisbullet in bulletsList:
                if(collisionSpheres(thisbullet,thisfragment)and thisbullet.visible==1):
                    thisfragment.visible=0
                    thisbullet.visible=0
    Nleft=0 #have to reset this before counting asteroids and fragments
    for thisasteroid in asteroidList:
        if thisasteroid.visible:
            Nleft=Nleft+1
    for thisfragment in fragmentList:
        if thisfragment.visible:
            Nleft=Nleft+1
    #create more asteroids if all are gone
    if Nleft==0:
        createAsteroids()
    #update fire (flame is visible only while thrusting and ship is alive)
    if thrust==0 or spaceship.visible==0:
        fire.visible=0
    else:
        fire.visible=1
    t=t+dt
| [
"atitus@highpoint.edu"
] | atitus@highpoint.edu |
8926cbe8d1538cbbd04bf86bf0af6e92ec04783c | adb295bf248ded84d2c126d73c58b570af440dc6 | /scripts/providers.py | 13d8d431cf8b25bd62662d5e17425d61e6862069 | [] | no_license | sshveta/cfme_tests | eaeaf0076e87dd6c2c960887b242cb435cab5151 | 51bb86fda7d897e90444a6a0380a5aa2c61be6ff | refs/heads/master | 2021-03-30T22:30:12.476326 | 2017-04-26T22:47:25 | 2017-04-26T22:47:25 | 17,754,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | #!/usr/bin/env python
"""
Given the name of a provider from cfme_data and using credentials from
the credentials stash, call the corresponding action on that provider, along
with any additional action arguments.
See cfme_pages/common/mgmt_system.py for documentation on the callable methods
themselves.
Example usage:
scripts/providers.py providername stop_vm vm-name
Note that attempts to be clever will likely be successful, but fruitless.
For example, this will work but not do anyhting helpful:
scripts/providers.py providername __init__ username password
"""
import argparse
import os
import sys
# Make sure the parent dir is on the path before importing provider_factory
cfme_tests_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, cfme_tests_path)
from utils.providers import provider_factory
def main():
    """Parse CLI args, invoke the named provider action, and translate the
    result into a shell exit code (0 success, 1 failure)."""
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('provider_name',
        help='provider name in cfme_data')
    parser.add_argument('action',
        help='action to take (list_vm, stop_vm, delete_vm, etc.)')
    parser.add_argument('action_args', nargs='*',
        help='foo')
    args = parser.parse_args()
    try:
        result = call_provider(args.provider_name, args.action, *args.action_args)
        # Map the return value onto an exit code by type.
        if isinstance(result, list):
            exit = 0
            for entry in result:
                print entry
        elif isinstance(result, str):
            exit = 0
            print result
        elif isinstance(result, bool):
            # 'True' result becomes flipped exit 0, and vice versa for False
            exit = int(not result)
        else:
            # Unknown type, explode
            raise Exception('Unknown return type for "%s"' % args.action)
    except Exception as e:
        # Print the exception type (and message, if any) to stderr.
        exit = 1
        exc_type = type(e).__name__
        if e.message:
            sys.stderr.write('%s: %s\n' % (exc_type, e.message))
        else:
            sys.stderr.write('%s\n' % exc_type)
    return exit
def call_provider(provider_name, action, *args):
    """Look up *action* on the named provider's mgmt object and invoke it
    with *args*.  Raises Exception when the action does not exist."""
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    provider = provider_factory(provider_name)
    method = getattr(provider, action, None)
    if method is None:
        raise Exception('Action "%s" not found' % action)
    return method(*args)
if __name__ == '__main__':
    # Propagate main()'s exit value to the shell.
    sys.exit(main())
| [
"sean.myers@redhat.com"
] | sean.myers@redhat.com |
9394abdf60ce16f65a8a9c354b6dc08b0a490f42 | 3dd0172a975a7048edb6e0e3ea13961f94ead149 | /src/mscgfm_check.py | 083f31012f9e7755dbb953eed40c7fb1e93240f4 | [] | no_license | uchicago-voth/mscg_regression_tests | 9656c26f7299eead0a926d7981ec6b0b82df21fc | 825266138b6c591886f6c345c8bf3aeba1fab46d | refs/heads/master | 2021-06-03T18:25:42.360348 | 2020-06-30T22:58:05 | 2020-06-30T22:58:05 | 97,156,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | import numpy as np
def _report_residuals(a, b, prefix, tol, noun, abs_label, rel_label):
    """Print mismatch diagnostics for one array pair; return True when the
    mean relative residual exceeds *tol* (i.e. the pair fails)."""
    print(prefix + "Warning: {} don't match bit for bit.".format(noun))
    sqdiff_mat = (a - b) ** 2
    min_mat = np.minimum(abs(a), abs(b))
    # Absolute residuals (RMS and max of the element-wise differences).
    residual = sqdiff_mat.mean() ** 0.5
    max_residual = sqdiff_mat.max() ** 0.5
    print(prefix + "Warning: RMS {} residual: {}".format(abs_label, residual))
    print(prefix + "Warning: Max {} residual: {}".format(abs_label, max_residual))
    # Relative residuals, normalized by the smaller magnitude element-wise.
    # NOTE(review): divides by zero when an element is 0 in both arrays'
    # minimum — numpy yields inf/nan with a RuntimeWarning; behavior kept.
    rel_residual_mat = sqdiff_mat ** 0.5 / min_mat
    residual = rel_residual_mat.mean()
    max_residual = rel_residual_mat.max()
    print(prefix + "Warning: Mean relative {} residual: {}".format(rel_label, residual))
    print(prefix + "Warning: Max relative {} residual: {}".format(rel_label, max_residual))
    print(prefix + "First sqdiff coordinate frame: {}".format(a - b))
    violations = np.nonzero((a - b) > tol)
    print(prefix + "Indices violating residual ({}): {}".format(tol, violations))
    return residual > tol


def mscg_content_equality(dat_1, dat_2, prefix="Data File equality: ", xyz_abs_tol=1e-8):
    """Compare two (coordinates, forces) array pairs.

    Returns True when both arrays match bit-for-bit, or when any mismatch
    stays within *xyz_abs_tol* (judged on the mean relative residual).
    Prints detailed diagnostics for each mismatching pair.

    The duplicated coordinate/force diagnostic blocks of the original were
    factored into _report_residuals; printed messages are unchanged.
    """
    result = True
    if not np.array_equal(dat_1[0], dat_2[0]):
        if _report_residuals(dat_1[0], dat_2[0], prefix, xyz_abs_tol,
                             "Coordinates", "coordinate", "coordinate"):
            result = False
    if not np.array_equal(dat_1[1], dat_2[1]):
        if _report_residuals(dat_1[1], dat_2[1], prefix, xyz_abs_tol,
                             "Forces", "Force", "force"):
            result = False
    return result
def check_result_to_exitval(result):
    '''Transforms boolean to command line exit value.
    True -> 0, False -> 1. No guard logic.
    '''
    return 0 if result else 1
| [
"mocohen@uchicago.edu"
] | mocohen@uchicago.edu |
3c413b53ad95c4028caa2456ffe78e4b195e2572 | d17c8bf9f9b3a075ed239b7adef4b4130be073a5 | /ecom/api/order/migrations/0002_order_is_paid.py | aee3b0abaec8148f2cc353b9abcd02b505434b1e | [] | no_license | joilshubham/Nerdy-T-shirt-store | 3daf3f45b57160d7aec5686648eb8f70da08854c | a2162d58929284a806b507a506fbb6de8720365d | refs/heads/master | 2023-04-27T20:31:01.077384 | 2021-05-11T02:48:08 | 2021-05-11T02:48:08 | 354,576,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # Generated by Django 3.1.7 on 2021-04-03 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='order',
name='is_paid',
field=models.BooleanField(default=False),
),
]
| [
"shubham.joil@stits.co"
] | shubham.joil@stits.co |
129f43f2cbae6b867ee668ca2a8e26db25bc6b6a | 1e22dfbff2874d376620cd49cf6b934ceac2a7d3 | /traffic_fluctuation.py | 54b066ec465957a8077d1996920ad69294cf56cd | [] | no_license | rodrigo-tinini/CFRAN-Simulator | 737a296ced2bd2bfe683f20b4c00452301d1c8c0 | 6151eb50f034ffe922b79dde5cc4d33584e066cd | refs/heads/master | 2021-06-22T11:25:00.016319 | 2019-06-26T20:03:47 | 2019-06-26T20:03:47 | 115,743,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | import simpy
import functools
import random as np
import time
from enum import Enum
from scipy.stats import norm
#inter arrival rate of the users requests
arrival_rate = 1
#distribution for arrival of packets (NOTE: `np` is the `random` module,
#aliased at import time — a confusing alias worth renaming eventually)
distribution = lambda x: np.expovariate(1/arrival_rate)
#service time of a request (uniform 1..599 time units)
service_time = lambda x: np.randint(1, 600)
#total generated requests per timestamp
total_period_requests = 0
#timestamp to change the load
change_time = 15
#traffic generator - generates requests considering the distribution
class Traffic_Generator(object):
    """Generates Request objects with exponential inter-arrival times and
    periodically lowers the global arrival rate (load fluctuation)."""

    def __init__(self, env, distribution, service, cp):
        self.env = env
        self.dist = distribution   # inter-arrival time sampler
        self.service = service     # service time sampler passed to Requests
        self.cp = cp               # control plane receiving the requests
        self.req_count = 0
        # Start both coroutines on the simpy environment.
        self.action = self.env.process(self.run())
        self.load_variation = self.env.process(self.change_load())

    #generation of requests
    def run(self):
        global total_period_requests
        while True:
            yield self.env.timeout(self.dist(self))
            self.req_count += 1
            total_period_requests +=1
            r = Request(self.env, self.req_count, self.service, self.cp)
            self.cp.requests.put(r)

    #changing of load: every change_time units, shrink the arrival rate
    def change_load(self):
        while True:
            global arrival_rate
            global total_period_requests
            yield self.env.timeout(change_time)
            arrival_rate -= 0.005
            print("Arrival rate now is {} at {} and was generated {}".format(arrival_rate, self.env.now, total_period_requests))
            total_period_requests = 0
#user request
class Request(object):
    """A user request: holds for its sampled service time, then queues
    itself on the control plane's departure store."""

    def __init__(self, env, aId, service, cp):
        self.env = env
        self.id = aId
        self.service_time = service
        self.cp = cp
        #self.action = self.env.process(self.run())

    #executes this request and send it to deallocation after its service time
    def run(self):
        yield self.env.timeout(self.service_time(self))
        print("Request {} departing".format(self.id))
        self.cp.departs.put(self)
#control plane that controls the allocations and deallocations
class Control_Plane(object):
    """Controls allocation of incoming requests and deallocation of
    departing ones via two simpy stores."""

    def __init__(self, env):
        self.env = env
        self.requests = simpy.Store(self.env)
        self.departs = simpy.Store(self.env)
        self.action = self.env.process(self.run())
        # Fix: the departure consumer was never scheduled, so requests put
        # into `departs` were never deallocated.  Start it alongside run().
        self.depart_action = self.env.process(self.depart_request())

    #take requests and tries to allocate
    def run(self):
        while True:
            r = yield self.requests.get()
            print("Allocating request {}".format(r.id))
            self.env.process(r.run())

    #starts the deallocation of a request
    def depart_request(self):
        while True:
            r = yield self.departs.get()
            print("Deallocating request {}".format(r.id))
# Wire up the simulation and run for 3600 simulated time units.
env = simpy.Environment()
cp = Control_Plane(env)
t = Traffic_Generator(env, distribution, service_time, cp)
# NOTE(review): "\B" / "\E" below are literal backslashes (likely "\n" typos).
print("\Begin at "+str(env.now))
env.run(until = 3600)
print("Total generated requests {}".format(t.req_count))
print("\End at "+str(env.now))
"tinini.89@gmail.com"
] | tinini.89@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.