text
stringlengths 1
93.6k
|
|---|
# Will loop over resulted elements to get text too to make comparison even more fair otherwise Scrapling will be even faster
|
return [
|
element.text for element in Adaptor(
|
request_html, auto_match=False
|
).find_by_text('Tipping the Velvet', first_match=True).find_similar(ignore_attributes=['title'])
|
]
|
@benchmark
def test_autoscraper(request_html):
    """Benchmark AutoScraper building a scraper from one example string.

    AutoScraper's build() already yields element text directly, so no
    extra text-extraction pass is required for a fair comparison.
    """
    scraper = AutoScraper()
    return scraper.build(html=request_html, wanted_list=['Tipping the Velvet'])
|
if __name__ == "__main__":
    # Benchmark 1: parse a large document and pull text from 5000 nested elements.
    print(' Benchmark: Speed of parsing and retrieving the text content of 5000 nested elements \n')
    parsing_benchmarks = [
        ("Raw Lxml", test_lxml),
        ("Parsel/Scrapy", test_parsel),
        ("Scrapling", test_scrapling),
        ('Selectolax', test_selectolax),
        ("PyQuery", test_pyquery),
        ("BS4 with Lxml", test_bs4_lxml),
        ("MechanicalSoup", test_mechanicalsoup),
        ("BS4 with html5lib", test_bs4_html5lib),
    ]
    # Dict comprehension runs each benchmark in declaration order.
    display({name: run() for name, run in parsing_benchmarks})

    print('\n' + "=" * 25)

    # Benchmark 2 needs a live page to search by text content.
    req = requests.get('https://books.toscrape.com/index.html')
    print(
        ' Benchmark: Speed of searching for an element by text content, and retrieving the text of similar elements\n'
    )
    text_search_benchmarks = [
        ("Scrapling", test_scrapling_text),
        ("AutoScraper", test_autoscraper),
    ]
    display({name: run(req.text) for name, run in text_search_benchmarks})
|
# <FILESEP>
|
import tensorflow as tf
|
import numpy as np
|
import random, os
|
from tensorflow.contrib import slim
|
import cv2
|
class ImageData:
    """Input-pipeline helper: decode a JPEG, resize, normalize to [-1, 1],
    and (optionally) apply random flip/crop augmentation via `augmentation`."""

    def __init__(self, img_height, img_width, channels, augment_flag):
        # Target spatial size, channel count, and augmentation toggle.
        self.img_height = img_height
        self.img_width = img_width
        self.channels = channels
        self.augment_flag = augment_flag

    def image_processing(self, filename):
        """Build the TF graph ops that turn a file path into a normalized image tensor."""
        raw = tf.read_file(filename)
        decoded = tf.image.decode_jpeg(raw, channels=self.channels, dct_method='INTEGER_ACCURATE')
        image = tf.image.resize_images(decoded, [self.img_height, self.img_width])
        # Map pixel values from [0, 255] into [-1, 1].
        image = tf.cast(image, tf.float32) / 127.5 - 1

        if self.augment_flag:
            # Enlarge-then-crop margin: fixed 30px when the side is 256, else 10%.
            margin_h = 30 if self.img_height == 256 else int(self.img_height * 0.1)
            margin_w = 30 if self.img_width == 256 else int(self.img_width * 0.1)
            # Apply augmentation with probability 0.5 per graph execution.
            coin_flip = tf.greater_equal(tf.random_uniform(shape=[], minval=0.0, maxval=1.0), 0.5)
            image = tf.cond(pred=coin_flip,
                            true_fn=lambda: augmentation(image, self.img_height + margin_h, self.img_width + margin_w),
                            false_fn=lambda: image)

        return image
|
def load_test_image(image_path, img_width, img_height, img_channel):
    """Load one image from disk as a batched numpy array normalized to [-1, 1].

    Args:
        image_path: Path of the image file to read.
        img_width: Width passed to cv2.resize.
        img_height: Height passed to cv2.resize.
        img_channel: 1 to load grayscale; any other value loads BGR and
            converts to RGB.

    Returns:
        np.ndarray of shape (1, img_height, img_width, 1) for grayscale or
        (1, img_height, img_width, 3) for color, dtype float, values in [-1, 1].

    Raises:
        FileNotFoundError: if the file cannot be read. cv2.imread signals
            failure by returning None instead of raising, which previously
            surfaced later as a confusing cvtColor/resize error.
    """
    if img_channel == 1:
        img = cv2.imread(image_path, flags=cv2.IMREAD_GRAYSCALE)
    else:
        img = cv2.imread(image_path, flags=cv2.IMREAD_COLOR)
    if img is None:
        raise FileNotFoundError('Could not read image: {}'.format(image_path))
    if img_channel != 1:
        # OpenCV loads color images as BGR; the model expects RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, dsize=(img_width, img_height))
    # Add the batch axis; grayscale also needs a trailing channel axis.
    img = np.expand_dims(img, axis=0)
    if img_channel == 1:
        img = np.expand_dims(img, axis=-1)
    # Map pixel values from [0, 255] into [-1, 1].
    img = img / 127.5 - 1
    return img
|
def augmentation(image, augment_height, augment_width):
    """Augment: random horizontal flip, upscale, then random-crop back
    to the image's original shape. One seed is drawn per call and passed
    to both random ops."""
    shared_seed = random.randint(0, 2 ** 31 - 1)
    original_shape = tf.shape(image)
    flipped = tf.image.random_flip_left_right(image, seed=shared_seed)
    enlarged = tf.image.resize_images(flipped, [augment_height, augment_width])
    return tf.random_crop(enlarged, original_shape, seed=shared_seed)
|
def save_images(images, size, image_path):
    # Presumably de-normalizes `images` and writes them to `image_path` as a
    # `size` grid. NOTE(review): `imsave` and `inverse_transform` are not
    # defined or imported anywhere in the visible portion of this file —
    # confirm they exist elsewhere in the module, otherwise this raises
    # NameError at call time.
    return imsave(inverse_transform(images), size, image_path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.