_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q19600
|
Utils.wait_until_element_stops
|
train
|
def wait_until_element_stops(self, element, times=1000, timeout=None):
    """Search element and wait until it has stopped moving

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param times: number of iterations checking the element's location that must be the same for all of them
                  in order to considering the element has stopped
    :param timeout: max time to wait
    :returns: the web element if the element is stopped
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element does not stop after the timeout
    """
    return self._wait_until(self._expected_condition_find_element_stopped, (element, times), timeout)
|
python
|
{
"resource": ""
}
|
q19601
|
Utils.wait_until_element_contains_text
|
train
|
def wait_until_element_contains_text(self, element, text, timeout=None):
    """Search element and wait until the expected text is contained in it

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param text: text expected to be contained into the element
    :param timeout: max time to wait
    :returns: the web element if it contains the expected text
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element does not contain the expected text after the timeout
    """
    expected_condition = self._expected_condition_find_element_containing_text
    return self._wait_until(expected_condition, (element, text), timeout)
|
python
|
{
"resource": ""
}
|
q19602
|
Utils.wait_until_element_not_contain_text
|
train
|
def wait_until_element_not_contain_text(self, element, text, timeout=None):
    """Search element and wait until it does not contain the expected text

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param text: text expected not to be contained into the element
    :param timeout: max time to wait
    :returns: the web element if it does not contain the given text
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element still contains the expected text after the timeout
    """
    return self._wait_until(self._expected_condition_find_element_not_containing_text, (element, text), timeout)
|
python
|
{
"resource": ""
}
|
q19603
|
Utils.wait_until_element_attribute_is
|
train
|
def wait_until_element_attribute_is(self, element, attribute, value, timeout=None):
    """Search element and wait until the requested attribute contains the expected value

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param attribute: attribute belonging to the element
    :param value: expected value for the attribute of the element
    :param timeout: max time to wait
    :returns: the web element if the element's attribute contains the expected value
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element's attribute does not contain the expected value after the timeout
    """
    expected_condition = self._expected_condition_value_in_element_attribute
    return self._wait_until(expected_condition, (element, attribute, value), timeout)
|
python
|
{
"resource": ""
}
|
q19604
|
Utils.get_remote_node
|
train
|
def get_remote_node(self):
    """Return the remote node that is executing the current test session

    :returns: tuple with server type ('local', 'grid', 'ggr', 'selenoid' or 'selenium') and remote node name
    """
    # Silence per-request log lines emitted while probing the server
    logging.getLogger("requests").setLevel(logging.WARNING)
    remote_node = None
    server_type = 'local'
    if self.driver_wrapper.config.getboolean_optional('Server', 'enabled'):
        # Request session info from grid hub
        session_id = self.driver_wrapper.driver.session_id
        self.logger.debug("Trying to identify remote node")
        # Server type is detected by trial and error: grid hub first, then GGR,
        # then Selenoid, falling back to plain Selenium when every probe fails
        try:
            # Request session info from grid hub and extract remote node
            url = '{}/grid/api/testsession?session={}'.format(self.get_server_url(),
                                                              session_id)
            proxy_id = requests.get(url).json()['proxyId']
            # proxyId may be a plain node name or a full url; keep only the hostname in the latter case
            remote_node = urlparse(proxy_id).hostname if urlparse(proxy_id).hostname else proxy_id
            server_type = 'grid'
            self.logger.debug("Test running in remote node %s", remote_node)
        except (ValueError, KeyError):
            try:
                # Request session info from GGR and extract remote node
                from toolium.selenoid import Selenoid
                remote_node = Selenoid(self.driver_wrapper).get_selenoid_info()['Name']
                server_type = 'ggr'
                self.logger.debug("Test running in a GGR remote node %s", remote_node)
            except Exception:
                try:
                    # The remote node is a Selenoid node
                    # Probe: this raises unless the endpoint answers like a Selenoid /status response
                    url = '{}/status'.format(self.get_server_url())
                    requests.get(url).json()['total']
                    remote_node = self.driver_wrapper.config.get('Server', 'host')
                    server_type = 'selenoid'
                    self.logger.debug("Test running in a Selenoid node %s", remote_node)
                except Exception:
                    # The remote node is not a grid node or the session has been closed
                    remote_node = self.driver_wrapper.config.get('Server', 'host')
                    server_type = 'selenium'
                    self.logger.debug("Test running in a Selenium node %s", remote_node)
    return server_type, remote_node
|
python
|
{
"resource": ""
}
|
q19605
|
Utils.get_server_url
|
train
|
def get_server_url(self):
    """Return the configured server url

    :returns: server url, including basic auth credentials when both username and password are configured
    """
    config = self.driver_wrapper.config
    host = config.get('Server', 'host')
    port = config.get('Server', 'port')
    username = config.get_optional('Server', 'username')
    password = config.get_optional('Server', 'password')
    credentials = '{}:{}@'.format(username, password) if username and password else ''
    return 'http://{}{}:{}'.format(credentials, host, port)
|
python
|
{
"resource": ""
}
|
q19606
|
Utils.download_remote_video
|
train
|
def download_remote_video(self, remote_node, session_id, video_name):
    """Download the video recorded in the remote node during the specified test session and save it in videos folder

    :param remote_node: remote node name
    :param session_id: test session id
    :param video_name: video name
    """
    try:
        video_url = self._get_remote_video_url(remote_node, session_id)
    except requests.exceptions.ConnectionError:
        # Node does not expose the grid-extras video endpoint at all
        self.logger.warning("Remote server seems not to have video capabilities")
        return
    if video_url:
        self._download_video(video_url, video_name)
    else:
        self.logger.warning("Test video not found in node '%s'", remote_node)
|
python
|
{
"resource": ""
}
|
q19607
|
Utils._get_remote_node_url
|
train
|
def _get_remote_node_url(self, remote_node):
"""Get grid-extras url of a node
:param remote_node: remote node name
:returns: grid-extras url
"""
logging.getLogger("requests").setLevel(logging.WARNING)
gridextras_port = 3000
return 'http://{}:{}'.format(remote_node, gridextras_port)
|
python
|
{
"resource": ""
}
|
q19608
|
Utils._get_remote_video_url
|
train
|
def _get_remote_video_url(self, remote_node, session_id):
    """Get grid-extras url to download videos

    :param remote_node: remote node name
    :param session_id: test session id
    :returns: grid-extras url to download videos, or None if not found within 5 seconds
    """
    videos_endpoint = '{}/video'.format(self._get_remote_node_url(remote_node))
    deadline = time.time() + 5  # poll the endpoint for at most 5 seconds
    # The video may not be registered immediately, so retry once per second
    while time.time() < deadline:
        videos = requests.get(videos_endpoint).json()
        try:
            return videos['available_videos'][session_id]['download_url']
        except KeyError:
            time.sleep(1)
    return None
|
python
|
{
"resource": ""
}
|
q19609
|
Utils._download_video
|
train
|
def _download_video(self, video_url, video_name):
    """Download a video from the remote node and save it in the videos folder

    :param video_url: video url
    :param video_name: video name
    """
    # Prefix with the sequential video number so files sort in recording order
    filename = '{0:0=2d}_{1}'.format(DriverWrappersPool.videos_number, video_name)
    filename = '{}.mp4'.format(get_valid_filename(filename))
    filepath = os.path.join(DriverWrappersPool.videos_directory, filename)
    if not os.path.exists(DriverWrappersPool.videos_directory):
        os.makedirs(DriverWrappersPool.videos_directory)
    response = requests.get(video_url)
    # Use a context manager so the file handle is closed even if the write fails
    # (the original open(...).write(...) left closing to the garbage collector)
    with open(filepath, 'wb') as video_file:
        video_file.write(response.content)
    self.logger.info("Video saved in '%s'", filepath)
    DriverWrappersPool.videos_number += 1
|
python
|
{
"resource": ""
}
|
q19610
|
Utils.is_remote_video_enabled
|
train
|
def is_remote_video_enabled(self, remote_node):
    """Check if the remote node has the video recorder enabled

    :param remote_node: remote node name (falsy values mean no remote node, so video is disabled)
    :returns: True if it has the video recorder enabled
    """
    enabled = False
    if remote_node:
        url = '{}/config'.format(self._get_remote_node_url(remote_node))
        try:
            response = requests.get(url, timeout=5).json()
            # The flag comes back as the string 'true'/'false', not a boolean
            record_videos = response['config_runtime']['theConfigMap']['video_recording_options'][
                'record_test_videos']
        except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout, KeyError):
            # Unreachable node or unexpected payload: assume video is disabled
            record_videos = 'false'
        if record_videos == 'true':
            # Wait to the video recorder start
            time.sleep(1)
            enabled = True
    return enabled
|
python
|
{
"resource": ""
}
|
q19611
|
Utils.get_center
|
train
|
def get_center(self, element):
    """Get center coordinates of an element

    :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
    :returns: dict with 'x' and 'y' center coordinates
    """
    web_element = self.get_web_element(element)
    location, size = web_element.location, web_element.size
    return {'x': location['x'] + size['width'] / 2,
            'y': location['y'] + size['height'] / 2}
|
python
|
{
"resource": ""
}
|
q19612
|
Utils.get_safari_navigation_bar_height
|
train
|
def get_safari_navigation_bar_height(self):
    """Get the height of Safari navigation bar

    :returns: height of navigation bar (0 when not an iOS web test)
    """
    # ios 7.1, 8.3
    is_ios_web = self.driver_wrapper.is_ios_test() and self.driver_wrapper.is_web_test()
    return 64 if is_ios_web else 0
|
python
|
{
"resource": ""
}
|
q19613
|
Utils.get_window_size
|
train
|
def get_window_size(self):
    """Generic method to get window size using a javascript workaround for Android web tests

    :returns: dict with window width and height (cached after the first call)
    """
    if self._window_size:
        return self._window_size
    driver_wrapper = self.driver_wrapper
    if driver_wrapper.is_android_web_test() and driver_wrapper.driver.current_context != 'NATIVE_APP':
        # Android web context: read the viewport size through javascript
        self._window_size = {
            'width': driver_wrapper.driver.execute_script("return window.innerWidth"),
            'height': driver_wrapper.driver.execute_script("return window.innerHeight"),
        }
    else:
        self._window_size = driver_wrapper.driver.get_window_size()
    return self._window_size
|
python
|
{
"resource": ""
}
|
q19614
|
Utils.get_native_coords
|
train
|
def get_native_coords(self, coords):
    """Convert web coords into native coords. Assumes that the initial context is WEBVIEW and switches to
    NATIVE_APP context.

    :param coords: dict with web coords, e.g. {'x': 10, 'y': 10}
    :returns: dict with native coords
    """
    web_size = self.get_window_size()
    self.driver_wrapper.driver.switch_to.context('NATIVE_APP')
    native_size = self.driver_wrapper.driver.get_window_size()
    # Scale web coordinates by the width ratio between both contexts
    scale = native_size['width'] / web_size['width']
    offset_y = self.get_safari_navigation_bar_height()
    converted = {'x': coords['x'] * scale, 'y': coords['y'] * scale + offset_y}
    self.logger.debug('Converted web coords %s into native coords %s', coords, converted)
    return converted
|
python
|
{
"resource": ""
}
|
q19615
|
Utils.swipe
|
train
|
def swipe(self, element, x, y, duration=None):
    """Swipe over an element

    :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
    :param x: horizontal movement
    :param y: vertical movement
    :param duration: time to take the swipe, in ms
    :raises Exception: If the current driver is not a mobile (Appium) driver
    """
    if not self.driver_wrapper.is_mobile_test():
        raise Exception('Swipe method is not implemented in Selenium')
    # Get center coordinates of element
    center = self.get_center(element)
    initial_context = self.driver_wrapper.driver.current_context
    if self.driver_wrapper.is_web_test() or initial_context != 'NATIVE_APP':
        # Convert web coords to native coords (get_native_coords also switches the driver to NATIVE_APP)
        center = self.get_native_coords(center)
    # Android needs absolute end coordinates and ios needs movement
    end_x = x if self.driver_wrapper.is_ios_test() else center['x'] + x
    end_y = y if self.driver_wrapper.is_ios_test() else center['y'] + y
    self.driver_wrapper.driver.swipe(center['x'], center['y'], end_x, end_y, duration)
    if self.driver_wrapper.is_web_test() or initial_context != 'NATIVE_APP':
        # Restore the context that was active before the swipe
        self.driver_wrapper.driver.switch_to.context(initial_context)
|
python
|
{
"resource": ""
}
|
q19616
|
Utils.get_web_element
|
train
|
def get_web_element(self, element):
    """Return the web element from a page element or its locator

    :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
    :returns: WebElement object, or None when the argument is none of the supported types
    """
    from toolium.pageelements.page_element import PageElement
    if isinstance(element, WebElement):
        return element
    if isinstance(element, PageElement):
        return element.web_element
    if isinstance(element, tuple):
        # Locator tuple: resolve it through the driver
        return self.driver_wrapper.driver.find_element(*element)
    return None
|
python
|
{
"resource": ""
}
|
q19617
|
Utils.get_first_webview_context
|
train
|
def get_first_webview_context(self):
    """Return the first WEBVIEW context or raise an exception if it is not found

    :returns: first WEBVIEW context
    :raises Exception: If no WEBVIEW context is available
    """
    all_contexts = self.driver_wrapper.driver.contexts
    first_webview = next((context for context in all_contexts if context.startswith('WEBVIEW')), None)
    if first_webview:
        return first_webview
    raise Exception('No WEBVIEW context has been found')
|
python
|
{
"resource": ""
}
|
q19618
|
DriverWrappersPool.capture_screenshots
|
train
|
def capture_screenshots(cls, name):
    """Capture a screenshot in each driver

    :param name: screenshot name suffix
    """
    # Suffix screenshots with the driver index only when several drivers are configured
    name_template = '{}_driver{}' if len(cls.driver_wrappers) > 1 else '{}'
    # Only wrappers with a live driver are captured (and counted)
    wrappers_with_driver = (wrapper for wrapper in cls.driver_wrappers if wrapper.driver)
    for driver_index, driver_wrapper in enumerate(wrappers_with_driver, start=1):
        from toolium.jira import add_attachment
        try:
            screenshot_path = driver_wrapper.utils.capture_screenshot(name_template.format(name, driver_index))
            add_attachment(screenshot_path)
        except Exception:
            # Capture exceptions to avoid errors in teardown method due to session timeouts
            pass
|
python
|
{
"resource": ""
}
|
q19619
|
DriverWrappersPool.connect_default_driver_wrapper
|
train
|
def connect_default_driver_wrapper(cls, config_files=None):
    """Get default driver wrapper, configure it and connect driver

    :param config_files: driver wrapper specific config files
    :returns: default driver wrapper
    :rtype: toolium.driver_wrapper.DriverWrapper
    """
    driver_wrapper = cls.get_default_wrapper()
    if driver_wrapper.driver:
        # Driver already connected: reuse it as-is
        return driver_wrapper
    driver_wrapper.configure(DriverWrappersPool.initialize_config_files(config_files))
    driver_wrapper.connect()
    return driver_wrapper
|
python
|
{
"resource": ""
}
|
q19620
|
DriverWrappersPool.close_drivers
|
train
|
def close_drivers(cls, scope, test_name, test_passed=True, context=None):
    """Stop all drivers, capture screenshots, copy webdriver and GGR logs and download saved videos

    :param scope: execution scope (function, module, class or session)
    :param test_name: executed test name
    :param test_passed: True if the test has passed
    :param context: behave context
    """
    if scope == 'function':
        # Capture screenshot on error
        if not test_passed:
            cls.capture_screenshots(test_name)
        # Execute behave dynamic environment
        if context and hasattr(context, 'dyn_env'):
            context.dyn_env.execute_after_scenario_steps(context)
        # Save webdriver logs on error or if it is enabled
        cls.save_all_webdriver_logs(test_name, test_passed)
    # Close browser and stop driver if it must not be reused
    reuse_driver = cls.get_default_wrapper().should_reuse_driver(scope, test_passed, context)
    cls.stop_drivers(reuse_driver)
    # Videos and GGR logs are downloaded after stopping drivers and before removing the wrappers
    cls.download_videos(test_name, test_passed, reuse_driver)
    cls.save_all_ggr_logs(test_name, test_passed)
    cls.remove_drivers(reuse_driver)
|
python
|
{
"resource": ""
}
|
q19621
|
DriverWrappersPool.stop_drivers
|
train
|
def stop_drivers(cls, maintain_default=False):
    """Stop all drivers except default if it should be reused

    :param maintain_default: True if the default driver should not be closed
    """
    # Exclude first wrapper if the driver must be reused
    driver_wrappers = cls.driver_wrappers[1:] if maintain_default else cls.driver_wrappers
    for driver_wrapper in driver_wrappers:
        if not driver_wrapper.driver:
            continue
        try:
            driver_wrapper.driver.quit()
        except Exception as e:
            # Logger.warn is a deprecated alias since Python 3.3; use warning instead
            driver_wrapper.logger.warning(
                "Capture exceptions to avoid errors in teardown method due to session timeouts: \n %s" % e)
|
python
|
{
"resource": ""
}
|
q19622
|
DriverWrappersPool.download_videos
|
train
|
def download_videos(cls, name, test_passed=True, maintain_default=False):
    """Download saved videos if video is enabled or if test fails

    :param name: destination file name
    :param test_passed: True if the test has passed
    :param maintain_default: True if the default driver should not be closed
    """
    # Exclude first wrapper if the driver must be reused
    driver_wrappers = cls.driver_wrappers[1:] if maintain_default else cls.driver_wrappers
    # Suffix with the driver index only when several drivers are open; prefix failures with 'error_'
    video_name = '{}_driver{}' if len(driver_wrappers) > 1 else '{}'
    video_name = video_name if test_passed else 'error_{}'.format(video_name)
    driver_index = 1
    for driver_wrapper in driver_wrappers:
        if not driver_wrapper.driver:
            continue
        try:
            # Download video if necessary (error case or enabled video)
            if (not test_passed or driver_wrapper.config.getboolean_optional('Server', 'video_enabled', False)) \
                    and driver_wrapper.remote_node_video_enabled:
                if driver_wrapper.server_type in ['ggr', 'selenoid']:
                    name = get_valid_filename(video_name.format(name, driver_index))
                    Selenoid(driver_wrapper).download_session_video(name)
                elif driver_wrapper.server_type == 'grid':
                    # Download video from Grid Extras
                    driver_wrapper.utils.download_remote_video(driver_wrapper.remote_node,
                                                               driver_wrapper.session_id,
                                                               video_name.format(name, driver_index))
        except Exception as exc:
            # Capture exceptions to avoid errors in teardown method due to session timeouts
            # Logger.warn is a deprecated alias since Python 3.3; use warning instead
            driver_wrapper.logger.warning('Error downloading videos: %s' % exc)
        driver_index += 1
|
python
|
{
"resource": ""
}
|
q19623
|
DriverWrappersPool.save_all_webdriver_logs
|
train
|
def save_all_webdriver_logs(cls, test_name, test_passed):
    """Get all webdriver logs of each driver and write them to log files

    :param test_name: test that has generated these logs
    :param test_passed: True if the test has passed
    """
    log_name = '{} [driver {}]' if len(cls.driver_wrappers) > 1 else '{}'
    driver_index = 1
    for driver_wrapper in cls.driver_wrappers:
        # GGR/Selenoid sessions are handled by save_all_ggr_logs instead
        if not driver_wrapper.driver or driver_wrapper.server_type in ['ggr', 'selenoid']:
            continue
        if driver_wrapper.config.getboolean_optional('Server', 'logs_enabled') or not test_passed:
            try:
                driver_wrapper.utils.save_webdriver_logs(log_name.format(test_name, driver_index))
            except Exception as exc:
                # Capture exceptions to avoid errors in teardown method due to session timeouts
                # Logger.warn is a deprecated alias since Python 3.3; use warning instead
                driver_wrapper.logger.warning('Error downloading webdriver logs: %s' % exc)
        driver_index += 1
|
python
|
{
"resource": ""
}
|
q19624
|
DriverWrappersPool.save_all_ggr_logs
|
train
|
def save_all_ggr_logs(cls, test_name, test_passed):
    """Get all GGR logs of each driver and write them to log files

    :param test_name: test that has generated these logs
    :param test_passed: True if the test has passed
    """
    log_name = '{} [driver {}]' if len(cls.driver_wrappers) > 1 else '{}'
    driver_index = 1
    for driver_wrapper in cls.driver_wrappers:
        # Only GGR/Selenoid sessions are handled here; the rest use save_all_webdriver_logs
        if not driver_wrapper.driver or driver_wrapper.server_type not in ['ggr', 'selenoid']:
            continue
        try:
            if driver_wrapper.config.getboolean_optional('Server', 'logs_enabled') or not test_passed:
                name = get_valid_filename(log_name.format(test_name, driver_index))
                Selenoid(driver_wrapper).download_session_log(name)
        except Exception as exc:
            # Capture exceptions to avoid errors in teardown method due to session timeouts
            # Logger.warn is a deprecated alias since Python 3.3; use warning instead
            driver_wrapper.logger.warning('Error downloading GGR logs: %s' % exc)
        driver_index += 1
|
python
|
{
"resource": ""
}
|
q19625
|
DriverWrappersPool.get_configured_value
|
train
|
def get_configured_value(system_property_name, specific_value, default_value):
    """Get configured value from system properties, method parameters or default value

    :param system_property_name: system property name
    :param specific_value: test case specific value
    :param default_value: default value
    :returns: configured value (environment variable wins, then specific value, then default)
    """
    if system_property_name in os.environ:
        return os.environ[system_property_name]
    return specific_value if specific_value else default_value
|
python
|
{
"resource": ""
}
|
q19626
|
DriverWrappersPool.configure_common_directories
|
train
|
def configure_common_directories(cls, tc_config_files):
    """Configure common config and output folders for all tests

    :param tc_config_files: test case specific config files
    """
    # Only configure once per run: config_directory doubles as the "already initialized" flag
    if cls.config_directory is None:
        # Get config directory from properties
        config_directory = cls.get_configured_value('Config_directory', tc_config_files.config_directory, 'conf')
        prop_filenames = cls.get_configured_value('Config_prop_filenames',
                                                  tc_config_files.config_properties_filenames, 'properties.cfg')
        # Walk up the directory tree looking for the folder that contains the first properties file
        cls.config_directory = cls._find_parent_directory(config_directory, prop_filenames.split(';')[0])
        # Get output directory from properties and create it
        cls.output_directory = cls.get_configured_value('Output_directory', tc_config_files.output_directory,
                                                        'output')
        if not os.path.isabs(cls.output_directory):
            # If output directory is relative, we use the same path as config directory
            cls.output_directory = os.path.join(os.path.dirname(cls.config_directory), cls.output_directory)
        if not os.path.exists(cls.output_directory):
            os.makedirs(cls.output_directory)
        # Get visual baseline directory from properties
        default_baseline = os.path.join(cls.output_directory, 'visualtests', 'baseline')
        cls.visual_baseline_directory = cls.get_configured_value('Visual_baseline_directory',
                                                                 tc_config_files.visual_baseline_directory,
                                                                 default_baseline)
        if not os.path.isabs(cls.visual_baseline_directory):
            # If baseline directory is relative, we use the same path as config directory
            cls.visual_baseline_directory = os.path.join(os.path.dirname(cls.config_directory),
                                                         cls.visual_baseline_directory)
|
python
|
{
"resource": ""
}
|
q19627
|
DriverWrappersPool.get_default_config_directory
|
train
|
def get_default_config_directory():
    """Return default config directory, based in the actual test path

    :returns: default config directory
    """
    # NOTE(review): getouterframes(...)[2] assumes this function is called exactly two frames
    # below the test module (frame [1] holds the file path) — confirm if call sites change
    test_path = os.path.dirname(os.path.realpath(inspect.getouterframes(inspect.currentframe())[2][1]))
    return os.path.join(test_path, 'conf')
|
python
|
{
"resource": ""
}
|
q19628
|
DriverWrappersPool._find_parent_directory
|
train
|
def _find_parent_directory(directory, filename):
"""Find a directory in parent tree with a specific filename
:param directory: directory name to find
:param filename: filename to find
:returns: absolute directory path
"""
parent_directory = directory
absolute_directory = '.'
while absolute_directory != os.path.abspath(parent_directory):
absolute_directory = os.path.abspath(parent_directory)
if os.path.isfile(os.path.join(absolute_directory, filename)):
return absolute_directory
if os.path.isabs(parent_directory):
parent_directory = os.path.join(os.path.dirname(parent_directory), '..',
os.path.basename(parent_directory))
else:
parent_directory = os.path.join('..', parent_directory)
return os.path.abspath(directory)
|
python
|
{
"resource": ""
}
|
q19629
|
DriverWrappersPool.configure_visual_directories
|
train
|
def configure_visual_directories(cls, driver_info):
    """Configure screenshots, videos and visual directories

    :param driver_info: driver property value to name folders
    """
    if cls.screenshots_directory is not None:
        # Already configured for this execution
        return
    # Unique folder name per execution: timestamp, optionally suffixed with driver info
    date = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
    folder_name = get_valid_filename('%s_%s' % (date, driver_info) if driver_info else date)
    output = cls.output_directory
    cls.screenshots_directory = os.path.join(output, 'screenshots', folder_name)
    cls.screenshots_number = 1
    cls.videos_directory = os.path.join(output, 'videos', folder_name)
    cls.logs_directory = os.path.join(output, 'logs', folder_name)
    cls.videos_number = 1
    # Unique visualtests directories
    cls.visual_output_directory = os.path.join(output, 'visualtests', folder_name)
    cls.visual_number = 1
|
python
|
{
"resource": ""
}
|
q19630
|
DriverWrappersPool.initialize_config_files
|
train
|
def initialize_config_files(tc_config_files=None):
    """Initialize config files and update config files names with the environment

    :param tc_config_files: test case specific config files
    :returns: initialized config files object
    """
    # Initialize config files
    if tc_config_files is None:
        tc_config_files = ConfigFiles()
    env = DriverWrappersPool.get_configured_value('Config_environment', None, None)
    if not env:
        # No environment configured: keep the filenames as they are
        return tc_config_files
    # Append '<env>-<name>' and 'local-<env>-<name>' variants of the first properties file
    prop_filenames = tc_config_files.config_properties_filenames
    filenames = prop_filenames.split(';') if prop_filenames else ['properties.cfg']
    base, ext = os.path.splitext(filenames[0])
    filenames.append('{}-{}{}'.format(env, base, ext))
    filenames.append('local-{}-{}{}'.format(env, base, ext))
    tc_config_files.set_config_properties_filenames(*filenames)
    # Suffix the output log filename with the environment
    log_filename = tc_config_files.output_log_filename
    base, ext = os.path.splitext(log_filename) if log_filename else ('toolium', '.log')
    tc_config_files.set_output_log_filename('{}_{}{}'.format(base, env, ext))
    return tc_config_files
|
python
|
{
"resource": ""
}
|
q19631
|
before_all
|
train
|
def before_all(context):
    """Initialization method that will be executed before the test execution

    :param context: behave context
    """
    # Use pytest asserts if behave_pytest is installed
    install_pytest_asserts()
    # Get 'Config_environment' property from user input (e.g. -D Config_environment=ios)
    env = context.config.userdata.get('Config_environment')
    # Deprecated: Get 'env' property from user input (e.g. -D env=ios)
    env = env if env else context.config.userdata.get('env')
    if env:
        # Expose the environment through os.environ so configuration helpers can read it later
        os.environ['Config_environment'] = env
    if not hasattr(context, 'config_files'):
        context.config_files = ConfigFiles()
    context.config_files = DriverWrappersPool.initialize_config_files(context.config_files)
    # By default config directory is located in environment path
    if not context.config_files.config_directory:
        context.config_files.set_config_directory(DriverWrappersPool.get_default_config_directory())
    # Shared flag, set to False by the after-scenario hook when any scenario fails
    context.global_status = {'test_passed': True}
    create_and_configure_wrapper(context)
    # Behave dynamic environment
    context.dyn_env = DynamicEnvironment(logger=context.logger)
|
python
|
{
"resource": ""
}
|
q19632
|
bdd_common_before_scenario
|
train
|
def bdd_common_before_scenario(context_or_world, scenario, no_driver=False):
    """Common scenario initialization in behave or lettuce

    :param context_or_world: behave context or lettuce world
    :param scenario: running scenario
    :param no_driver: True if this is an api test and driver should not be started
    """
    # Initialize and connect driver wrapper
    start_driver(context_or_world, no_driver)
    # Add assert screenshot methods with scenario configuration
    add_assert_screenshot_methods(context_or_world, scenario)
    # Configure Jira properties
    save_jira_conf()
    context_or_world.logger.info("Running new scenario: %s", scenario.name)
|
python
|
{
"resource": ""
}
|
q19633
|
create_and_configure_wrapper
|
train
|
def create_and_configure_wrapper(context_or_world):
    """Create and configure driver wrapper in behave or lettuce tests

    :param context_or_world: behave context or lettuce world
    """
    # Create default driver wrapper
    context_or_world.driver_wrapper = DriverWrappersPool.get_default_wrapper()
    context_or_world.utils = context_or_world.driver_wrapper.utils
    # Get behave userdata properties to override config properties
    try:
        behave_properties = context_or_world.config.userdata
    except AttributeError:
        # No 'config.userdata' attribute (e.g. a lettuce world): run without overrides
        behave_properties = None
    # Configure wrapper
    context_or_world.driver_wrapper.configure(context_or_world.config_files, behave_properties=behave_properties)
    # Copy config object
    context_or_world.toolium_config = context_or_world.driver_wrapper.config
    # Configure logger
    context_or_world.logger = logging.getLogger(__name__)
|
python
|
{
"resource": ""
}
|
q19634
|
connect_wrapper
|
train
|
def connect_wrapper(context_or_world):
    """Connect driver in behave or lettuce tests

    :param context_or_world: behave context or lettuce world
    """
    wrapper = context_or_world.driver_wrapper
    # Reuse the existing driver when available, otherwise create a new one
    context_or_world.driver = wrapper.driver if wrapper.driver else wrapper.connect()
    # Copy app_strings object
    context_or_world.app_strings = wrapper.app_strings
|
python
|
{
"resource": ""
}
|
q19635
|
add_assert_screenshot_methods
|
train
|
def add_assert_screenshot_methods(context_or_world, scenario):
    """Add assert screenshot methods to behave or lettuce object

    :param context_or_world: behave context or lettuce world
    :param scenario: running scenario
    """
    # The scenario name is captured by the closures below so screenshots are named after the test
    file_suffix = scenario.name

    # NOTE(review): exclude_elements=[] is a mutable default argument; it is only forwarded to
    # VisualTest here, but confirm it is never mutated downstream before relying on it
    def assert_screenshot(element_or_selector, filename, threshold=0, exclude_elements=[], driver_wrapper=None,
                          force=False):
        VisualTest(driver_wrapper, force).assert_screenshot(element_or_selector, filename, file_suffix, threshold,
                                                            exclude_elements)

    def assert_full_screenshot(filename, threshold=0, exclude_elements=[], driver_wrapper=None, force=False):
        VisualTest(driver_wrapper, force).assert_screenshot(None, filename, file_suffix, threshold, exclude_elements)

    # Monkey patching assert_screenshot method in PageElement to use the correct test name
    def assert_screenshot_page_element(self, filename, threshold=0, exclude_elements=[], force=False):
        VisualTest(self.driver_wrapper, force).assert_screenshot(self.web_element, filename, file_suffix, threshold,
                                                                 exclude_elements)

    context_or_world.assert_screenshot = assert_screenshot
    context_or_world.assert_full_screenshot = assert_full_screenshot
    PageElement.assert_screenshot = assert_screenshot_page_element
|
python
|
{
"resource": ""
}
|
q19636
|
bdd_common_after_scenario
|
train
|
def bdd_common_after_scenario(context_or_world, scenario, status):
    """Clean method that will be executed after each scenario in behave or lettuce

    :param context_or_world: behave context or lettuce world
    :param scenario: running scenario
    :param status: scenario status (passed, failed or skipped)
    """
    if status == 'skipped':
        # Nothing to clean up for skipped scenarios
        return
    elif status == 'passed':
        test_status = 'Pass'
        test_comment = None
        context_or_world.logger.info("The scenario '%s' has passed", scenario.name)
    else:
        test_status = 'Fail'
        test_comment = "The scenario '%s' has failed" % scenario.name
        context_or_world.logger.error("The scenario '%s' has failed", scenario.name)
        # Remember the failure so feature/session teardown can report the global result
        context_or_world.global_status['test_passed'] = False
    # Close drivers
    DriverWrappersPool.close_drivers(scope='function', test_name=scenario.name, test_passed=status == 'passed',
                                     context=context_or_world)
    # Save test status to be updated later
    add_jira_status(get_jira_key_from_scenario(scenario), test_status, test_comment)
|
python
|
{
"resource": ""
}
|
q19637
|
after_feature
|
train
|
def after_feature(context, feature):
    """Clean method that will be executed after each feature

    :param context: behave context
    :param feature: running feature
    """
    # Behave dynamic environment
    context.dyn_env.execute_after_feature_steps(context)
    # Close drivers with module scope, reporting the accumulated pass/fail status
    DriverWrappersPool.close_drivers(scope='module', test_name=feature.name,
                                     test_passed=context.global_status['test_passed'])
|
python
|
{
"resource": ""
}
|
q19638
|
bdd_common_after_all
|
train
|
def bdd_common_after_all(context_or_world):
    """Common after all method in behave or lettuce

    :param context_or_world: behave context or lettuce world
    """
    # Close drivers opened with session scope
    DriverWrappersPool.close_drivers(scope='session', test_name='multiple_tests',
                                     test_passed=context_or_world.global_status['test_passed'])

    # Push the collected test statuses to Jira
    change_all_jira_status()
|
python
|
{
"resource": ""
}
|
q19639
|
DriverWrapper.configure_logger
|
train
|
def configure_logger(self, tc_config_log_filename=None, tc_output_log_filename=None):
    """Configure selenium instance logger

    Reads the logging configuration file and applies it, substituting the output
    log filename into the config. Reconfiguration is skipped when the resolved
    config filename has not changed since the previous call.

    :param tc_config_log_filename: test case specific logging config file
    :param tc_output_log_filename: test case specific output logger file
    """
    # Resolve config logger filename (tc-specific value or 'logging.conf' default)
    config_log_filename = DriverWrappersPool.get_configured_value('Config_log_filename', tc_config_log_filename,
                                                                  'logging.conf')
    config_log_filename = os.path.join(DriverWrappersPool.config_directory, config_log_filename)

    # Configure logger only if logging filename has changed
    if self.config_log_filename != config_log_filename:
        # Resolve output logger filename (tc-specific value or 'toolium.log' default)
        output_log_filename = DriverWrappersPool.get_configured_value('Output_log_filename', tc_output_log_filename,
                                                                      'toolium.log')
        output_log_filename = os.path.join(DriverWrappersPool.output_directory, output_log_filename)
        # Escape backslashes so Windows paths survive fileConfig's string interpolation
        output_log_filename = output_log_filename.replace('\\', '\\\\')

        try:
            # disable_existing_loggers=False keeps previously created loggers working
            logging.config.fileConfig(config_log_filename, {'logfilename': output_log_filename}, False)
        except Exception as exc:
            print("[WARN] Error reading logging config file '{}': {}".format(config_log_filename, exc))
        self.config_log_filename = config_log_filename
        self.output_log_filename = output_log_filename
        self.logger = logging.getLogger(__name__)
|
python
|
{
"resource": ""
}
|
q19640
|
DriverWrapper.configure_properties
|
train
|
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
    """Configure selenium instance properties

    :param tc_config_prop_filenames: test case specific properties filenames
    :param behave_properties: dict with behave user data properties
    """
    # Resolve the semicolon-separated list of properties files and prefix each
    # of them with the configured config directory
    filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
                                                        'properties.cfg;local-properties.cfg')
    resolved_filenames = ';'.join(os.path.join(DriverWrappersPool.config_directory, name)
                                  for name in filenames.split(';'))

    # Reload the config object only when the resolved filenames have changed
    if self.config_properties_filenames != resolved_filenames:
        self.config = ExtendedConfigParser.get_config_from_file(resolved_filenames)
        self.config_properties_filenames = resolved_filenames

    # System properties override file properties
    self.config.update_properties(os.environ)
    # Behave userdata properties override everything else
    if behave_properties:
        self.config.update_properties(behave_properties)
|
python
|
{
"resource": ""
}
|
q19641
|
DriverWrapper.configure_visual_baseline
|
train
|
def configure_visual_baseline(self):
    """Configure baseline directory"""
    # Baseline name template, e.g. '{Driver_type}'
    baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}')

    # Substitute every '{Section_option}' placeholder with its config value
    for section in self.config.sections():
        for option in self.config.options(section):
            placeholder = '{{{0}_{1}}}'.format(section, option)
            baseline_name = baseline_name.replace(placeholder, self.config.get(section, option))

    # Rebuild the baseline directory only when the baseline name has changed
    if self.baseline_name != baseline_name:
        self.baseline_name = baseline_name
        self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
                                                      get_valid_filename(baseline_name))
|
python
|
{
"resource": ""
}
|
q19642
|
DriverWrapper.update_visual_baseline
|
train
|
def update_visual_baseline(self):
    """Configure baseline directory after driver is created

    Replaces the '{PlatformVersion}', '{Version}' and '{RemoteNode}' placeholders
    in the baseline name with values read from the running driver, refreshing the
    visual baseline directory after each substitution.
    """
    # Update baseline with real platformVersion value
    if '{PlatformVersion}' in self.baseline_name:
        try:
            platform_version = self.driver.desired_capabilities['platformVersion']
        except KeyError:
            platform_version = None
        self._replace_baseline_placeholder('{PlatformVersion}', platform_version)

    # Update baseline with real version value (major.minor only)
    if '{Version}' in self.baseline_name:
        try:
            splitted_version = self.driver.desired_capabilities['version'].split('.')
            version = '.'.join(splitted_version[:2])
        except KeyError:
            version = None
        self._replace_baseline_placeholder('{Version}', version)

    # Update baseline with remote node value
    if '{RemoteNode}' in self.baseline_name:
        self._replace_baseline_placeholder('{RemoteNode}', self.remote_node)

def _replace_baseline_placeholder(self, placeholder, value):
    """Replace a placeholder in the baseline name and refresh the baseline directory

    :param placeholder: placeholder token to replace, e.g. '{Version}'
    :param value: replacement value (converted with str(), so None becomes 'None')
    """
    self.baseline_name = self.baseline_name.replace(placeholder, str(value))
    self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
                                                  self.baseline_name)
|
python
|
{
"resource": ""
}
|
q19643
|
DriverWrapper.configure
|
train
|
def configure(self, tc_config_files, is_selenium_test=True, behave_properties=None):
    """Configure initial selenium instance using logging and properties files for Selenium or Appium tests

    :param tc_config_files: test case specific config files
    :param is_selenium_test: true if test is a selenium or appium test case
    :param behave_properties: dict with behave user data properties
    """
    # Common config/output directories must exist before anything else is configured
    DriverWrappersPool.configure_common_directories(tc_config_files)

    self.configure_logger(tc_config_files.config_log_filename, tc_config_files.output_log_filename)
    self.configure_properties(tc_config_files.config_properties_filenames, behave_properties)

    # Visual testing directories are only relevant for selenium/appium tests
    if is_selenium_test:
        driver_info = self.config.get('Driver', 'type')
        DriverWrappersPool.configure_visual_directories(driver_info)
        self.configure_visual_baseline()
|
python
|
{
"resource": ""
}
|
q19644
|
DriverWrapper.connect
|
train
|
def connect(self, maximize=True):
    """Set up the selenium driver and connect to the server

    :param maximize: True if the driver should be maximized
    :returns: selenium driver, or None for 'api'/'no_driver' configurations
    """
    if not self.config.get('Driver', 'type') or self.config.get('Driver', 'type') in ['api', 'no_driver']:
        return None

    self.driver = ConfigDriver(self.config, self.utils).create_driver()

    # Save session id and remote node to download video after the test execution
    self.session_id = self.driver.session_id
    self.server_type, self.remote_node = self.utils.get_remote_node()
    if self.server_type == 'grid':
        self.remote_node_video_enabled = self.utils.is_remote_video_enabled(self.remote_node)
    else:
        # ggr/selenoid servers are treated as video-enabled, any other server is not
        # (was 'True if ... else False', the boolean expression is enough)
        self.remote_node_video_enabled = self.server_type in ['ggr', 'selenoid']

    # Save app_strings in mobile tests
    if self.is_mobile_test() and not self.is_web_test() and self.config.getboolean_optional('Driver',
                                                                                            'appium_app_strings'):
        self.app_strings = self.driver.app_strings()

    if self.is_maximizable():
        # Move window to the configured bounds/monitor
        bounds_x, bounds_y = self.get_config_window_bounds()
        self.driver.set_window_position(bounds_x, bounds_y)
        self.logger.debug('Window bounds: %s x %s', bounds_x, bounds_y)

        # Maximize browser
        if maximize:
            # Set explicit window size when configured, otherwise maximize
            window_width = self.config.get_optional('Driver', 'window_width')
            window_height = self.config.get_optional('Driver', 'window_height')
            if window_width and window_height:
                self.driver.set_window_size(window_width, window_height)
            else:
                self.driver.maximize_window()

    # Log window size
    window_size = self.utils.get_window_size()
    self.logger.debug('Window size: %s x %s', window_size['width'], window_size['height'])

    # Update baseline
    self.update_visual_baseline()

    # Discard previous logcat logs
    self.utils.discard_logcat_logs()

    # Set implicitly wait timeout
    self.utils.set_implicitly_wait()

    return self.driver
|
python
|
{
"resource": ""
}
|
q19645
|
DriverWrapper.get_config_window_bounds
|
train
|
def get_config_window_bounds(self):
    """Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor

    :return: coords X and Y where set the browser window.
    """
    # Window origin offsets (default to 0 when not configured)
    bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
    bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)

    # Optional monitor index; -1 means no monitor configured
    monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
    if monitor_index > -1:
        try:
            monitor = screeninfo.get_monitors()[monitor_index]
            bounds_x += monitor.x
            bounds_y += monitor.y
        except NotImplementedError:
            # screeninfo cannot enumerate monitors on this platform.
            # Logger.warn is deprecated, Logger.warning is the supported spelling.
            self.logger.warning('Current environment doesn\'t support get_monitors')

    return bounds_x, bounds_y
|
python
|
{
"resource": ""
}
|
q19646
|
DriverWrapper.should_reuse_driver
|
train
|
def should_reuse_driver(self, scope, test_passed, context=None):
    """Check if the driver should be reused

    :param scope: execution scope (function, module, class or session)
    :param test_passed: True if the test has passed
    :param context: behave context
    :returns: True if the driver should be reused
    """
    config = self.config
    reuse_driver = config.getboolean_optional('Driver', 'reuse_driver')
    reuse_driver_session = config.getboolean_optional('Driver', 'reuse_driver_session')
    # Either option name enables restarting the driver after a failed test
    restart_after_failure = (config.getboolean_optional('Driver', 'restart_driver_after_failure')
                             or config.getboolean_optional('Driver', 'restart_driver_fail'))

    # Scenario tags may force driver reuse at function scope
    if context and scope == 'function' and hasattr(context, 'reuse_driver_from_tags'):
        reuse_driver = reuse_driver or context.reuse_driver_from_tags

    reuse_in_scope = (reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session')
    return reuse_in_scope and (test_passed or not restart_after_failure)
|
python
|
{
"resource": ""
}
|
q19647
|
get_long_description
|
train
|
def get_long_description():
    """Get README content and update rst urls

    :returns: long description
    """
    readme = read_file('README.rst')

    # Replace repository-relative rst links with their ReadTheDocs html counterparts
    docs_url = 'http://toolium.readthedocs.org/en/latest'
    description = readme.replace('/CHANGELOG.rst', '{}/changelog.html'.format(docs_url))
    doc_pages = ['driver_configuration', 'page_objects', 'bdd_integration', 'visual_testing',
                 'tests_result_analysis']
    for page in doc_pages:
        description = description.replace('/docs/{}.rst'.format(page), '{}/{}.html'.format(docs_url, page))
    return description
|
python
|
{
"resource": ""
}
|
q19648
|
jira
|
train
|
def jira(test_key):
    """Decorator to update test status in Jira

    :param test_key: test case key in Jira
    :returns: jira test
    """
    def wrap(test_item):
        def wrapped_test(*args, **kwargs):
            # Load Jira settings before running the test
            save_jira_conf()
            try:
                test_item(*args, **kwargs)
            except Exception as exc:
                error_message = get_error_message_from_exception(exc)
                test_comment = "The test '{}' has failed: {}".format(args[0].get_method_name(), error_message)
                add_jira_status(test_key, 'Fail', test_comment)
                raise
            # No exception raised: record the test as passed
            add_jira_status(test_key, 'Pass', None)

        wrapped_test.__name__ = test_item.__name__
        return wrapped_test

    return wrap
|
python
|
{
"resource": ""
}
|
q19649
|
save_jira_conf
|
train
|
def save_jira_conf():
    """Read Jira configuration from properties file and save it"""
    # Values are kept in module-level globals so that add_jira_status and
    # change_jira_status can use them without access to the config object
    global enabled, execution_url, summary_prefix, labels, comments, fix_version, build, only_if_changes, attachments
    config = DriverWrappersPool.get_default_wrapper().config
    enabled = config.getboolean_optional('Jira', 'enabled')
    execution_url = config.get_optional('Jira', 'execution_url')
    summary_prefix = config.get_optional('Jira', 'summary_prefix')
    labels = config.get_optional('Jira', 'labels')
    comments = config.get_optional('Jira', 'comments')
    fix_version = config.get_optional('Jira', 'fixversion')
    build = config.get_optional('Jira', 'build')
    only_if_changes = config.getboolean_optional('Jira', 'onlyifchanges')
    # Reset the attachment list for the new test
    attachments = []
|
python
|
{
"resource": ""
}
|
q19650
|
add_jira_status
|
train
|
def add_jira_status(test_key, test_status, test_comment):
    """Save test status and comments to update Jira later

    :param test_key: test case key in Jira
    :param test_status: test case status
    :param test_comment: test case comments
    """
    global attachments
    if test_key and enabled:
        if test_key in jira_tests_status:
            # Merge data with previous test status
            previous_status = jira_tests_status[test_key]
            # The test case only stays 'Pass' if every execution so far passed
            test_status = 'Pass' if previous_status[1] == 'Pass' and test_status == 'Pass' else 'Fail'
            if previous_status[2] and test_comment:
                # Concatenate previous and current comments
                test_comment = '{}\n{}'.format(previous_status[2], test_comment)
            elif previous_status[2] and not test_comment:
                test_comment = previous_status[2]
            # Keep attachments from previous executions as well
            attachments += previous_status[3]
        # Add or update test status
        jira_tests_status[test_key] = (test_key, test_status, test_comment, attachments)
|
python
|
{
"resource": ""
}
|
q19651
|
change_jira_status
|
train
|
def change_jira_status(test_key, test_status, test_comment, test_attachments):
    """Update test status in Jira

    :param test_key: test case key in Jira
    :param test_status: test case status
    :param test_comment: test case comments
    :param test_attachments: test case attachments
    """
    logger = logging.getLogger(__name__)

    if not execution_url:
        logger.warning("Test Case '%s' can not be updated: execution_url is not configured", test_key)
        return

    logger.info("Updating Test Case '%s' in Jira with status %s", test_key, test_status)
    composed_comments = comments
    if test_comment:
        composed_comments = '{}\n{}'.format(comments, test_comment) if comments else test_comment
    payload = {'jiraTestCaseId': test_key, 'jiraStatus': test_status, 'summaryPrefix': summary_prefix,
               'labels': labels, 'comments': composed_comments, 'version': fix_version, 'build': build}
    if only_if_changes:
        payload['onlyIfStatusChanges'] = 'true'

    # Open attachment files and make sure every opened handle is closed
    # afterwards, even when the request fails
    files = {}
    try:
        for index, attachment in enumerate(test_attachments or []):
            files['attachments{}'.format(index)] = open(attachment, 'rb')
        # requests expects None instead of an empty files dict
        response = requests.post(execution_url, data=payload, files=files or None)
    except Exception as e:
        logger.warning("Error updating Test Case '%s': %s", test_key, e)
        return
    finally:
        for attachment_file in files.values():
            attachment_file.close()

    if response.status_code >= 400:
        logger.warning("Error updating Test Case '%s': [%s] %s", test_key, response.status_code,
                       get_error_message(response.content))
    else:
        logger.debug("%s", response.content.decode().splitlines()[0])
|
python
|
{
"resource": ""
}
|
q19652
|
get_error_message
|
train
|
def get_error_message(response_content):
    """Extract error message from the HTTP response

    :param response_content: HTTP response from test case execution API, as str or bytes
    :returns: error message
    """
    # Callers pass response.content, which is bytes in Python 3; searching bytes
    # with str patterns raises TypeError, so decode first
    if isinstance(response_content, bytes):
        response_content = response_content.decode(errors='replace')

    # Apache-style error page: the message is inside <u>...</u>
    apache_regex = re.compile(r'.*<u>(.*)</u></p><p>.*')
    match = apache_regex.search(response_content)
    if match:
        return match.group(1)

    # Local server error page: the message is inside <title>...</title>
    local_regex = re.compile(r'.*<title>(.*)</title>.*')
    match = local_regex.search(response_content)
    if match:
        return match.group(1)

    # No recognised structure: return the whole content
    return response_content
|
python
|
{
"resource": ""
}
|
q19653
|
Button.click
|
train
|
def click(self):
    """Click the element

    Waits until the element is clickable before clicking; if the cached
    element has gone stale meanwhile, it is looked up again and clicked.

    :returns: page element instance
    """
    try:
        self.wait_until_clickable().web_element.click()
    except StaleElementReferenceException:
        # Retry if element has changed
        self.web_element.click()
    return self
|
python
|
{
"resource": ""
}
|
q19654
|
PageObject._get_page_elements
|
train
|
def _get_page_elements(self):
    """Return page elements and page objects of this page object

    :returns: list of page elements and page objects
    """
    # Inspect both instance attributes and class attributes
    candidates = list(self.__dict__.items()) + list(self.__class__.__dict__.items())
    return [value for attribute, value in candidates
            if attribute != 'parent' and isinstance(value, CommonObject)]
|
python
|
{
"resource": ""
}
|
q19655
|
PageObject.wait_until_loaded
|
train
|
def wait_until_loaded(self, timeout=None):
    """Wait until page object is loaded
    Search all page elements configured with wait=True

    :param timeout: max time to wait
    :returns: this page object instance
    """
    for element in self._get_page_elements():
        if not getattr(element, 'wait', False):
            continue
        from toolium.pageelements.page_element import PageElement
        # PageElement (and element groups) must become visible
        if isinstance(element, PageElement):
            element.wait_until_visible(timeout)
        # Nested PageObject (and groups) are waited on recursively
        if isinstance(element, PageObject):
            element.wait_until_loaded(timeout)
    return self
|
python
|
{
"resource": ""
}
|
q19656
|
ExtendedConfigParser.get_optional
|
train
|
def get_optional(self, section, option, default=None):
    """ Get an option value for a given section
    If the section or the option are not found, the default value is returned

    :param section: config section
    :param option: config option
    :param default: default value
    :returns: config value
    """
    try:
        value = self.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        value = default
    return value
|
python
|
{
"resource": ""
}
|
q19657
|
ExtendedConfigParser.getboolean_optional
|
train
|
def getboolean_optional(self, section, option, default=False):
    """ Get an option boolean value for a given section
    If the section or the option are not found, the default value is returned

    :param section: config section
    :param option: config option
    :param default: default value
    :returns: boolean config value
    """
    try:
        value = self.getboolean(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        value = default
    return value
|
python
|
{
"resource": ""
}
|
q19658
|
ExtendedConfigParser.deepcopy
|
train
|
def deepcopy(self):
    """Returns a deep copy of config object

    :returns: a copy of the config object
    """
    # Serialize the current config to an in-memory buffer
    config_string = StringIO()
    self.write(config_string)
    # We must reset the buffer ready for reading.
    config_string.seek(0)

    # Parse the serialized content into a fresh config object.
    # read_file replaces readfp, which was deprecated in Python 3.2
    # and removed in Python 3.12.
    config_copy = ExtendedConfigParser()
    config_copy.read_file(config_string)
    return config_copy
|
python
|
{
"resource": ""
}
|
q19659
|
ExtendedConfigParser.update_properties
|
train
|
def update_properties(self, new_properties):
    """ Update config properties values
    Property name must be equal to 'Section_option' of config property

    :param new_properties: dict with new properties values
    """
    # Plain loop instead of a list comprehension: this code runs only for its
    # side effects, so no throwaway list should be built
    for section in self.sections():
        for option in self.options(section):
            self._update_property_from_dict(section, option, new_properties)
|
python
|
{
"resource": ""
}
|
q19660
|
ExtendedConfigParser._update_property_from_dict
|
train
|
def _update_property_from_dict(self, section, option, new_properties):
""" Update a config property value with a new property value
Property name must be equal to 'Section_option' of config property
:param section: config section
:param option: config option
:param new_properties: dict with new properties values
"""
try:
property_name = "{0}_{1}".format(section, option)
self.set(section, option, new_properties[property_name])
except KeyError:
pass
|
python
|
{
"resource": ""
}
|
q19661
|
ExtendedConfigParser.get_config_from_file
|
train
|
def get_config_from_file(conf_properties_files):
    """Reads properties files and saves them to a config object

    :param conf_properties_files: semicolon-separated list of properties files
    :returns: config object
    :raises Exception: if none of the properties files could be read
    """
    # Initialize the config object
    config = ExtendedConfigParser()
    logger = logging.getLogger(__name__)

    # Configure properties (last files could override properties)
    found = False
    files_list = conf_properties_files.split(';')
    for conf_properties_file in files_list:
        result = config.read(conf_properties_file)
        if len(result) == 0:
            message = 'Properties config file not found: %s'
            if len(files_list) == 1:
                # A single missing file is fatal
                logger.error(message, conf_properties_file)
                raise Exception(message % conf_properties_file)
            else:
                # With several candidate files, a missing one is only logged
                logger.debug(message, conf_properties_file)
        else:
            logger.debug('Reading properties from file: %s', conf_properties_file)
            found = True
    if not found:
        message = 'Any of the properties config files has been found'
        logger.error(message)
        raise Exception(message)
    return config
|
python
|
{
"resource": ""
}
|
q19662
|
PageElements.web_elements
|
train
|
def web_elements(self):
    """Find multiple WebElements using element locator

    :returns: list of web element objects
    :rtype: list of selenium.webdriver.remote.webelement.WebElement
            or list of appium.webdriver.webelement.WebElement
    """
    # Re-run the search unless elements are cached and caching is enabled
    if not self._web_elements or not self.config.getboolean_optional('Driver', 'save_web_element'):
        # Search from the parent element when configured, otherwise from the driver
        finder = self.utils.get_web_element(self.parent) if self.parent else self.driver
        self._web_elements = finder.find_elements(*self.locator)
    return self._web_elements
|
python
|
{
"resource": ""
}
|
q19663
|
PageElements.page_elements
|
train
|
def page_elements(self):
    """Find multiple PageElement using element locator

    :returns: list of page element objects
    :rtype: list of toolium.pageelements.PageElement
    """
    # Rebuild the list unless elements are cached and caching is enabled
    if not self._page_elements or not self.config.getboolean_optional('Driver', 'save_web_element'):
        self._page_elements = []
        for order, web_element in enumerate(self.web_elements):
            # Each child element keeps the original locator plus its index
            child = self.page_element_class(self.locator[0], self.locator[1], parent=self.parent,
                                            order=order)
            child.reset_object(self.driver_wrapper)
            child._web_element = web_element
            self._page_elements.append(child)
    return self._page_elements
|
python
|
{
"resource": ""
}
|
q19664
|
PageElement.web_element
|
train
|
def web_element(self):
    """Find WebElement using element locator

    :returns: web element object
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises NoSuchElementException: if the element can not be found
    """
    try:
        self._find_web_element()
    except NoSuchElementException as exception:
        # Enrich the exception message with locator information before re-raising
        parent_msg = " and parent locator '{}'".format(self.parent) if self.parent else ''
        msg = "Page element of type '%s' with locator %s%s not found"
        self.logger.error(msg, type(self).__name__, self.locator, parent_msg)
        exception.msg += "\n {}".format(msg % (type(self).__name__, self.locator, parent_msg))
        raise exception
    return self._web_element
|
python
|
{
"resource": ""
}
|
q19665
|
PageElement._find_web_element
|
train
|
def _find_web_element(self):
    """Find WebElement using element locator and save it in _web_element attribute"""
    # Re-run the search unless the element is cached and caching is enabled
    if not self._web_element or not self.config.getboolean_optional('Driver', 'save_web_element'):
        # If the element is encapsulated we use the shadowroot tag in yaml (eg. Shadowroot: root_element_name)
        if self.shadowroot:
            if self.locator[0] != By.CSS_SELECTOR:
                # Use str.format so the offending locator type is actually
                # interpolated ('%s'.format(...) left the literal '%s' in place)
                raise Exception('Locator type should be CSS_SELECTOR using shadowroot but found: '
                                '{}'.format(self.locator[0]))
            # querySelector only supports CSS SELECTOR locators
            self._web_element = self.driver.execute_script('return document.querySelector("%s").shadowRoot.'
                                                           'querySelector("%s")' % (self.shadowroot,
                                                                                    self.locator[1]))
        else:
            # Element will be found from parent element or from driver
            base = self.utils.get_web_element(self.parent) if self.parent else self.driver
            # Find elements and get the correct index or find a single element.
            # NOTE(review): 'if self.order' treats order=0 like 'no order'; find_element
            # returns the first match anyway, so the result is equivalent for index 0
            self._web_element = base.find_elements(*self.locator)[self.order] if self.order else base.find_element(
                *self.locator)
|
python
|
{
"resource": ""
}
|
q19666
|
PageElement.scroll_element_into_view
|
train
|
def scroll_element_into_view(self):
    """Scroll element into view

    :returns: page element instance
    """
    location = self.web_element.location
    self.driver.execute_script('window.scrollTo({0}, {1})'.format(location['x'], location['y']))
    return self
|
python
|
{
"resource": ""
}
|
q19667
|
PageElement.assert_screenshot
|
train
|
def assert_screenshot(self, filename, threshold=0, exclude_elements=None, force=False):
    """Assert that a screenshot of the element is the same as a screenshot on disk, within a given threshold.

    :param filename: the filename for the screenshot, which will be appended with ``.png``
    :param threshold: percentage threshold for triggering a test failure (value between 0 and 1)
    :param exclude_elements: list of WebElements, PageElements or element locators as a tuple (locator_type,
                             locator_value) that must be excluded from the assertion
    :param force: if True, the screenshot is compared even if visual testing is disabled by configuration
    """
    # Avoid a mutable default argument: build a fresh list on each call
    exclude_elements = [] if exclude_elements is None else exclude_elements
    VisualTest(self.driver_wrapper, force).assert_screenshot(self.web_element, filename, self.__class__.__name__,
                                                             threshold, exclude_elements)
|
python
|
{
"resource": ""
}
|
q19668
|
InputText.text
|
train
|
def text(self, value):
    """Set value on the element

    :param value: value to be set
    """
    # Native iOS elements need set_value instead of send_keys
    if self.driver_wrapper.is_ios_test() and not self.driver_wrapper.is_web_test():
        self.web_element.set_value(value)
    elif self.shadowroot:
        # Elements inside a shadow DOM can only be reached through javascript
        self.driver.execute_script('return document.querySelector("%s")'
                                   '.shadowRoot.querySelector("%s")'
                                   '.value = "%s"' % (self.shadowroot, self.locator[1], value))
    else:
        self.web_element.send_keys(value)
|
python
|
{
"resource": ""
}
|
q19669
|
install_documentation
|
train
|
def install_documentation(path="./Litho1pt0-Notebooks"):
    """Install the example notebooks for litho1pt0 in the given location

    WARNING: If the path exists, the Notebook files will be written into the path
    and will overwrite any existing files with which they collide. The default
    path ("./Litho1pt0-Notebooks") is chosen to make collision less likely / problematic

    The documentation for litho1pt0 is in the form of jupyter notebooks.

    Some dependencies exist for the notebooks to be useful:

       - matplotlib: for some diagrams
       - cartopy: for plotting map examples

    litho1pt0 dependencies are explicitly imported into the notebooks including:

       - stripy (for interpolating on the sphere)
       - numpy
       - scipy (for k-d tree point location)
    """
    notebooks_path = _pkg_resources.resource_filename('litho1pt0', 'Notebooks')
    # copy_tree (unlike shutil.copytree) tolerates an existing destination
    # directory, so files are overwritten in place; the returned file list
    # is not needed
    _dir_util.copy_tree(notebooks_path, path, preserve_mode=1, preserve_times=1,
                        preserve_symlinks=1, update=0, verbose=1, dry_run=0)
|
python
|
{
"resource": ""
}
|
q19670
|
remove_duplicates
|
train
|
def remove_duplicates(vector_tuple):
    """
    Remove duplicate rows from N equally-sized arrays

    :param vector_tuple: tuple/list of N equally-sized 1-D arrays
    :returns: list of N 1-D arrays with duplicate rows removed
              (rows are returned in lexicographically sorted order)
    """
    array = np.column_stack(vector_tuple)
    # np.unique(axis=0) replaces the legacy structured-view trick: it removes
    # duplicate rows and returns them lexicographically sorted
    unique_rows = np.unique(array, axis=0)
    return list(unique_rows.T)
|
python
|
{
"resource": ""
}
|
q19671
|
Triangulation._is_collinear
|
train
|
def _is_collinear(self, x, y):
"""
Checks if first three points are collinear
"""
pts = np.column_stack([x[:3], y[:3], np.ones(3)])
return np.linalg.det(pts) == 0.0
|
python
|
{
"resource": ""
}
|
q19672
|
Triangulation._deshuffle_field
|
train
|
def _deshuffle_field(self, *args):
"""
Return to original ordering
"""
ip = self._invpermutation
fields = []
for arg in args:
fields.append( arg[ip] )
if len(fields) == 1:
return fields[0]
else:
return fields
|
python
|
{
"resource": ""
}
|
q19673
|
Triangulation.gradient
|
train
|
def gradient(self, f, nit=3, tol=1e-3, guarantee_convergence=False):
    """
    Return the gradient of an n-dimensional array.

    The method consists of minimizing a quadratic functional Q(G) over
    gradient vectors (in x and y directions), where Q is an approximation
    to the linearized curvature over the triangulation of a C-1 bivariate
    function F(x,y) which interpolates the nodal values and gradients.

    Parameters
    ----------
     f : array of floats, shape (n,)
        field over which to evaluate the gradient
     nit: int (default: 3)
        number of iterations to reach a convergence tolerance, tol
        nit >= 1
     tol: float (default: 1e-3)
        maximum change in gradient between iterations.
        convergence is reached when this condition is met.
     guarantee_convergence: bool (default: False)
        when True, keep calling the solver until it reports convergence

    Returns
    -------
     dfdx : array of floats, shape (n,)
        derivative of f in the x direction
     dfdy : array of floats, shape (n,)
        derivative of f in the y direction

    Notes
    -----
     For SIGMA = 0, optimal efficiency was achieved in testing with
     tol = 0, and nit = 3 or 4.

     The restriction of F to an arc of the triangulation is taken to be
     the Hermite interpolatory tension spline defined by the data values
     and tangential gradient components at the endpoints of the arc, and
     Q is the sum over the triangulation arcs, excluding interior
     constraint arcs, of the linearized curvatures of F along the arcs --
     the integrals over the arcs of D2F(T)**2, where D2F(T) is the second
     derivative of F with respect to distance T along the arc.
    """
    if f.size != self.npoints:
        raise ValueError('f should be the same size as mesh')

    # gradient is stored Fortran-ordered, matching the Fortran routine's layout
    gradient = np.zeros((2,self.npoints), order='F', dtype=np.float32)
    sigma = 0
    iflgs = 0

    # work in the internal (shuffled) node ordering
    f = self._shuffle_field(f)

    ierr = 1
    while ierr == 1:
        # NOTE(review): ierr == 1 presumably means gradg did not converge within
        # nit iterations, so the loop retries when guarantee_convergence is set
        # -- confirm against the srfpack documentation
        ierr = _srfpack.gradg(self._x, self._y, f, self.lst, self.lptr, self.lend,\
                              iflgs, sigma, gradient, nit=nit, dgmax=tol)
        if not guarantee_convergence:
            break

    if ierr < 0:
        raise ValueError('ierr={} in gradg\n{}'.format(ierr, _ier_codes[ierr]))

    # return gradients in the caller's original node ordering
    return self._deshuffle_field(gradient[0], gradient[1])
|
python
|
{
"resource": ""
}
|
q19674
|
Triangulation.gradient_local
|
train
|
def gradient_local(self, f, index):
    """
    Return the gradient at a specified node.

    This routine employs a local method, in which values depend only on nearby
    data points, to compute an estimated gradient at a node.

    gradient_local() is more efficient than gradient() only if it is unnecessary
    to compute gradients at all of the nodes. Both routines have similar accuracy.

    Parameters
    ----------
     f : array of floats, shape (n,)
        field over which to evaluate the gradient
     index : int
        node at which the gradient is evaluated

    Returns
    -------
     dfdx, dfdy : floats
        estimated gradient components at the node
    """
    if f.size != self.npoints:
        raise ValueError('f should be the same size as mesh')

    shuffled = self._shuffle_field(f)
    # gradl expects 1-based Fortran node indices
    dfdx, dfdy, _ = _srfpack.gradl(index + 1, self._x, self._y, shuffled,
                                   self.lst, self.lptr, self.lend)
    return dfdx, dfdy
|
python
|
{
"resource": ""
}
|
q19675
|
Triangulation.interpolate
|
train
|
def interpolate(self, xi, yi, zdata, order=1):
    """
    Base class to handle nearest neighbour, linear, and cubic interpolation.
    Given a triangulation of a set of nodes and values at the nodes,
    this method interpolates the value at the given xi,yi coordinates.

    Parameters
    ----------
     xi : float / array of floats, shape (l,)
        x Cartesian coordinate(s)
     yi : float / array of floats, shape (l,)
        y Cartesian coordinate(s)
     zdata : array of floats, shape (n,)
        value at each point in the triangulation
        must be the same size of the mesh
     order : int (default=1)
        order of the interpolatory function used
          0 = nearest-neighbour
          1 = linear
          3 = cubic

    Returns
    -------
     zi : float / array of floats, shape (l,)
        interpolates value(s) at (xi, yi)
     err : int / array of ints, shape (l,)
        whether interpolation (0), extrapolation (1) or error (other)

    Raises
    ------
     ValueError : if order is not 0, 1 or 3
    """
    if order == 0:
        # the np.int alias was removed in NumPy 1.24; the builtin int gives
        # the same platform-default integer dtype
        zierr = np.zeros_like(xi, dtype=int)
        return self.interpolate_nearest(xi, yi, zdata), zierr
    elif order == 1:
        return self.interpolate_linear(xi, yi, zdata)
    elif order == 3:
        return self.interpolate_cubic(xi, yi, zdata)
    else:
        raise ValueError("order must be 0, 1, or 3")
|
python
|
{
"resource": ""
}
|
q19676
|
Triangulation.interpolate_nearest
|
train
|
def interpolate_nearest(self, xi, yi, zdata):
    """
    Nearest-neighbour interpolation.
    Calls nearnd to find the index of the closest neighbours to xi,yi

    Parameters
    ----------
     xi : float / array of floats, shape (l,)
        x coordinates on the Cartesian plane
     yi : float / array of floats, shape (l,)
        y coordinates on the Cartesian plane
     zdata : array of floats, shape (n,)
        value at each point in the triangulation

    Returns
    -------
     zi : float / array of floats, shape (l,)
        nearest-neighbour interpolated value(s) of (xi,yi)
    """
    if zdata.size != self.npoints:
        raise ValueError('zdata should be same size as mesh')

    field = self._shuffle_field(zdata)
    # starting node guesses for the search (1-based Fortran indices)
    start_nodes = np.ones_like(xi, dtype=np.int32)
    nearest, dist = _tripack.nearnds(xi, yi, start_nodes, self._x, self._y,
                                     self.lst, self.lptr, self.lend)
    # convert the 1-based nearest-node indices to 0-based before indexing
    return field[nearest - 1]
|
python
|
{
"resource": ""
}
|
q19677
|
Triangulation.containing_triangle
|
train
|
def containing_triangle(self, xi, yi):
    """
    Returns indices of the triangles containing xi yi

    Parameters
    ----------
     xi : float / array of floats, shape (l,)
        Cartesian coordinates in the x direction
     yi : float / array of floats, shape (l,)
        Cartesian coordinates in the y direction

    Returns
    -------
     tri_indices: array of ints, shape (l,)

    Notes
    -----
     The simplices are found as cartesian.Triangulation.simplices[tri_indices]
    """
    p = self._permutation
    pts = np.column_stack([xi, yi])

    # sort each simplex's vertex indices so rows can be compared order-independently
    sorted_simplices = np.sort(self._simplices, axis=1)

    triangles = []
    for pt in pts:
        # NOTE(review): trfind presumably returns the (1-based) vertex indices of
        # the triangle containing pt -- confirm against the tripack documentation
        t = _tripack.trfind(3, pt[0], pt[1], self._x, self._y, self.lst, self.lptr, self.lend)

        # convert to 0-based indices
        tri = np.sort(t) - 1
        # map through the permutation and locate the matching simplex row
        triangles.extend(np.where(np.all(p[sorted_simplices]==p[tri], axis=1))[0])

    return np.array(triangles).ravel()
|
python
|
{
"resource": ""
}
|
q19678
|
Triangulation.identify_vertex_neighbours
|
train
|
def identify_vertex_neighbours(self, vertex):
    """
    Find the neighbour-vertices in the triangulation for the given vertex.

    Scans ``self.simplices`` for every triangle that owns ``vertex`` and
    collects the other vertices of those triangles.

    Parameters
    ----------
    vertex : int
        index of the vertex whose neighbours are required

    Returns
    -------
    neighbour_array : list of ints
        sorted, unique indices of the neighbouring vertices

    Raises
    ------
    ValueError
        if ``vertex`` does not appear in any simplex
    """
    simplices = self.simplices
    rows, _ = np.where(simplices == vertex)
    # np.unique already flattens and sorts; the previous np.hstack([...])
    # wrapper around a single array was a no-op
    neighbour_array = np.unique(simplices[rows]).tolist()
    neighbour_array.remove(vertex)  # ValueError when vertex is absent (unchanged)
    return neighbour_array
|
python
|
{
"resource": ""
}
|
q19679
|
Triangulation.identify_vertex_triangles
|
train
|
def identify_vertex_triangles(self, vertices):
    """
    Find all triangles which own any of the vertices in the list provided
    """
    wanted = np.array(vertices).reshape(-1)
    # one hit-list per requested vertex; duplicates removed at the end
    hits = [np.where(self.simplices == v)[0] for v in wanted]
    return np.unique(np.concatenate(hits))
|
python
|
{
"resource": ""
}
|
q19680
|
Triangulation.convex_hull
|
train
|
def convex_hull(self):
    """
    Find the Convex Hull of the internal set of x,y points.

    Returns
    -------
    bnodes : array of ints
        indices corresponding to points on the convex hull
    """
    # bnodes comes back 1-based from the Fortran routine and only the
    # first nb entries are meaningful
    hull, nb, _na, _nt = _tripack.bnodes(self.lst, self.lptr, self.lend,
                                         self.npoints)
    return self._deshuffle_simplices(hull[:nb] - 1)
|
python
|
{
"resource": ""
}
|
q19681
|
Triangulation.areas
|
train
|
def areas(self):
    """
    Compute the area of each triangle within the triangulation of points.

    Returns
    -------
    area : array of floats, shape (nt,)
        area of each triangle in self.simplices where nt
        is the number of triangles.

    Notes
    -----
    The result is the signed area: positive for counter-clockwise
    vertex order, negative for clockwise.
    """
    tri = self.simplices
    a = self.points[tri[:, 0]]
    b = self.points[tri[:, 1]]
    c = self.points[tri[:, 2]]
    # half the z-component of the cross product of the two edge vectors
    return 0.5 * ((b[:, 0] - a[:, 0]) * (c[:, 1] - b[:, 1])
                  - (b[:, 1] - a[:, 1]) * (c[:, 0] - b[:, 0]))
|
python
|
{
"resource": ""
}
|
q19682
|
Triangulation.join
|
train
|
def join(self, t2, unique=False):
    """
    Join this triangulation with another.

    The x / y coordinate arrays of both triangulations are concatenated.
    If the points are known to have no duplicates, set unique=True to
    skip the duplicate testing and removal step.
    (The docstring previously said unique=False skips the test, which
    contradicted the code and the spherical sTriangulation.join.)

    Parameters
    ----------
    t2 : Triangulation
        the other triangulation to merge with this one
    unique : bool (default: False)
        when True, assume there are no duplicate coordinates and skip
        de-duplication

    Returns
    -------
    x_v1, y_v1 : arrays of floats
        merged coordinate arrays (duplicates removed unless unique=True)
    """
    x_v1 = np.concatenate((self.x, t2.x), axis=0)
    y_v1 = np.concatenate((self.y, t2.y), axis=0)

    ## remove any duplicates
    if not unique:
        # view each (x, y) row as one structured element so np.unique
        # de-duplicates whole coordinate pairs at once
        a = np.ascontiguousarray(np.vstack((x_v1, y_v1)).T)
        unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
        unique_coords = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
        x_v1 = unique_coords[:, 0]
        y_v1 = unique_coords[:, 1]

    return x_v1, y_v1
|
python
|
{
"resource": ""
}
|
q19683
|
write_processed_litho_data
|
train
|
def write_processed_litho_data(filename, litho_data, litho_points):
    """
    Ensures that the data is stored in a format which is valid for initialising the class
    """
    # the keyword names are the keys the loader expects in the .npz archive
    np.savez_compressed(filename,
                        litho1_all_data=litho_data,
                        litho1_mesh_coords=litho_points)
|
python
|
{
"resource": ""
}
|
q19684
|
weighted_average_to_nodes
|
train
|
def weighted_average_to_nodes(x1, x2, data, interpolator):
    """ Weighted average of scattered data to the nodal points
    of a triangulation using the barycentric coordinates as
    weightings.

    Parameters
    ----------
    x1, x2 : 1D arrays arrays of x,y or lon, lat (radians)
    data : 1D array of data to be lumped to the node locations
    interpolator : a stripy.Triangulation or stripy.sTriangulation object
        which defines the node locations and their triangulation

    Returns
    -------
    grid : 1D array containing the results of the weighted average
    norm : 1D array of the normalisation used to compute `grid`
    count : 1D int array of number of points that contribute anything to a given node
    """
    import numpy as np

    # BUG FIX: the accumulator was previously created as `gridded_data`
    # while every update and the return statement used `grid`,
    # raising NameError on first call.
    grid = np.zeros(interpolator.npoints)
    norm = np.zeros(interpolator.npoints)
    # np.int was deprecated and removed (numpy >= 1.24); builtin int is equivalent
    count = np.zeros(interpolator.npoints, dtype=int)

    bcc, nodes = interpolator.containing_simplex_and_bcc(x1, x2)

    # Beware vectorising the reduction operation !!
    # (several data points may hit the same node, so updates must accumulate)
    for i in range(0, len(data)):
        for corner in range(3):
            node = nodes[i][corner]
            weight = bcc[i][corner]
            grid[node] += weight * data[i]
            norm[node] += weight
            count[node] += 1

    nonzero = np.where(norm > 0.0)
    grid[nonzero] /= norm[nonzero]

    return grid, norm, count
|
python
|
{
"resource": ""
}
|
q19685
|
great_circle_Npoints
|
train
|
def great_circle_Npoints(lonlat1r, lonlat2r, N):
    """
    N points along the line joining lonlat1 and lonlat2
    """
    # fractional positions along the chord: 0 -> point 1, 1 -> point 2
    t = np.linspace(0.0, 1.0, N).reshape(-1, 1)

    p1 = lonlat2xyz(lonlat1r[0], lonlat1r[1])
    p2 = lonlat2xyz(lonlat2r[0], lonlat2r[1])

    # linear interpolation in 3D, then project back onto the unit sphere
    chord = t * p2 + (1.0 - t) * p1
    radius = np.sqrt((chord ** 2).sum(axis=1)).reshape(-1, 1)
    on_sphere = chord / radius

    return xyz2lonlat(on_sphere[:, 0], on_sphere[:, 1], on_sphere[:, 2])
|
python
|
{
"resource": ""
}
|
q19686
|
sTriangulation._generate_permutation
|
train
|
def _generate_permutation(self, npoints):
"""
Create shuffle and deshuffle vectors
"""
i = np.arange(0, npoints)
# permutation
p = np.random.permutation(npoints)
ip = np.empty_like(p)
# inverse permutation
ip[p[i]] = i
return p, ip
|
python
|
{
"resource": ""
}
|
q19687
|
sTriangulation._is_collinear
|
train
|
def _is_collinear(self, lons, lats):
    """
    Checks if first three points are collinear - in the spherical
    case this corresponds to all points lying on a great circle
    and, hence, all coordinate vectors being in a single plane.
    """
    x, y, z = lonlat2xyz(lons[:3], lats[:3])
    coords = np.column_stack([x, y, z])
    # the three position vectors are coplanar iff their determinant vanishes.
    # NOTE(review): exact comparison against 0.0 — nearly-collinear points
    # with floating-point noise will not be flagged; confirm this is intended
    return np.linalg.det(coords.T) == 0.0
|
python
|
{
"resource": ""
}
|
q19688
|
sTriangulation.gradient_xyz
|
train
|
def gradient_xyz(self, f, nit=3, tol=1e-3, guarantee_convergence=False):
    """
    Return the cartesian components of the gradient
    of a scalar field on the surface of the sphere.
    The method consists of minimizing a quadratic functional Q(G) over
    gradient vectors, where Q is an approximation to the linearized
    curvature over the triangulation of a C-1 bivariate function F(x,y)
    which interpolates the nodal values and gradients.

    Parameters
    ----------
    f : array of floats, shape (n,)
        field over which to evaluate the gradient
    nit: int (default: 3)
        number of iterations to reach a convergence tolerance, tol
        nit >= 1
    tol: float (default: 1e-3)
        maximum change in gradient between iterations.
        convergence is reached when this condition is met.
    guarantee_convergence: bool (default: False)
        when True, keep re-invoking gradg until it no longer reports
        ierr == 1; when False a single call is made.

    Returns
    -------
    dfdx : array of floats, shape (n,)
        derivative of f in the x direction
    dfdy : array of floats, shape (n,)
        derivative of f in the y direction
    dfdz : array of floats, shape (n,)
        derivative of f in the z direction

    Raises
    ------
    ValueError
        if f is not the same size as the mesh, or if gradg returns a
        negative error code (looked up in _ier_codes).

    Notes
    -----
    For SIGMA = 0, optimal efficiency was achieved in testing with
    tol = 0, and nit = 3 or 4.
    The restriction of F to an arc of the triangulation is taken to be
    the Hermite interpolatory tension spline defined by the data values
    and tangential gradient components at the endpoints of the arc, and
    Q is the sum over the triangulation arcs, excluding interior
    constraint arcs, of the linearized curvatures of F along the arcs --
    the integrals over the arcs of D2F(T)**2, where D2F(T) is the second
    derivative of F with respect to distance T along the arc.
    """
    if f.size != self.npoints:
        raise ValueError('f should be the same size as mesh')
    # gradient = np.zeros((3,self.npoints), order='F', dtype=np.float32)
    # tension factor and its uniformity flag, both fixed at zero here
    # (see the SIGMA = 0 remark in the Notes above)
    sigma = 0
    iflgs = 0
    # gradg operates on the internal (shuffled) node ordering
    f = self._shuffle_field(f)
    ierr = 1
    # NOTE(review): ierr == 1 presumably signals "tolerance not reached";
    # with guarantee_convergence=True we retry until gradg stops returning it
    while ierr == 1:
        grad, ierr = _ssrfpack.gradg(self._x, self._y, self._z, f, self.lst, self.lptr, self.lend,\
                                     iflgs, sigma, nit, tol)
        if not guarantee_convergence:
            break
    # negative codes are hard errors described in _ier_codes
    if ierr < 0:
        raise ValueError('ierr={} in gradg\n{}'.format(ierr, _ier_codes[ierr]))
    # map the three gradient components back to the caller's node ordering
    return self._deshuffle_field(grad[0], grad[1], grad[2])
|
python
|
{
"resource": ""
}
|
q19689
|
sTriangulation.tri_area
|
train
|
def tri_area(self, lons, lats):
    """
    Calculate the area enclosed by 3 points on the unit sphere.

    Parameters
    ----------
    lons : array of floats, shape (3)
        longitudinal coordinates in radians
    lats : array of floats, shape (3)
        latitudinal coordinates in radians

    Returns
    -------
    area : float
        area of triangle on the unit sphere
    """
    lons, lats = self._check_integrity(lons, lats)

    # convert spherical coordinates to cartesian points on the unit
    # sphere, then hand off to the STRIPACK area routine
    px, py, pz = _stripack.trans(lats, lons)
    return _stripack.areas(px, py, pz)
|
python
|
{
"resource": ""
}
|
q19690
|
sTriangulation.areas
|
train
|
def areas(self):
    """
    Compute the area each triangle within the triangulation of points
    on the unit sphere.

    Returns
    -------
    area : array of floats, shape (nt,)
        area of each triangle in self.simplices where nt
        is the number of triangles.

    Notes
    -----
    This uses a Fortran 90 subroutine that wraps the AREA function
    to iterate over many points.
    """
    # the Fortran routine expects transposed, 1-based vertex indices
    fortran_simplices = self.simplices.T + 1
    return _stripack.triareas(self.x, self.y, self.z, fortran_simplices)
|
python
|
{
"resource": ""
}
|
q19691
|
sTriangulation.join
|
train
|
def join(self, t2, unique=False):
    """
    Join this triangulation with another. If the points are known to have no duplicates, then
    set unique=True to skip the testing and duplicate removal
    """
    merged_lons = np.concatenate((self.lons, t2.lons), axis=0)
    merged_lats = np.concatenate((self.lats, t2.lats), axis=0)

    ## remove any duplicates
    if not unique:
        merged_lons, merged_lats = remove_duplicate_lonlat(merged_lons, merged_lats)

    return merged_lons, merged_lats
|
python
|
{
"resource": ""
}
|
q19692
|
SalesforceBulk.get_query_batch_request
|
train
|
def get_query_batch_request(self, batch_id, job_id=None):
    """ Fetch the request sent for the batch. Note should only used for query batches """
    # look the job up from the batch when the caller did not supply it
    job_id = job_id or self.lookup_job_id(batch_id)
    url = "{}/job/{}/batch/{}/request".format(self.endpoint, job_id, batch_id)
    resp = requests.get(url, headers=self.headers())
    self.check_status(resp)
    return resp.text
|
python
|
{
"resource": ""
}
|
q19693
|
SalesforceBulk.abort_job
|
train
|
def abort_job(self, job_id):
    """Abort a given bulk job"""
    payload = self.create_abort_job_doc()
    url = "%s/job/%s" % (self.endpoint, job_id)
    resp = requests.post(url, headers=self.headers(), data=payload)
    self.check_status(resp)
|
python
|
{
"resource": ""
}
|
q19694
|
SalesforceBulk.get_all_results_for_query_batch
|
train
|
def get_all_results_for_query_batch(self, batch_id, job_id=None, chunk_size=2048):
    """
    Gets result ids and generates each result set from the batch and returns it
    as an generator fetching the next result set when needed

    Args:
        batch_id: id of batch
        job_id: id of job, if not provided, it will be looked up
        chunk_size: passed through to get_query_batch_results
            (presumably the streaming chunk size — confirm against that method)
    """
    result_ids = self.get_query_batch_result_ids(batch_id, job_id=job_id)
    if not result_ids:
        raise RuntimeError('Batch is not complete')

    # a generator function: nothing above runs until first iteration,
    # and each result set is fetched lazily as the caller advances
    for rid in result_ids:
        yield self.get_query_batch_results(batch_id, rid, job_id=job_id,
                                           chunk_size=chunk_size)
|
python
|
{
"resource": ""
}
|
q19695
|
MatchApiV4.matchlist_by_account
|
train
|
def matchlist_by_account(
    self,
    region,
    encrypted_account_id,
    queue=None,
    begin_time=None,
    end_time=None,
    begin_index=None,
    end_index=None,
    season=None,
    champion=None,
):
    """
    Get matchlist for ranked games played on given account ID and platform ID
    and filtered using given filter parameters, if any.

    All filter parameters are optional, and it is the caller's responsibility
    to ensure their combination is valid for the requested account; an invalid
    combination may simply return no matches. Constraints (per the API):

    - beginIndex / endIndex must be given together, with endIndex > beginIndex.
    - endTime without beginTime: beginTime defaults to the start of the
      account's match history.
    - beginTime without endTime: endTime defaults to the current time.
    - When both are given, endTime should be greater than beginTime
      (their range is unbounded).

    :param string region: The region to execute this request on
    :param string encrypted_account_id: The account ID.
    :param Set[int] queue: Set of queue IDs for which to filtering matchlist.
    :param long begin_time: The begin time to use for filtering matchlist specified as
        epoch milliseconds.
    :param long end_time: The end time to use for filtering matchlist specified as epoch
        milliseconds.
    :param int begin_index: The begin index to use for filtering matchlist.
    :param int end_index: The end index to use for filtering matchlist.
    :param Set[int] season: Set of season IDs for which to filtering matchlist.
    :param Set[int] champion: Set of champion IDs for which to filtering matchlist.

    :returns: MatchlistDto
    """
    # map snake_case parameters onto the camelCase names the URL builder expects
    filters = {
        "queue": queue,
        "beginTime": begin_time,
        "endTime": end_time,
        "beginIndex": begin_index,
        "endIndex": end_index,
        "season": season,
        "champion": champion,
    }
    url, query = MatchApiV4Urls.matchlist_by_account(
        region=region,
        encrypted_account_id=encrypted_account_id,
        **filters
    )
    return self._raw_request(self.matchlist_by_account.__name__, region, url, query)
|
python
|
{
"resource": ""
}
|
q19696
|
MatchApiV4.timeline_by_match
|
train
|
def timeline_by_match(self, region, match_id):
    """
    Get match timeline by match ID.

    Not all matches have timeline data.

    :param string region: The region to execute this request on
    :param long match_id: The match ID.

    :returns: MatchTimelineDto
    """
    method_name = self.timeline_by_match.__name__
    url, query = MatchApiV4Urls.timeline_by_match(region=region, match_id=match_id)
    return self._raw_request(method_name, region, url, query)
|
python
|
{
"resource": ""
}
|
q19697
|
RateLimitHandler.after_request
|
train
|
def after_request(self, region, endpoint_name, method_name, url, response):
    """
    Called after a response is received and before it is returned to the user.

    :param string region: the region of this request
    :param string endpoint_name: the name of the endpoint that was requested
    :param string method_name: the name of the method that was requested
    :param url: The url that was requested
    :param response: the response received. This is a response from the Requests library
    """
    # give every configured limiter a chance to update its bookkeeping
    for rate_limiter in self._limiters:
        rate_limiter.update_limiter(region, endpoint_name, method_name, response)
    return response
|
python
|
{
"resource": ""
}
|
q19698
|
LolStatusApiV3.shard_data
|
train
|
def shard_data(self, region):
    """
    Get League of Legends status for the given shard.

    Requests to this API are not counted against the application Rate Limits.

    :param string region: the region to execute this request on

    :returns: ShardStatus
    """
    method_name = self.shard_data.__name__
    url, query = LolStatusApiV3Urls.shard_data(region=region)
    return self._raw_request(method_name, region, url, query)
|
python
|
{
"resource": ""
}
|
q19699
|
ChampionMasteryApiV4.by_summoner_by_champion
|
train
|
def by_summoner_by_champion(self, region, encrypted_summoner_id, champion_id):
    """
    Get a champion mastery by player ID and champion ID.

    :param string region: the region to execute this request on
    :param string encrypted_summoner_id: Summoner ID associated with the player
    :param long champion_id: Champion ID to retrieve Champion Mastery for

    :returns: ChampionMasteryDTO: This object contains single Champion Mastery information for
        player and champion combination.
    """
    method_name = self.by_summoner_by_champion.__name__
    url, query = ChampionMasteryApiV4Urls.by_summoner_by_champion(
        region=region,
        encrypted_summoner_id=encrypted_summoner_id,
        champion_id=champion_id,
    )
    return self._raw_request(method_name, region, url, query)
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.