| sentence1 | sentence2 | label |
|---|---|---|
def movie_body_count(data_set='movie_body_count'):
"""Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R."""
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'film-death-counts-Python.csv')
Y = read_csv(filename)
Y['Actors'] = Y['Actors'].apply(lambda x: x.split('|'))
Y['Genre'] = Y['Genre'].apply(lambda x: x.split('|'))
Y['Director'] = Y['Director'].apply(lambda x: x.split('|'))
    return data_details_return({'Y': Y, 'info' : "Data set of movies and body counts scraped from www.MovieBodyCounts.com, created by Simon Garnier and Randy Olson for exploring differences between Python and R.",
}, data_set)
|
Data set of movies and body counts scraped from www.MovieBodyCounts.com, created by Simon Garnier and Randy Olson for exploring differences between Python and R.
|
entailment
|
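A minimal usage sketch for the loader above; it assumes the function is importable from its pods-style datasets module, and inspects the columns rather than guessing their names:

data = movie_body_count()
Y = data['Y']                 # pandas DataFrame of films
print(Y.columns)              # inspect the available columns
print(Y['Genre'].iloc[0])     # genres come back as a Python list after the split above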
def movie_body_count_r_classify(data_set='movie_body_count'):
"""Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R."""
data = movie_body_count()['Y']
import pandas as pd
import numpy as np
    X = data[['Year', 'Body_Count']].copy()  # copy so adding the genre columns below doesn't raise SettingWithCopyWarning
    Y = data['MPAA_Rating'] == 'R'  # set label to be positive for R-rated films.
    # Create series of movie genres with the relevant index
    s = data['Genre'].apply(pd.Series).stack()  # 'Genre' is already a list after movie_body_count(), so no str.split needed
s.index = s.index.droplevel(-1) # to line up with df's index
# Extract from the series the unique list of genres.
genres = s.unique()
# For each genre extract the indices where it is present and add a column to X
for genre in genres:
index = s[s==genre].index.tolist()
values = pd.Series(np.zeros(X.shape[0]), index=X.index)
values[index] = 1
X[genre] = values
    return data_details_return({'X': X, 'Y': Y, 'info' : "Data set of movies and body counts scraped from www.MovieBodyCounts.com, created by Simon Garnier and Randy Olson for exploring differences between Python and R. In this variant we aim to classify whether the film is rated R or not depending on the genre, the year and the body count.",
}, data_set)
|
Data set of movies and body counts scraped from www.MovieBodyCounts.com, created by Simon Garnier and Randy Olson for exploring differences between Python and R.
|
entailment
|
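The split/stack/indicator pattern used above is a general way to one-hot encode a multi-valued column. A self-contained sketch on toy data (all names illustrative):

import pandas as pd
import numpy as np

df = pd.DataFrame({'title': ['A', 'B'], 'Genre': ['Action|Comedy', 'Comedy']})
s = df['Genre'].str.split('|').apply(pd.Series).stack()
s.index = s.index.droplevel(-1)          # re-align with df's index
X = df[['title']].copy()
for genre in s.unique():
    values = pd.Series(np.zeros(len(df)), index=df.index)
    values[s[s == genre].index.tolist()] = 1
    X[genre] = values
print(X)                                 # one indicator column per genre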
def movielens100k(data_set='movielens100k'):
"""Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
if not data_available(data_set):
import zipfile
download_data(data_set)
dir_path = os.path.join(data_path, data_set)
        zip_file = zipfile.ZipFile(os.path.join(dir_path, 'ml-100k.zip'), 'r')  # avoid shadowing the zip() builtin
        for name in zip_file.namelist():
            zip_file.extract(name, dir_path)
    import pandas as pd
    encoding = 'latin-1'
    movie_path = os.path.join(data_path, 'movielens100k', 'ml-100k')
    items = pd.read_csv(os.path.join(movie_path, 'u.item'), index_col = 'index', header=None, sep='|', names=['index', 'title', 'date', 'empty', 'imdb_url', 'unknown', 'Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'], encoding=encoding)
users = pd.read_csv(os.path.join(movie_path, 'u.user'), index_col = 'index', header=None, sep='|', names=['index', 'age', 'sex', 'job', 'id'], encoding=encoding)
parts = ['u1.base', 'u1.test', 'u2.base', 'u2.test','u3.base', 'u3.test','u4.base', 'u4.test','u5.base', 'u5.test','ua.base', 'ua.test','ub.base', 'ub.test']
ratings = []
for part in parts:
rate_part = pd.read_csv(os.path.join(movie_path, part), index_col = 'index', header=None, sep='\t', names=['user', 'item', 'rating', 'index'], encoding=encoding)
rate_part['split'] = part
ratings.append(rate_part)
Y = pd.concat(ratings)
return data_details_return({'Y':Y, 'film_info':items, 'user_info':users, 'info': 'The Movielens 100k data'}, data_set)
|
Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use.
|
entailment
|
def ceres(data_set='ceres'):
"""Twenty two observations of the Dwarf planet Ceres as observed by Giueseppe Piazzi and published in the September edition of Monatlicher Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planets orbit through which the planet was recovered three months later."""
if not data_available(data_set):
download_data(data_set)
import pandas as pd
data = pd.read_csv(os.path.join(data_path, data_set, 'ceresData.txt'), index_col = 'Tag', header=None, sep='\t',names=['Tag', 'Mittlere Sonnenzeit', 'Gerade Aufstig in Zeit', 'Gerade Aufstiegung in Graden', 'Nordlich Abweich', 'Geocentrische Laenger', 'Geocentrische Breite', 'Ort der Sonne + 20" Aberration', 'Logar. d. Distanz'], parse_dates=True, dayfirst=False)
return data_details_return({'data': data}, data_set)
|
Twenty-two observations of the dwarf planet Ceres as observed by Giuseppe Piazzi and published in the September edition of Monatlicher Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planet's orbit, through which the planet was recovered three months later.
|
entailment
|
def calc_horizontal_infrared_radiation_intensity(weatherdata):
""" Estimates the global horizontal infrared radiation intensity based
on drybulb, dewpoint and opaque sky cover.
References:
    Walton, G. N. 1983. Thermal Analysis Research Program Reference Manual.
    NBSSIR 83-2655. National Bureau of Standards, p. 21.
Clark, G. and C. Allen, "The Estimation of Atmospheric Radiation for Clear and Cloudy
Skies," Proceedings 2nd National Passive Solar Conference (AS/ISES), 1978, pp. 675-678.
"""
temp_drybulb_K = C2K(weatherdata._dry_bulb_temperature)
temp_dew_K = C2K(weatherdata.dew_point_temperature)
N = weatherdata.opaque_sky_cover
    # Clark & Allen clear-sky emissivity scaled by the opaque-sky-cover polynomial (Walton 1983);
    # the cloud-cover term multiplies the whole clear-sky emissivity, not just the log term.
    sky_emissivity = ((0.787 + 0.764 * math.log(temp_dew_K / C2K(0.0))) *
                      (1.0 + 0.0224 * N - 0.0035 * N ** 2 + 0.00028 * N ** 3))
hor_id = sky_emissivity * sigma * temp_drybulb_K ** 4
weatherdata.horizontal_infrared_radiation_intensity = hor_id
|
Estimates the global horizontal infrared radiation intensity based
on drybulb, dewpoint and opaque sky cover.
References:
Walton, G. N. 1983. Thermal Analysis Research Program Reference Manual.
NBSSIR 83-2655. National Bureau of Standards, p. 21.
Clark, G. and C. Allen, "The Estimation of Atmospheric Radiation for Clear and Cloudy
Skies," Proceedings 2nd National Passive Solar Conference (AS/ISES), 1978, pp. 675-678.
|
entailment
|
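A worked example of the corrected Clark & Allen / Walton formula above, with illustrative inputs (dew point 10 C, dry bulb 20 C, opaque sky cover N=5) and stand-ins for the module's sigma and C2K helpers:

import math

sigma = 5.670374419e-8        # Stefan-Boltzmann constant, W/m^2/K^4 (assumed module constant)
def C2K(t_celsius):           # minimal stand-in for the module's C2K helper
    return t_celsius + 273.15

temp_dew_K = C2K(10.0)
N = 5                         # opaque sky cover in tenths
sky_emissivity = ((0.787 + 0.764 * math.log(temp_dew_K / C2K(0.0))) *
                  (1.0 + 0.0224 * N - 0.0035 * N ** 2 + 0.00028 * N ** 3))
hor_ir = sky_emissivity * sigma * C2K(20.0) ** 4
print(round(sky_emissivity, 3), round(hor_ir, 1))   # roughly 0.863 and ~361 W/m^2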
def download_url(url, dir_name='.', save_name=None, store_directory=None, messages=True, suffix=''):
"""Download a file from a url and save it to disk."""
if sys.version_info>=(3,0):
from urllib.parse import quote
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
    else:
        from urllib2 import quote
        from urllib2 import urlopen
        from urllib2 import HTTPError, URLError  # both names are used by the handlers below
i = url.rfind('/')
file = url[i+1:]
if store_directory is not None:
dir_name = os.path.join(dir_name, store_directory)
if save_name is None:
save_name = file
save_name = os.path.join(dir_name, save_name)
print("Downloading ", url, "->", save_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
try:
response = urlopen(url+suffix)
except HTTPError as e:
if not hasattr(e, "code"):
raise
        if e.code > 399 and e.code < 500:
raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(e.code))
elif e.code > 499:
raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(e.code))
except URLError as e:
raise ValueError('Tried url ' + url + suffix + ' and failed with error ' + str(e.reason))
with open(save_name, 'wb') as f:
meta = response.info()
content_length_str = meta.get("Content-Length")
if content_length_str:
            try:
                file_size = int(content_length_str)
            except (TypeError, ValueError):
                try:
                    # some servers return the header value as a list-like object
                    file_size = int(content_length_str[0])
                except (TypeError, ValueError, IndexError):
                    file_size = None
            if file_size == 1:
                # a reported length of 1 byte is treated as a bogus header
                file_size = None
else:
file_size = None
status = ""
file_size_dl = 0
block_sz = 8192
line_length = 30
percentage = 1./line_length
if file_size:
print("|"+"{:^{ll}}".format("Downloading {:7.3f}MB".format(file_size/(1048576.)), ll=line_length)+"|")
from itertools import cycle
cycle_str = cycle('>')
sys.stdout.write("|")
while True:
buff = response.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
            # If content_length_str was wrong we could print far too many progress marks; the check below guards against that edge case.
if file_size:
if (float(file_size_dl)/file_size) >= percentage:
sys.stdout.write(next(cycle_str))
sys.stdout.flush()
percentage += 1./line_length
#percentage = "="*int(line_length*float(file_size_dl)/file_size)
#status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.), full=file_size/(1048576.), ll=line_length, perc=percentage)
else:
sys.stdout.write(" "*(len(status)) + "\r")
status = r"{dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
ll=line_length,
perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
sys.stdout.write(status)
sys.stdout.flush()
if file_size:
sys.stdout.write("|")
sys.stdout.flush()
print(status)
|
Download a file from a url and save it to disk.
|
entailment
|
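A usage sketch for download_url above; the URL and directory names are placeholders:

download_url('https://example.com/data/archive.zip',
             dir_name='downloads',
             store_directory='archives',   # the file lands in downloads/archives/archive.zip
             save_name='archive.zip')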
def access_elementusers(self, elementuser_id, access_id=None, tenant_id=None, api_version="v2.0"):
"""
Get all accesses for a particular user
**Parameters:**:
- **elementuser_id**: Element User ID
- **access_id**: (optional) Access ID
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
if not access_id:
url = str(cur_ctlr) + "/{}/api/tenants/{}/elementusers/{}/access".format(api_version,
tenant_id,
elementuser_id)
else:
url = str(cur_ctlr) + "/{}/api/tenants/{}/elementusers/{}/access/{}".format(api_version,
tenant_id,
elementuser_id,
access_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "get")
|
Get all accesses for a particular user
**Parameters:**:
- **elementuser_id**: Element User ID
- **access_id**: (optional) Access ID
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
|
entailment
|
def logout(self, api_version="v2.0"):
"""
Logout current session
**Parameters:**:
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/logout".format(api_version)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "get")
|
Logout current session
**Parameters:**:
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
|
entailment
|
def login(self, email=None, password=None):
"""
        Interactive login using the `cloudgenix.API` object. This function is more robust and handles SAML and MSP accounts.
        Expects interactive capability. If this is not available, use `cloudgenix.API.post.login` directly.
**Parameters:**:
- **email**: Email to log in for, will prompt if not entered.
- **password**: Password to log in with, will prompt if not entered. Ignored for SAML v2.0 users.
**Returns:** Bool. In addition the function will mutate the `cloudgenix.API` constructor items as needed.
"""
# if email not given in function, or if first login fails, prompt.
if email is None:
# If user is not set, pull from cache. If not in cache, prompt.
if self._parent_class.email:
email = self._parent_class.email
else:
email = compat_input("login: ")
if password is None:
# if pass not given on function, or if first login fails, prompt.
if self._parent_class._password:
password = self._parent_class._password
else:
password = getpass.getpass()
# Try and login
# For SAML 2.0 support, set the Referer URL prior to logging in.
# add referer header to the session.
self._parent_class.add_headers({'Referer': "{}/v2.0/api/login".format(self._parent_class.controller)})
# call the login API.
response = self._parent_class.post.login({"email": email, "password": password})
if response.cgx_status:
# Check for SAML 2.0 login
if not response.cgx_content.get('x_auth_token'):
urlpath = response.cgx_content.get("urlpath", "")
request_id = response.cgx_content.get("requestId", "")
if urlpath and request_id:
# SAML 2.0
print('SAML 2.0: To finish login open the following link in a browser\n\n{0}\n\n'.format(urlpath))
found_auth_token = False
for i in range(20):
print('Waiting for {0} seconds for authentication...'.format((20 - i) * 5))
saml_response = self.check_sso_login(email, request_id)
if saml_response.cgx_status and saml_response.cgx_content.get('x_auth_token'):
found_auth_token = True
break
# wait before retry.
time.sleep(5)
if not found_auth_token:
print("Login time expired! Please re-login.\n")
# log response when debug
try:
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response, indent=4))
except (TypeError, ValueError):
# not JSON response, don't pretty print log.
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", str(response))
# print login error
print('Login failed, please try again', response)
# Flush command-line entered login info if failure.
self._parent_class.email = None
                self._parent_class._password = None  # clear the cached private _password, consistent with the other failure paths
return False
api_logger.info('Login successful:')
# if we got here, we either got an x_auth_token in the original login, or
# we got an auth_token cookie set via SAML. Figure out which.
auth_token = response.cgx_content.get('x_auth_token')
if auth_token:
# token in the original login (not saml) means region parsing has not been done.
# do now, and recheck if cookie needs set.
auth_region = self._parent_class.parse_region(response)
self._parent_class.update_region_to_controller(auth_region)
self._parent_class.reparse_login_cookie_after_region_update(response)
# debug info if needed
api_logger.debug("AUTH_TOKEN=%s", response.cgx_content.get('x_auth_token'))
# Step 2: Get operator profile for tenant ID and other info.
if self.interactive_update_profile_vars():
# pull tenant detail
if self._parent_class.tenant_id:
# add tenant values to API() object
if self.interactive_tenant_update_vars():
# Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
if self._parent_class.is_esp:
# ESP/MSP!
choose_status, chosen_client_id = self.interactive_client_choice()
if choose_status:
# attempt to login as client
clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
if clogin_resp.cgx_status:
# login successful, update profile and tenant info
c_profile = self.interactive_update_profile_vars()
t_profile = self.interactive_tenant_update_vars()
if c_profile and t_profile:
# successful full client login.
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return True
else:
if t_profile:
print("ESP Client Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
else:
print("ESP Client Login failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
else:
print("ESP Client Choice failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
# successful!
# clear password out of memory
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return True
else:
print("Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
else:
# Profile detail retrieval failed
self._parent_class.email = None
self._parent_class._password = None
return False
api_logger.info("EMAIL = %s", self._parent_class.email)
api_logger.info("USER_ID = %s", self._parent_class._user_id)
api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
else:
# log response when debug
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response.cgx_content, indent=4))
# print login error
print('Login failed, please try again:', response.cgx_content)
# Flush command-line entered login info if failure.
self._parent_class.email = None
            self._parent_class._password = None
# remove referer header prior to continuing.
self._parent_class.remove_header('Referer')
return False
|
Interactive login using the `cloudgenix.API` object. This function is more robust and handles SAML and MSP accounts.
Expects interactive capability. If this is not available, use `cloudgenix.API.post.login` directly.
**Parameters:**:
- **email**: Email to log in for, will prompt if not entered.
- **password**: Password to log in with, will prompt if not entered. Ignored for SAML v2.0 users.
**Returns:** Bool. In addition the function will mutate the `cloudgenix.API` constructor items as needed.
|
entailment
|
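The SAML branch above polls check_sso_login on a fixed schedule. The same retry pattern in isolation, using the timing from the code (20 attempts, 5 seconds apart) and a hypothetical poll() callable standing in for the API call:

import time

def wait_for_token(poll, attempts=20, delay=5):
    # Call poll() until it returns a truthy token or the attempts run out.
    for i in range(attempts):
        print('Waiting for {0} seconds for authentication...'.format((attempts - i) * delay))
        token = poll()
        if token:
            return token
        time.sleep(delay)
    return None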
def use_token(self, token=None):
"""
Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.
**Parameters:**:
- **token**: Static AUTH_TOKEN
**Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API`
constructor items as needed.
"""
api_logger.info('use_token function:')
# check token is a string.
if not isinstance(token, (text_type, binary_type)):
api_logger.debug('"token" was not a text-style string: {}'.format(text_type(token)))
return False
# Start setup of constructor.
session = self._parent_class.expose_session()
# clear cookies
session.cookies.clear()
# Static Token uses X-Auth-Token header instead of cookies.
self._parent_class.add_headers({
'X-Auth-Token': token
})
# Step 2: Get operator profile for tenant ID and other info.
if self.interactive_update_profile_vars():
# pull tenant detail
if self._parent_class.tenant_id:
# add tenant values to API() object
if self.interactive_tenant_update_vars():
# Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
if self._parent_class.is_esp:
# ESP/MSP!
choose_status, chosen_client_id = self.interactive_client_choice()
if choose_status:
# attempt to login as client
clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
if clogin_resp.cgx_status:
# login successful, update profile and tenant info
c_profile = self.interactive_update_profile_vars()
t_profile = self.interactive_tenant_update_vars()
if c_profile and t_profile:
# successful full client login.
self._parent_class._password = None
return True
else:
if t_profile:
print("ESP Client Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
print("ESP Client Login failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
print("ESP Client Choice failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
# successful!
# clear password out of memory
self._parent_class._password = None
return True
else:
print("Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
# Profile detail retrieval failed
self._parent_class.email = None
self._parent_class._password = None
return False
api_logger.info("EMAIL = %s", self._parent_class.email)
api_logger.info("USER_ID = %s", self._parent_class._user_id)
api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
return True
|
Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.
**Parameters:**:
- **token**: Static AUTH_TOKEN
**Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API`
constructor items as needed.
|
entailment
|
def interactive_tenant_update_vars(self):
"""
Function to update the `cloudgenix.API` object with tenant login info. Run after login or client login.
        **Returns:** Boolean on success/failure.
"""
api_logger.info('interactive_tenant_update_vars function:')
tenant_resp = self._parent_class.get.tenants(self._parent_class.tenant_id)
status = tenant_resp.cgx_status
tenant_dict = tenant_resp.cgx_content
if status:
api_logger.debug("new tenant_dict: %s", tenant_dict)
# Get Tenant info.
self._parent_class.tenant_name = tenant_dict.get('name', self._parent_class.tenant_id)
# is ESP/MSP?
self._parent_class.is_esp = tenant_dict.get('is_esp')
# grab tenant address for location.
address_lookup = tenant_dict.get('address', None)
if address_lookup:
tenant_address = address_lookup.get('street', "") + ", "
tenant_address += (str(address_lookup.get('street2', "")) + ", ")
tenant_address += (str(address_lookup.get('city', "")) + ", ")
tenant_address += (str(address_lookup.get('state', "")) + ", ")
tenant_address += (str(address_lookup.get('post_code', "")) + ", ")
tenant_address += (str(address_lookup.get('country', "")) + ", ")
else:
tenant_address = "Unknown"
self._parent_class.address = tenant_address
return True
else:
# update failed
return False
|
Function to update the `cloudgenix.API` object with tenant login info. Run after login or client login.
**Returns:** Boolean on success/failure.
|
entailment
|
def interactive_update_profile_vars(self):
"""
Function to update the `cloudgenix.API` object with profile info. Run after login or client login.
        **Returns:** Boolean on success/failure.
"""
profile = self._parent_class.get.profile()
if profile.cgx_status:
# if successful, save tenant id and email info to cli state.
self._parent_class.tenant_id = profile.cgx_content.get('tenant_id')
self._parent_class.email = profile.cgx_content.get('email')
self._parent_class._user_id = profile.cgx_content.get('id')
self._parent_class.roles = profile.cgx_content.get('roles', [])
self._parent_class.token_session = profile.cgx_content.get('token_session')
return True
else:
print("Profile retrieval failed.")
# clear password out of memory
self._parent_class._password = None
return False
|
Function to update the `cloudgenix.API` object with profile info. Run after login or client login.
**Returns:** Boolean on success/failure.
|
entailment
|
def interactive_client_choice(self):
"""
Present a menu for user to select from ESP/MSP managed clients they have permission to.
**Returns:** Tuple with (Boolean success, selected client ID).
"""
clients = self._parent_class.get.clients_t()
clients_perms = self._parent_class.get.permissions_clients_d(self._parent_class._user_id)
client_status = clients.cgx_status
clients_dict = clients.cgx_content
c_perms_status = clients_perms.cgx_status
c_perms_dict = clients_perms.cgx_content
# Build MSP/ESP id-name dict, get list of allowed tenants.
if client_status and c_perms_status:
client_id_name = {}
for client in clients_dict.get('items', []):
if type(client) is dict:
# create client ID to name map table.
client_id_name[client.get('id', "err")] = client.get('canonical_name')
# Valid clients w/permissions - create list of tuples for menu
menu_list = []
for client in c_perms_dict.get('items', []):
if type(client) is dict:
# add entry
client_id = client.get('client_id')
# create tuple of ( client name, client id ) to append to list
menu_list.append(
(client_id_name.get(client_id, client_id), client_id)
)
# empty menu?
if not menu_list:
# no clients
print("No ESP/MSP clients allowed for user.")
return False, {}
# ask user to select client
_, chosen_client_id = self.quick_menu("ESP/MSP Detected. Select a client to use:", "{0}) {1}", menu_list)
return True, chosen_client_id
else:
print("ESP/MSP detail retrieval failed.")
return False, {}
|
Present a menu for user to select from ESP/MSP managed clients they have permission to.
**Returns:** Tuple with (Boolean success, selected client ID).
|
entailment
|
def quick_menu(self, banner, list_line_format, choice_list):
"""
Function to display a quick menu for user input
**Parameters:**
- **banner:** Text to display before menu
        - **list_line_format:** Format string with slots for the index plus the tuple values (printed for each choice)
- **choice_list:** List of tuple values that you want returned if selected (and printed)
**Returns:** Tuple that was selected.
"""
# Setup menu
invalid = True
menu_int = -1
# loop until valid
while invalid:
print(banner)
for item_index, item_value in enumerate(choice_list):
print(list_line_format.format(item_index + 1, *item_value))
menu_choice = compat_input("\nChoose a Number or (Q)uit: ")
if str(menu_choice).lower() in ['q']:
# exit
print("Exiting..")
# best effort logout
self._parent_class.get.logout()
sys.exit(0)
# verify number entered
try:
menu_int = int(menu_choice)
sanity = True
except ValueError:
# not a number
print("ERROR: ", menu_choice)
sanity = False
# validate number chosen
if sanity and 1 <= menu_int <= len(choice_list):
invalid = False
else:
print("Invalid input, needs to be between 1 and {0}.\n".format(len(choice_list)))
# return the choice_list tuple that matches the entry.
return choice_list[int(menu_int) - 1]
|
Function to display a quick menu for user input
**Parameters:**
- **banner:** Text to display before menu
- **list_line_format:** Format string with slots for the index plus the tuple values (printed for each choice)
- **choice_list:** List of tuple values that you want returned if selected (and printed)
**Returns:** Tuple that was selected.
|
entailment
|
def check_sso_login(self, operator_email, request_id):
"""
Login to the CloudGenix API, and see if SAML SSO has occurred.
This function is used to check and see if SAML SSO has succeeded while waiting.
**Parameters:**
- **operator_email:** String with the username to log in with
- **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.
        **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
data = {
"email": operator_email,
"requestId": request_id
}
# If debug is set..
api_logger.info('check_sso_login function:')
response = self._parent_class.post.login(data=data)
# If valid response, but no token.
if not response.cgx_content.get('x_auth_token'):
# no valid login yet.
return response
# update with token and region
auth_region = self._parent_class.parse_region(response)
self._parent_class.update_region_to_controller(auth_region)
self._parent_class.reparse_login_cookie_after_region_update(response)
return response
|
Login to the CloudGenix API, and see if SAML SSO has occurred.
This function is used to check and see if SAML SSO has succeeded while waiting.
**Parameters:**
- **operator_email:** String with the username to log in with
- **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
|
entailment
|
def logout(self, force=False):
"""
Interactive logout - ensures uid/tid cleared so `cloudgenix.API` object/ requests.Session can be re-used.
**Parameters:**:
- **force**: Bool, force logout API call, even when using a static AUTH_TOKEN.
**Returns:** Bool of whether the operation succeeded.
"""
# Extract requests session for manipulation.
session = self._parent_class.expose_session()
# if force = True, or token_session = None/False, call logout API.
if force or not self._parent_class.token_session:
# Call Logout
result = self._parent_class.get.logout()
if result.cgx_status:
# clear info from session.
self._parent_class.tenant_id = None
self._parent_class.tenant_name = None
self._parent_class.is_esp = None
self._parent_class.client_id = None
self._parent_class.address_string = None
self._parent_class.email = None
self._parent_class._user_id = None
self._parent_class._password = None
self._parent_class.roles = None
self._parent_class.token_session = None
# Cookies are removed via LOGOUT API call. if X-Auth-Token set, clear.
if session.headers.get('X-Auth-Token'):
self._parent_class.remove_header('X-Auth-Token')
return result.cgx_status
else:
# Token Session and not forced.
api_logger.debug('TOKEN SESSION, LOGOUT API NOT CALLED.')
# clear info from session.
self._parent_class.tenant_id = None
self._parent_class.tenant_name = None
self._parent_class.is_esp = None
self._parent_class.client_id = None
self._parent_class.address_string = None
self._parent_class.email = None
self._parent_class._user_id = None
self._parent_class._password = None
self._parent_class.roles = None
self._parent_class.token_session = None
# if X-Auth-Token set, clear.
if session.headers.get('X-Auth-Token'):
self._parent_class.remove_header('X-Auth-Token')
return True
|
Interactive logout - ensures uid/tid cleared so `cloudgenix.API` object/ requests.Session can be re-used.
**Parameters:**:
- **force**: Bool, force logout API call, even when using a static AUTH_TOKEN.
**Returns:** Bool of whether the operation succeeded.
|
entailment
|
def jd(api_response):
"""
JD (JSON Dump) function. Meant for quick pretty-printing of CloudGenix Response objects.
Example: `jd(cgx_sess.get.sites())`
**Returns:** No Return, directly prints all output.
"""
try:
# attempt to print the cgx_content. should always be a Dict if it exists.
print(json.dumps(api_response.cgx_content, indent=4))
except (TypeError, ValueError, AttributeError):
# cgx_content did not exist, or was not JSON serializable. Try pretty printing the base obj.
try:
print(json.dumps(api_response, indent=4))
except (TypeError, ValueError, AttributeError):
# Same issue, just raw print the passed data. Let any exceptions happen here.
print(api_response)
return
|
JD (JSON Dump) function. Meant for quick pretty-printing of CloudGenix Response objects.
Example: `jd(cgx_sess.get.sites())`
**Returns:** No Return, directly prints all output.
|
entailment
|
def quick_confirm(prompt, default_value):
"""
Function to display a quick confirmation for user input
**Parameters:**
- **prompt:** Text to display before confirm
- **default_value:** Default value for no entry
**Returns:** 'y', 'n', or Default value.
"""
valid = False
value = default_value.lower()
while not valid:
input_val = compat_input(prompt + "[{0}]: ".format(default_value))
if input_val == "":
value = default_value.lower()
valid = True
        else:
            if input_val.lower() in ['y', 'n']:
                value = input_val.lower()
                valid = True
            else:
                print("ERROR: enter 'Y' or 'N'.")
                valid = False
return value
|
Function to display a quick confirmation for user input
**Parameters:**
- **prompt:** Text to display before confirm
- **default_value:** Default value for no entry
**Returns:** 'y', 'n', or Default value.
|
entailment
|
def quick_int_input(prompt, default_value, min_val=1, max_val=30):
"""
Function to display a quick question for integer user input
**Parameters:**
- **prompt:** Text / question to display
- **default_value:** Default value for no entry
- **min_val:** Lowest allowed integer
- **max_val:** Highest allowed integer
**Returns:** integer or default_value.
"""
valid = False
num_val = default_value
while not valid:
input_val = compat_input(prompt + "[{0}]: ".format(default_value))
if input_val == "":
num_val = default_value
valid = True
else:
try:
num_val = int(input_val)
if min_val <= num_val <= max_val:
valid = True
else:
print("ERROR: must be between {0} and {1}.".format(min, max))
valid = False
except ValueError:
print("ERROR: must be a number.")
valid = False
return num_val
|
Function to display a quick question for integer user input
**Parameters:**
- **prompt:** Text / question to display
- **default_value:** Default value for no entry
- **min_val:** Lowest allowed integer
- **max_val:** Highest allowed integer
**Returns:** integer or default_value.
|
entailment
|
def quick_str_input(prompt, default_value):
"""
Function to display a quick question for text input.
**Parameters:**
- **prompt:** Text / question to display
- **default_value:** Default value for no entry
**Returns:** text_type() or default_value.
"""
valid = False
str_val = default_value
while not valid:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))  # compat_input, matching the other quick_* helpers
if input_val == "":
str_val = default_value
valid = True
else:
try:
str_val = text_type(input_val)
valid = True
except ValueError:
print("ERROR: must be text.")
valid = False
return str_val
|
Function to display a quick question for text input.
**Parameters:**
- **prompt:** Text / question to display
- **default_value:** Default value for no entry
**Returns:** text_type() or default_value.
|
entailment
|
def compare_digests(digest_1, digest_2, is_hex_1=True, is_hex_2=True, threshold=None):
"""
    computes the bit difference between two nilsimsa digests
    takes params for format; the default is a hex string, but each digest
    can also be a list of 32 integers
Optimized method originally from https://gist.github.com/michelp/6255490
If `threshold` is set, and the comparison will be less than
`threshold`, then bail out early and return a value just below the
threshold. This is a speed optimization that accelerates
comparisons of very different items; e.g. tests show a ~20-30% speed
up. `threshold` must be an integer in the range [-128, 128].
"""
# if we have both hexes use optimized method
if threshold is not None:
threshold -= 128
threshold *= -1
if is_hex_1 and is_hex_2:
bits = 0
for i in range_(0, 63, 2):
bits += POPC[255 & int(digest_1[i:i+2], 16) ^ int(digest_2[i:i+2], 16)]
if threshold is not None and bits > threshold: break
return 128 - bits
else:
# at least one of the inputs is a list of unsigned ints
if is_hex_1: digest_1 = convert_hex_to_ints(digest_1)
if is_hex_2: digest_2 = convert_hex_to_ints(digest_2)
bit_diff = 0
for i in range(len(digest_1)):
bit_diff += POPC[255 & digest_1[i] ^ digest_2[i]]
if threshold is not None and bit_diff > threshold: break
return 128 - bit_diff
|
computes the bit difference between two nilsimsa digests
takes params for format; the default is a hex string, but each digest
can also be a list of 32 integers
Optimized method originally from https://gist.github.com/michelp/6255490
If `threshold` is set, and the comparison will be less than
`threshold`, then bail out early and return a value just below the
threshold. This is a speed optimization that accelerates
comparisons of very different items; e.g. tests show a ~20-30% speed
up. `threshold` must be an integer in the range [-128, 128].
|
entailment
|
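A toy check of compare_digests above; the 64-hex-character strings are arbitrary values, not real nilsimsa digests:

a = '00' * 32                    # 64 hex characters
b = '00' * 31 + 'ff'             # differs from a in the last 8 bits
print(compare_digests(a, a))     # 128: no differing bits
print(compare_digests(a, b))     # 120: 8 bits differ
print(compare_digests(a, b, threshold=64))   # also 120; the early exit only fires for very different digests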
def tran_hash(self, a, b, c, n):
"""implementation of the tran53 hash function"""
return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
|
implementation of the tran53 hash function
|
entailment
|
def process(self, chunk):
"""
computes the hash of all of the trigrams in the chunk using a window
of length 5
"""
self._digest = None
if isinstance(chunk, text_type):
chunk = chunk.encode('utf-8')
# chunk is a byte string
for char in chunk:
self.num_char += 1
if PY3:
# In Python 3, iterating over bytes yields integers
c = char
else:
c = ord(char)
if len(self.window) > 1: # seen at least three characters
self.acc[self.tran_hash(c, self.window[0], self.window[1], 0)] += 1
if len(self.window) > 2: # seen at least four characters
self.acc[self.tran_hash(c, self.window[0], self.window[2], 1)] += 1
self.acc[self.tran_hash(c, self.window[1], self.window[2], 2)] += 1
if len(self.window) > 3: # have a full window
self.acc[self.tran_hash(c, self.window[0], self.window[3], 3)] += 1
self.acc[self.tran_hash(c, self.window[1], self.window[3], 4)] += 1
self.acc[self.tran_hash(c, self.window[2], self.window[3], 5)] += 1
# duplicate hashes, used to maintain 8 trigrams per character
self.acc[self.tran_hash(self.window[3], self.window[0], c, 6)] += 1
self.acc[self.tran_hash(self.window[3], self.window[2], c, 7)] += 1
# add current character to the window, remove the previous character
if len(self.window) < 4:
self.window = [c] + self.window
else:
self.window = [c] + self.window[:3]
|
computes the hash of all of the trigrams in the chunk using a window
of length 5
|
entailment
|
def compute_digest(self):
"""
using a threshold (mean of the accumulator), computes the nilsimsa digest
"""
num_trigrams = 0
if self.num_char == 3: # 3 chars -> 1 trigram
num_trigrams = 1
elif self.num_char == 4: # 4 chars -> 4 trigrams
num_trigrams = 4
elif self.num_char > 4: # > 4 chars -> 8 for each char
num_trigrams = 8 * self.num_char - 28
        # threshold is the mean of the acc buckets
threshold = num_trigrams / 256.0
digest = [0] * 32
for i in range(256):
if self.acc[i] > threshold:
                digest[i >> 3] += 1 << (i & 7)  # set bit (i mod 8) of byte i // 8
self._digest = digest[::-1]
|
using a threshold (mean of the accumulator), computes the nilsimsa digest
|
entailment
|
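The packing step digest[i >> 3] += 1 << (i & 7) above sets bit (i mod 8) of byte i // 8, so the 256 accumulator positions map onto a 32-byte digest. A quick illustration:

digest = [0] * 32
for i in (0, 7, 8, 255):            # accumulator positions above the threshold
    digest[i >> 3] += 1 << (i & 7)  # byte i // 8, bit i % 8
print(digest[0])    # 129: bits 0 and 7 of byte 0
print(digest[1])    # 1: bit 0 of byte 1 (from i = 8)
print(digest[31])   # 128: bit 7 of byte 31 (from i = 255)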
    def from_file(self, fname):
        """read in a file and compute digest"""
        with open(fname, "rb") as f:
            self.update(f.read())
|
read in a file and compute digest
|
entailment
|
def compare(self, digest_2, is_hex = False):
"""
        returns the nilsimsa score (128 minus the bit difference) between the
        current object's digest and a given digest
"""
# convert hex string to list of ints
if is_hex:
digest_2 = convert_hex_to_ints(digest_2)
bit_diff = 0
for i in range(len(self.digest)):
            bit_diff += POPC[self.digest[i] ^ digest_2[i]]  # bit difference at the i'th byte of the digests
return 128 - bit_diff
|
returns the nilsimsa score (128 minus the bit difference) between the
current object's digest and a given digest
|
entailment
|
def login(self, data, api_version="v2.0"):
"""
        Login API
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/login".format(api_version)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data, sensitive=True)
|
Login API
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
|
entailment
|
def tenant_forgot_password_login(self, data, tenant_id=None, api_version="v2.0"):
"""
Forgot password API
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
"""
if tenant_id is None and self._parent_class.tenant_id:
# Pull tenant_id from parent namespace cache.
tenant_id = self._parent_class.tenant_id
elif not tenant_id:
# No value for tenant_id.
raise TypeError("tenant_id is required but not set or cached.")
cur_ctlr = self._parent_class.controller
url = str(cur_ctlr) + "/{}/api/tenants/{}/login/password/forgot".format(api_version,
tenant_id)
api_logger.debug("URL = %s", url)
return self._parent_class.rest_call(url, "post", data=data, sensitive=True)
|
Forgot password API
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
|
entailment
|
def is_valid_file(parser, arg):
    """verify the validity of the given file. Never trust the End-User"""
    if not os.path.exists(arg):
        parser.error("File %s not found" % arg)
else:
return arg
|
verify the validity of the given file. Never trust the End-User
|
entailment
|
def getID(code_file):
"""Get the language ID of the input file language"""
    json_path = os.path.join(ghostfolder, json_file)
    if not os.path.exists(json_path):
        download_file('https://ghostbin.com/languages.json')
    lang = detect_lang(code_file)
    with open(json_path) as f:  # file() is Python 2 only; open() works on both versions
        json_data = json.load(f)
    ID = ''
    for i in range(len(json_data)):
        for j in range(len(json_data[i]['languages'])):
            if json_data[i]['languages'][j]['name'].lower() == lang.lower():
                ID = json_data[i]['languages'][j]['id']
    print('Got language ID from \'languages.json\': {0}'.format(ID))
    return ID
|
Get the language ID of the input file language
|
entailment
|
def detect_lang(path):
"""Detect the language used in the given file."""
blob = FileBlob(path, os.getcwd())
if blob.is_text:
print('Programming language of the file detected: {0}'.format(blob.language.name))
return blob.language.name
else:#images, binary and what-have-you won't be pasted
print('File not a text file. Exiting...')
sys.exit()
|
Detect the language used in the given file.
|
entailment
|
def set_serial(self, android_serial):
"""
        Specify the given *android_serial* device to perform the test.
        You do not have to specify the device when only one device is connected to the computer.
        When you need to use multiple devices, do not use this keyword to switch between devices during test execution.
        Instead, import this library under different names, as described in http://robotframework.googlecode.com/hg/doc/userguide/RobotFrameworkUserGuide.html?r=2.8.5.
| Setting | Value | Value | Value |
| Library | Mobile | WITH NAME | Mobile1 |
| Library | Mobile | WITH NAME | Mobile2 |
And set the serial to each library.
| Test Case | Action | Argument |
| Multiple Devices | Mobile1.Set Serial | device_1's serial |
| | Mobile2.Set Serial | device_2's serial |
"""
self.adb = ADB(android_serial)
self.device = Device(android_serial)
self.test_helper = TestHelper(self.adb)
|
Specify the given *android_serial* device to perform the test.
You do not have to specify the device when only one device is connected to the computer.
When you need to use multiple devices, do not use this keyword to switch between devices during test execution.
Instead, import this library under different names, as described in http://robotframework.googlecode.com/hg/doc/userguide/RobotFrameworkUserGuide.html?r=2.8.5.
| Setting | Value | Value | Value |
| Library | Mobile | WITH NAME | Mobile1 |
| Library | Mobile | WITH NAME | Mobile2 |
And set the serial to each library.
| Test Case | Action | Argument |
| Multiple Devices | Mobile1.Set Serial | device_1's serial |
| | Mobile2.Set Serial | device_2's serial |
|
entailment
|
def click_at_coordinates(self, x, y):
"""
Click at (x,y) coordinates.
"""
self.device.click(int(x), int(y))
|
Click at (x,y) coordinates.
|
entailment
|
def swipe_by_coordinates(self, sx, sy, ex, ey, steps=10):
"""
        Swipe from (sx, sy) to (ex, ey) with the given *steps*.
        Example:
        | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | | # Swipe from (540, 1340) to (940, 1340) with default steps 10 |
        | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | 100 | # Swipe from (540, 1340) to (940, 1340) with steps 100 |
"""
self.device.swipe(sx, sy, ex, ey, steps)
|
Swipe from (sx, sy) to (ex, ey) with the given *steps*.
Example:
| Swipe By Coordinates | 540 | 1340 | 940 | 1340 | | # Swipe from (540, 1340) to (940, 1340) with default steps 10 |
| Swipe By Coordinates | 540 | 1340 | 940 | 1340 | 100 | # Swipe from (540, 1340) to (940, 1340) with steps 100 |
|
entailment
|
def swipe_left(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to left.
Example:
| Swipe Left | description=Home screen 3 | | # swipe the UI object left |
| Swipe Left | 5 | description=Home screen 3 | # swipe the UI object left with steps=5 |
See `introduction` for details about Identified UI object.
"""
self.device(**selectors).swipe.left(steps=steps)
|
Swipe the UI object with *selectors* from center to left.
Example:
| Swipe Left | description=Home screen 3 | | # swipe the UI object left |
| Swipe Left | 5 | description=Home screen 3 | # swipe the UI object left with steps=5 |
See `introduction` for details about Identified UI object.
|
entailment
|
def swipe_right(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to right
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.right(steps=steps)
|
Swipe the UI object with *selectors* from center to right
See `Swipe Left` for more details.
|
entailment
|
def swipe_top(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to top
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.up(steps=steps)
|
Swipe the UI object with *selectors* from center to top
See `Swipe Left` for more details.
|
entailment
|
def swipe_bottom(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.down(steps=steps)
|
Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details.
|
entailment
|
    def drag_by_coordinates(self, sx, sy, ex, ey, steps=10):
"""
Drag from (sx, sy) to (ex, ey) with steps
See `Swipe By Coordinates` also.
"""
self.device.drag(sx, sy, ex, ey, steps)
|
Drag from (sx, sy) to (ex, ey) with steps
See `Swipe By Coordinates` also.
|
entailment
|
def wait_for_exists(self, timeout=0, *args, **selectors):
"""
Wait for the object which has *selectors* within the given timeout.
        Return true if the object *appears* within the given timeout, else return false.
"""
return self.device(**selectors).wait.exists(timeout=timeout)
|
Wait for the object which has *selectors* within the given timeout.
Return true if the object *appears* within the given timeout, else return false.
|
entailment
|
def wait_until_gone(self, timeout=0, *args, **selectors):
"""
Wait for the object which has *selectors* within the given timeout.
        Return true if the object *disappears* within the given timeout, else return false.
"""
return self.device(**selectors).wait.gone(timeout=timeout)
|
Wait for the object which has *selectors* within the given timeout.
Return true if the object *disappears* within the given timeout, else return false.
|
entailment
|
def fling_forward_horizontally(self, *args, **selectors):
"""
        Perform a fling forward (horizontal) action on the object which has the *selectors* attributes.
        Return whether the object can be flung or not.
"""
return self.device(**selectors).fling.horiz.forward()
|
Perform a fling forward (horizontal) action on the object which has the *selectors* attributes.
Return whether the object can be flung or not.
|
entailment
|
def fling_backward_horizontally(self, *args, **selectors):
"""
        Perform a fling backward (horizontal) action on the object which has the *selectors* attributes.
        Return whether the object can be flung or not.
"""
return self.device(**selectors).fling.horiz.backward()
|
Perform a fling backward (horizontal) action on the object which has the *selectors* attributes.
Return whether the object can be flung or not.
|
entailment
|
def fling_forward_vertically(self, *args, **selectors):
"""
        Perform a fling forward (vertical) action on the object which has the *selectors* attributes.
        Return whether the object can be flung or not.
"""
return self.device(**selectors).fling.vert.forward()
|
Perform a fling forward (vertical) action on the object which has the *selectors* attributes.
Return whether the object can be flung or not.
|
entailment
|
def fling_backward_vertically(self, *args, **selectors):
"""
        Perform a fling backward (vertical) action on the object which has the *selectors* attributes.
        Return whether the object can be flung or not.
"""
return self.device(**selectors).fling.vert.backward()
|
Perform a fling backward (vertical) action on the object which has the *selectors* attributes.
Return whether the object can be flung or not.
|
entailment
|
    def scroll_to_beginning_horizontally(self, steps=10, *args, **selectors):
"""
Scroll the object which has *selectors* attributes to *beginning* horizontally.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.horiz.toBeginning(steps=steps)
|
Scroll the object which has *selectors* attributes to *beginning* horizontally.
See `Scroll Forward Vertically` for more details.
|
entailment
|
def scroll_to_end_horizontally(self, steps=10, *args, **selectors):
"""
Scroll the object which has *selectors* attributes to *end* horizontally.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.horiz.toEnd(steps=steps)
|
Scroll the object which has *selectors* attributes to *end* horizontally.
See `Scroll Forward Vertically` for more details.
|
entailment
|
def scroll_forward_horizontally(self, steps=10, *args, **selectors):
"""
        Perform a scroll forward (horizontal) action on the object which has the *selectors* attributes.
        Return whether the object can be scrolled or not.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.horiz.forward(steps=steps)
|
Perform a scroll forward (horizontal) action on the object which has the *selectors* attributes.
Return whether the object can be scrolled or not.
See `Scroll Forward Vertically` for more details.
|
entailment
|
def scroll_backward_horizontally(self, steps=10, *args, **selectors):
"""
        Perform a scroll backward (horizontal) action on the object which has the *selectors* attributes.
        Return whether the object can be scrolled or not.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.horiz.backward(steps=steps)
|
Perform a scroll backward (horizontal) action on the object which has the *selectors* attributes.
Return whether the object can be scrolled or not.
See `Scroll Forward Vertically` for more details.
|
entailment
|
    def scroll_to_horizontally(self, obj, *args, **selectors):
"""
        Scroll (horizontally) on the object *obj* until a specific UI object which has the *selectors* attributes appears.
        Return true if the UI object appears, else return false.
See `Scroll To Vertically` for more details.
"""
return obj.scroll.horiz.to(**selectors)
|
Scroll (horizontally) on the object *obj* until a specific UI object which has the *selectors* attributes appears.
Return true if the UI object appears, else return false.
See `Scroll To Vertically` for more details.
|
entailment
|
    def scroll_to_beginning_vertically(self, steps=10, *args, **selectors):
"""
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.vert.toBeginning(steps=steps)
|
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
|
entailment
|
def scroll_to_end_vertically(self, steps=10, *args, **selectors):
"""
Scroll the object which has *selectors* attributes to *end* vertically.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.vert.toEnd(steps=steps)
|
Scroll the object which has *selectors* attributes to *end* vertically.
See `Scroll Forward Vertically` for more details.
|
entailment
|
def scroll_forward_vertically(self, steps=10, *args, **selectors):
"""
        Perform a scroll forward (vertical) action on the object which has the *selectors* attributes.
        Return whether the object can be scrolled or not.
Example:
| ${can_be_scroll} | Scroll Forward Vertically | className=android.widget.ListView | | # Scroll forward the UI object with class name |
| ${can_be_scroll} | Scroll Forward Vertically | 100 | className=android.widget.ListView | # Scroll with steps |
"""
return self.device(**selectors).scroll.vert.forward(steps=steps)
|
Perform a scroll forward (vertical) action on the object which has the *selectors* attributes.
Return whether the object can be scrolled or not.
Example:
| ${can_be_scroll} | Scroll Forward Vertically | className=android.widget.ListView | | # Scroll forward the UI object with class name |
| ${can_be_scroll} | Scroll Forward Vertically | 100 | className=android.widget.ListView | # Scroll with steps |
|
entailment
|
def scroll_backward_vertically(self, steps=10, *args, **selectors):
"""
        Perform a scroll backward (vertical) action on the object which has the *selectors* attributes.
        Return whether the object can be scrolled or not.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.vert.backward(steps=steps)
|
Perform a scroll backward (vertical) action on the object which has the *selectors* attributes.
Return whether the object can be scrolled or not.
See `Scroll Forward Vertically` for more details.
|
entailment
|
    def scroll_to_vertically(self, obj, *args, **selectors):
"""
        Scroll (vertically) on the object *obj* until a specific UI object which has the *selectors* attributes appears.
        Return true if the UI object appears, else return false.
Example:
| ${list} | Get Object | className=android.widget.ListView | | # Get the list object |
| ${is_web_view} | Scroll To Vertically | ${list} | text=WebView | # Scroll to text:WebView. |
"""
return obj.scroll.vert.to(**selectors)
|
Scroll (vertically) on the object *obj* until a specific UI object which has the *selectors* attributes appears.
Return true if the UI object appears, else return false.
Example:
| ${list} | Get Object | className=android.widget.ListView | | # Get the list object |
| ${is_web_view} | Scroll To Vertically | ${list} | text=WebView | # Scroll to text:WebView. |
|
entailment
|
def screenshot(self, scale=None, quality=None):
"""
        Take a screenshot of the device and log it in the report with a timestamp.
        *scale* sets the screenshot size and *quality* the image quality; defaults are scale=1.0 and quality=100.
"""
output_dir = BuiltIn().get_variable_value('${OUTPUTDIR}')
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
screenshot_path = '%s%s%s.png' % (output_dir, os.sep, st)
self.device.screenshot(screenshot_path, scale, quality)
logger.info('\n<a href="%s">%s</a><br><img src="%s">' % (screenshot_path, st, screenshot_path), html=True)
|
Take a screenshot of the device and log it in the report with a timestamp.
*scale* sets the screenshot size and *quality* the image quality; defaults are scale=1.0 and quality=100.
|
entailment
|
def register_click_watcher(self, watcher_name, selectors, *condition_list):
"""
        The watcher clicks on the object which has the *selectors* when the conditions match.
"""
watcher = self.device.watcher(watcher_name)
for condition in condition_list:
watcher.when(**self.__unicode_to_dict(condition))
watcher.click(**self.__unicode_to_dict(selectors))
self.device.watchers.run()
|
The watcher clicks on the object which has the *selectors* when the conditions match.
|
entailment
|
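A hypothetical Robot Framework usage of the keyword above, in the same table style as the library's other examples; the watcher name and selector strings are illustrative only:

| Register Click Watcher | CRASH_WATCHER | text=OK | textContains=has stopped | # click OK whenever a crash dialog appears |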
def register_press_watcher(self, watcher_name, press_keys, *condition_list):
"""
        The watcher performs the *press_keys* actions sequentially when the conditions match.
"""
        def unicode_to_list(a_unicode):
            # split the comma-separated key names into a list
            return a_unicode.split(',')
watcher = self.device.watcher(watcher_name)
for condition in condition_list:
watcher.when(**self.__unicode_to_dict(condition))
watcher.press(*unicode_to_list(press_keys))
self.device.watchers.run()
|
The watcher performs the *press_keys* actions sequentially when the conditions match.
|
entailment
|
    def remove_watchers(self, watcher_name=None):
"""
Remove watcher with *watcher_name* or remove all watchers.
"""
        if watcher_name is None:
self.device.watchers.remove()
else:
self.device.watchers.remove(watcher_name)
|
Remove watcher with *watcher_name* or remove all watchers.
|
entailment
|
def get_count(self, *args, **selectors):
"""
        Return the count of UI objects matching *selectors*.
        Example:
        | ${count} | Get Count | text=Accessibility | # Get the count of UI objects with text=Accessibility |
        | ${accessibility_text} | Get Object | text=Accessibility | # These two keywords in combination |
        | ${count} | Get Count Of Object | ${accessibility_text} | # do the same thing. |
"""
obj = self.get_object(**selectors)
return self.get_count_of_object(obj)
|
Return the count of UI objects matching *selectors*.
Example:
| ${count} | Get Count | text=Accessibility | # Get the count of UI objects with text=Accessibility |
| ${accessibility_text} | Get Object | text=Accessibility | # These two keywords in combination |
| ${count} | Get Count Of Object | ${accessibility_text} | # do the same thing. |
|
entailment
|
def get_info_of_object(self, obj, selector=None):
"""
        Return the info dictionary of the *obj*.
The info example:
{
u'contentDescription': u'',
u'checked': False,
u'scrollable': True,
u'text': u'',
u'packageName': u'com.android.launcher',
u'selected': False,
u'enabled': True,
u'bounds':
{
u'top': 231,
u'left': 0,
u'right': 1080,
u'bottom': 1776
},
u'className': u'android.view.View',
u'focusable': False,
u'focused': False,
u'clickable': False,
u'checkable': False,
        u'childCount': 1,
u'longClickable': False,
u'visibleBounds':
{
u'top': 231,
u'left': 0,
u'right': 1080,
u'bottom': 1776
}
}
"""
if selector:
return obj.info.get(selector)
else:
return obj.info
|
Return the info dictionary of the *obj*.
The info example:
{
u'contentDescription': u'',
u'checked': False,
u'scrollable': True,
u'text': u'',
u'packageName': u'com.android.launcher',
u'selected': False,
u'enabled': True,
u'bounds':
{
u'top': 231,
u'left': 0,
u'right': 1080,
u'bottom': 1776
},
u'className': u'android.view.View',
u'focusable': False,
u'focused': False,
u'clickable': False,
u'checkable': False,
u'childCount': 1,
u'longClickable': False,
u'visibleBounds':
{
u'top': 231,
u'left': 0,
u'right': 1080,
u'bottom': 1776
}
}
|
entailment
|
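A short usage sketch (the library instance `lib` and the UI object `obj` are hypothetical):
info = lib.get_info_of_object(obj)                    # the full dictionary shown above
clickable = lib.get_info_of_object(obj, 'clickable')  # a single field, e.g. False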
def call(self, obj, method, *args, **selectors):
"""
        This keyword invokes an object method from the original python uiautomator.
See more details from https://github.com/xiaocong/uiautomator
Example:
| ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
| Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |
"""
func = getattr(obj, method)
return func(**selectors)
|
This keyword invokes an object method from the original python uiautomator.
See more details from https://github.com/xiaocong/uiautomator
Example:
| ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
| Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |
|
entailment
|
def set_text(self, input_text, *args, **selectors):
"""
Set *input_text* to the UI object with *selectors*
"""
self.device(**selectors).set_text(input_text)
|
Set *input_text* to the UI object with *selectors*
|
entailment
|
def clear_text(self, *args, **selectors):
"""
Clear text of the UI object with *selectors*
"""
        while True:
            target = self.device(**selectors)
            text = target.info['text']
            target.clear_text()
            remain_text = target.info['text']
            # stop once the field is empty or clear_text() makes no further progress
            if text == '' or remain_text == text:
                break
|
Clear text of the UI object with *selectors*
|
entailment
|
def open_notification(self):
"""
        Open the notification shade.
        Built-in support exists from Android 4.3 (API level 18) onwards.
        A swipe action is used as a workaround for API levels lower than 18.
"""
sdk_version = self.device.info['sdkInt']
if sdk_version < 18:
height = self.device.info['displayHeight']
self.device.swipe(1, 1, 1, height - 1, 1)
else:
self.device.open.notification()
|
Open the notification shade.
Built-in support exists from Android 4.3 (API level 18) onwards.
A swipe action is used as a workaround for API levels lower than 18.
|
entailment
|
def sleep(self, time):
"""
        Sleep (no action) for *time* (in milliseconds).
"""
        # wait on a selector that never matches, as a portable way to idle
        target = 'wait for %s' % str(time)
        self.device(text=target).wait.exists(timeout=time)
|
Sleep (no action) for *time* (in milliseconds).
|
entailment
|
def connect_to_wifi(self, ssid, password=None):
"""
[Test Agent]
Connect to *ssid* with *password*
"""
cmd = 'am broadcast -a testagent -e action CONNECT_TO_WIFI -e ssid %s -e password %s' % (ssid, password)
self.adb.shell_cmd(cmd)
|
[Test Agent]
Connect to *ssid* with *password*
|
entailment
|
def merge_sims(oldsims, newsims, clip=None):
"""Merge two precomputed similarity lists, truncating the result to `clip` most similar items."""
if oldsims is None:
result = newsims or []
elif newsims is None:
result = oldsims
else:
result = sorted(oldsims + newsims, key=lambda item: -item[1])
if clip is not None:
result = result[:clip]
return result
|
Merge two precomputed similarity lists, truncating the result to `clip` most similar items.
|
entailment
|
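For illustration, a toy check of the merge semantics (the ids and scores are made up):
old = [('doc1', 0.9), ('doc3', 0.4)]
new = [('doc2', 0.7)]
print(merge_sims(old, new, clip=2))  # -> [('doc1', 0.9), ('doc2', 0.7)]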
def terminate(self):
"""Delete all files created by this index, invalidating `self`. Use with care."""
try:
self.id2sims.terminate()
except:
pass
import glob
for fname in glob.glob(self.fname + '*'):
try:
os.remove(fname)
logger.info("deleted %s" % fname)
            except Exception as e:
                logger.warning("failed to delete %s: %s" % (fname, e))
for val in self.__dict__.keys():
try:
delattr(self, val)
except:
pass
|
Delete all files created by this index, invalidating `self`. Use with care.
|
entailment
|
def index_documents(self, fresh_docs, model):
"""
Update fresh index with new documents (potentially replacing old ones with
the same id). `fresh_docs` is a dictionary-like object (=dict, sqlitedict, shelve etc)
that maps document_id->document.
"""
docids = fresh_docs.keys()
vectors = (model.docs2vecs(fresh_docs[docid] for docid in docids))
logger.info("adding %i documents to %s" % (len(docids), self))
self.qindex.add_documents(vectors)
self.qindex.save()
self.update_ids(docids)
|
Update fresh index with new documents (potentially replacing old ones with
the same id). `fresh_docs` is a dictionary-like object (=dict, sqlitedict, shelve etc)
that maps document_id->document.
|
entailment
|
def update_ids(self, docids):
"""Update id->pos mapping with new document ids."""
logger.info("updating %i id mappings" % len(docids))
for docid in docids:
if docid is not None:
pos = self.id2pos.get(docid, None)
if pos is not None:
logger.info("replacing existing document %r in %s" % (docid, self))
del self.pos2id[pos]
self.id2pos[docid] = self.length
try:
del self.id2sims[docid]
except:
pass
self.length += 1
self.id2sims.sync()
self.update_mappings()
|
Update id->pos mapping with new document ids.
|
entailment
|
def update_mappings(self):
"""Synchronize id<->position mappings."""
self.pos2id = dict((v, k) for k, v in self.id2pos.iteritems())
assert len(self.pos2id) == len(self.id2pos), "duplicate ids or positions detected"
|
Synchronize id<->position mappings.
|
entailment
|
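The synchronized mapping is a plain dict inversion; a toy illustration of the invariant being asserted:
id2pos = {'doc-a': 0, 'doc-b': 1}
pos2id = dict((v, k) for k, v in id2pos.items())  # {0: 'doc-a', 1: 'doc-b'}
assert len(pos2id) == len(id2pos)  # would fail if two ids shared one position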
def delete(self, docids):
"""Delete documents (specified by their ids) from the index."""
logger.debug("deleting %i documents from %s" % (len(docids), self))
deleted = 0
for docid in docids:
try:
del self.id2pos[docid]
deleted += 1
del self.id2sims[docid]
except:
pass
self.id2sims.sync()
if deleted:
logger.info("deleted %i documents from %s" % (deleted, self))
self.update_mappings()
|
Delete documents (specified by their ids) from the index.
|
entailment
|
def sims2scores(self, sims, eps=1e-7):
"""Convert raw similarity vector to a list of (docid, similarity) results."""
result = []
if isinstance(sims, numpy.ndarray):
sims = abs(sims) # TODO or maybe clip? are opposite vectors "similar" or "dissimilar"?!
for pos in numpy.argsort(sims)[::-1]:
if pos in self.pos2id and sims[pos] > eps: # ignore deleted/rewritten documents
# convert positions of resulting docs back to ids
result.append((self.pos2id[pos], sims[pos]))
if len(result) == self.topsims:
break
else:
for pos, score in sims:
if pos in self.pos2id and abs(score) > eps: # ignore deleted/rewritten documents
# convert positions of resulting docs back to ids
result.append((self.pos2id[pos], abs(score)))
if len(result) == self.topsims:
break
return result
|
Convert raw similarity vector to a list of (docid, similarity) results.
|
entailment
|
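A toy illustration of the dense-vector branch (positions, ids and scores are made up; assume pos2id = {0: 'a', 1: 'b', 2: 'c'} and topsims = 2):
import numpy
sims = numpy.array([0.1, -0.9, 0.0])   # raw similarities, indexed by position
# abs() is applied first, positions are visited in decreasing score order, and
# scores below eps are dropped: the result would be [('b', 0.9), ('a', 0.1)]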
def vec_by_id(self, docid):
"""Return indexed vector corresponding to document `docid`."""
pos = self.id2pos[docid]
return self.qindex.vector_by_id(pos)
|
Return indexed vector corresponding to document `docid`.
|
entailment
|
def sims_by_id(self, docid):
"""Find the most similar documents to the (already indexed) document with `docid`."""
result = self.id2sims.get(docid, None)
if result is None:
self.qindex.num_best = self.topsims
sims = self.qindex.similarity_by_id(self.id2pos[docid])
result = self.sims2scores(sims)
return result
|
Find the most similar documents to the (already indexed) document with `docid`.
|
entailment
|
def sims_by_vec(self, vec, normalize=None):
"""
Find the most similar documents to a given vector (=already processed document).
"""
if normalize is None:
normalize = self.qindex.normalize
norm, self.qindex.normalize = self.qindex.normalize, normalize # store old value
self.qindex.num_best = self.topsims
sims = self.qindex[vec]
self.qindex.normalize = norm # restore old value of qindex.normalize
return self.sims2scores(sims)
|
Find the most similar documents to a given vector (=already processed document).
|
entailment
|
def merge(self, other):
"""Merge documents from the other index. Update precomputed similarities
in the process."""
other.qindex.normalize, other.qindex.num_best = False, self.topsims
# update precomputed "most similar" for old documents (in case some of
# the new docs make it to the top-N for some of the old documents)
logger.info("updating old precomputed values")
pos, lenself = 0, len(self.qindex)
for chunk in self.qindex.iter_chunks():
for sims in other.qindex[chunk]:
if pos in self.pos2id:
# ignore masked entries (deleted, overwritten documents)
docid = self.pos2id[pos]
sims = self.sims2scores(sims)
self.id2sims[docid] = merge_sims(self.id2sims[docid], sims, self.topsims)
pos += 1
if pos % 10000 == 0:
logger.info("PROGRESS: updated doc #%i/%i" % (pos, lenself))
self.id2sims.sync()
logger.info("merging fresh index into optimized one")
pos, docids = 0, []
for chunk in other.qindex.iter_chunks():
for vec in chunk:
if pos in other.pos2id: # don't copy deleted documents
self.qindex.add_documents([vec])
docids.append(other.pos2id[pos])
pos += 1
self.qindex.save()
self.update_ids(docids)
logger.info("precomputing most similar for the fresh index")
pos, lenother = 0, len(other.qindex)
norm, self.qindex.normalize = self.qindex.normalize, False
topsims, self.qindex.num_best = self.qindex.num_best, self.topsims
for chunk in other.qindex.iter_chunks():
for sims in self.qindex[chunk]:
if pos in other.pos2id:
# ignore masked entries (deleted, overwritten documents)
docid = other.pos2id[pos]
self.id2sims[docid] = self.sims2scores(sims)
pos += 1
if pos % 10000 == 0:
logger.info("PROGRESS: precomputed doc #%i/%i" % (pos, lenother))
self.qindex.normalize, self.qindex.num_best = norm, topsims
self.id2sims.sync()
|
Merge documents from the other index. Update precomputed similarities
in the process.
|
entailment
|
def doc2vec(self, doc):
"""Convert a single SimilarityDocument to vector."""
bow = self.dictionary.doc2bow(doc['tokens'])
if self.method == 'lsi':
return self.lsi[self.tfidf[bow]]
elif self.method == 'lda':
return self.lda[bow]
elif self.method == 'lda_tfidf':
return self.lda[self.tfidf[bow]]
elif self.method == 'logentropy':
return self.logent[bow]
|
Convert a single SimilarityDocument to vector.
|
entailment
|
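A sketch of the expected input shape (the dict format matches what `buffer` stores; a trained model is assumed):
doc = {'id': 'doc1', 'tokens': ['graph', 'minors', 'survey']}
# vec = model.doc2vec(doc)  # e.g. a sparse gensim vector [(0, 0.08), (1, -0.32), ...]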
def docs2vecs(self, docs):
"""Convert multiple SimilarityDocuments to vectors (batch version of doc2vec)."""
bows = (self.dictionary.doc2bow(doc['tokens']) for doc in docs)
if self.method == 'lsi':
return self.lsi[self.tfidf[bows]]
elif self.method == 'lda':
return self.lda[bows]
elif self.method == 'lda_tfidf':
return self.lda[self.tfidf[bows]]
elif self.method == 'logentropy':
return self.logent[bows]
|
Convert multiple SimilarityDocuments to vectors (batch version of doc2vec).
|
entailment
|
def flush(self, save_index=False, save_model=False, clear_buffer=False):
"""Commit all changes, clear all caches."""
if save_index:
if self.fresh_index is not None:
self.fresh_index.save(self.location('index_fresh'))
if self.opt_index is not None:
self.opt_index.save(self.location('index_opt'))
if save_model:
if self.model is not None:
self.model.save(self.location('model'))
self.payload.commit()
if clear_buffer:
if hasattr(self, 'fresh_docs'):
try:
self.fresh_docs.terminate() # erase all buffered documents + file on disk
except:
pass
self.fresh_docs = SqliteDict(journal_mode=JOURNAL_MODE) # buffer defaults to a random location in temp
self.fresh_docs.sync()
|
Commit all changes, clear all caches.
|
entailment
|
def close(self):
"""Explicitly close open file handles, databases etc."""
try:
self.payload.close()
except:
pass
try:
self.model.close()
except:
pass
try:
self.fresh_index.close()
except:
pass
try:
self.opt_index.close()
except:
pass
try:
self.fresh_docs.terminate()
except:
pass
|
Explicitly close open file handles, databases etc.
|
entailment
|
def buffer(self, documents):
"""
Add a sequence of documents to be processed (indexed or trained on).
Here, the documents are simply collected; real processing is done later,
during the `self.index` or `self.train` calls.
`buffer` can be called repeatedly; the result is the same as if it was
called once, with a concatenation of all the partial document batches.
        The point is to save memory when sending large corpora over a network: the
        entire `documents` argument must be serialized into RAM at once, so send it
        in smaller batches instead; see `utils.upload_chunked()`.
A call to `flush()` clears this documents-to-be-processed buffer (`flush`
is also implicitly called when you call `index()` and `train()`).
"""
logger.info("adding documents to temporary buffer of %s" % (self))
for doc in documents:
docid = doc['id']
# logger.debug("buffering document %r" % docid)
if docid in self.fresh_docs:
logger.warning("asked to re-add id %r; rewriting old value" % docid)
self.fresh_docs[docid] = doc
self.fresh_docs.sync()
|
Add a sequence of documents to be processed (indexed or trained on).
Here, the documents are simply collected; real processing is done later,
during the `self.index` or `self.train` calls.
`buffer` can be called repeatedly; the result is the same as if it was
called once, with a concatenation of all the partial document batches.
The point is to save memory when sending large corpora over a network: the
entire `documents` argument must be serialized into RAM at once, so send it
in smaller batches instead; see `utils.upload_chunked()`.
A call to `flush()` clears this documents-to-be-processed buffer (`flush`
is also implicitly called when you call `index()` and `train()`).
|
entailment
|
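A usage sketch (assuming `server` is a SimServer instance; the document contents are hypothetical, with fields following the conventions above):
docs = [{'id': 'doc1', 'tokens': ['hello', 'world'], 'payload': 'Hello world!'}]
more_docs = [{'id': 'doc2', 'tokens': ['another', 'piece', 'of', 'text']}]
server.buffer(docs)        # only collected; nothing is trained or indexed yet
server.buffer(more_docs)   # equivalent to a single buffer() call with both batches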
def train(self, corpus=None, method='auto', clear_buffer=True, params=None):
"""
Create an indexing model. Will overwrite the model if it already exists.
All indexes become invalid, because documents in them use a now-obsolete
representation.
The model is trained on documents previously entered via `buffer`,
or directly on `corpus`, if specified.
"""
if corpus is not None:
# use the supplied corpus only (erase existing buffer, if any)
self.flush(clear_buffer=True)
self.buffer(corpus)
if not self.fresh_docs:
msg = "train called but no training corpus specified for %s" % self
logger.error(msg)
raise ValueError(msg)
if method == 'auto':
numdocs = len(self.fresh_docs)
if numdocs < 1000:
                logger.warning("too few training documents; using simple log-entropy model instead of latent semantic indexing")
method = 'logentropy'
else:
method = 'lsi'
if params is None:
params = {}
self.model = SimModel(self.fresh_docs, method=method, params=params)
self.flush(save_model=True, clear_buffer=clear_buffer)
|
Create an indexing model. Will overwrite the model if it already exists.
All indexes become invalid, because documents in them use a now-obsolete
representation.
The model is trained on documents previously entered via `buffer`,
or directly on `corpus`, if specified.
|
entailment
|
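A usage sketch continuing the example above (with method='auto', fewer than 1000 documents selects the log-entropy model, otherwise LSI):
server.train(corpus=docs, method='auto')   # wipes the buffer, trains on docs
# or buffer first, then train on whatever was collected:
server.buffer(docs)
server.train(method='lsi')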
def index(self, corpus=None, clear_buffer=True):
"""
Permanently index all documents previously added via `buffer`, or
directly index documents from `corpus`, if specified.
The indexing model must already exist (see `train`) before this function
is called.
"""
if not self.model:
msg = 'must initialize model for %s before indexing documents' % self.basename
logger.error(msg)
raise AttributeError(msg)
if corpus is not None:
# use the supplied corpus only (erase existing buffer, if any)
self.flush(clear_buffer=True)
self.buffer(corpus)
if not self.fresh_docs:
msg = "index called but no indexing corpus specified for %s" % self
logger.error(msg)
raise ValueError(msg)
if not self.fresh_index:
logger.info("starting a new fresh index for %s" % self)
self.fresh_index = SimIndex(self.location('index_fresh'), self.model.num_features)
self.fresh_index.index_documents(self.fresh_docs, self.model)
if self.opt_index is not None:
self.opt_index.delete(self.fresh_docs.keys())
logger.info("storing document payloads")
for docid in self.fresh_docs:
payload = self.fresh_docs[docid].get('payload', None)
if payload is None:
# HACK: exit on first doc without a payload (=assume all docs have payload, or none does)
break
self.payload[docid] = payload
self.flush(save_index=True, clear_buffer=clear_buffer)
|
Permanently index all documents previously added via `buffer`, or
directly index documents from `corpus`, if specified.
The indexing model must already exist (see `train`) before this function
is called.
|
entailment
|
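A usage sketch (a trained model is required before either call):
server.index()             # index everything previously buffer()-ed
server.index(corpus=docs)  # or drop the buffer and index the given corpus directly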
def optimize(self):
"""
Precompute top similarities for all indexed documents. This speeds up
`find_similar` queries by id (but not queries by fulltext).
Internally, documents are moved from a fresh index (=no precomputed similarities)
to an optimized index (precomputed similarities). Similarity queries always
query both indexes, so this split is transparent to clients.
If you add documents later via `index`, they go to the fresh index again.
To precompute top similarities for these new documents too, simply call
`optimize` again.
"""
if self.fresh_index is None:
logger.warning("optimize called but there are no new documents")
return # nothing to do!
if self.opt_index is None:
logger.info("starting a new optimized index for %s" % self)
self.opt_index = SimIndex(self.location('index_opt'), self.model.num_features)
self.opt_index.merge(self.fresh_index)
self.fresh_index.terminate() # delete old files
self.fresh_index = None
self.flush(save_index=True)
|
Precompute top similarities for all indexed documents. This speeds up
`find_similar` queries by id (but not queries by fulltext).
Internally, documents are moved from a fresh index (=no precomputed similarities)
to an optimized index (precomputed similarities). Similarity queries always
query both indexes, so this split is transparent to clients.
If you add documents later via `index`, they go to the fresh index again.
To precompute top similarities for these new documents too, simply call
`optimize` again.
|
entailment
|
def drop_index(self, keep_model=True):
"""Drop all indexed documents. If `keep_model` is False, also dropped the model."""
modelstr = "" if keep_model else "and model "
logger.info("deleting similarity index " + modelstr + "from %s" % self.basename)
# delete indexes
for index in [self.fresh_index, self.opt_index]:
if index is not None:
index.terminate()
self.fresh_index, self.opt_index = None, None
# delete payload
if self.payload is not None:
self.payload.close()
fname = self.location('payload')
try:
if os.path.exists(fname):
os.remove(fname)
logger.info("deleted %s" % fname)
        except Exception as e:
            logger.warning("failed to delete %s: %s" % (fname, e))
self.payload = SqliteDict(self.location('payload'), autocommit=True, journal_mode=JOURNAL_MODE)
# optionally, delete the model as well
if not keep_model and self.model is not None:
self.model.close()
fname = self.location('model')
try:
if os.path.exists(fname):
os.remove(fname)
logger.info("deleted %s" % fname)
        except Exception as e:
            logger.warning("failed to delete %s: %s" % (fname, e))
self.model = None
self.flush(save_index=True, save_model=True, clear_buffer=True)
|
Drop all indexed documents. If `keep_model` is False, also drop the model.
|
entailment
|
def delete(self, docids):
"""Delete specified documents from the index."""
logger.info("asked to drop %i documents" % len(docids))
for index in [self.opt_index, self.fresh_index]:
if index is not None:
index.delete(docids)
self.flush(save_index=True)
|
Delete specified documents from the index.
|
entailment
|
def find_similar(self, doc, min_score=0.0, max_results=100):
"""
Find `max_results` most similar articles in the index, each having similarity
score of at least `min_score`. The resulting list may be shorter than `max_results`,
in case there are not enough matching documents.
`doc` is either a string (=document id, previously indexed) or a
        dict containing a 'tokens' key (or a precomputed 'topics' vector). The tokens
        are processed to produce a vector, which is then used as a query against the index.
The similar documents are returned in decreasing similarity order, as
`(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
is identical to what was supplied for this document during indexing.
"""
logger.debug("received query call with %r" % doc)
if self.is_locked():
msg = "cannot query while the server is being updated"
logger.error(msg)
raise RuntimeError(msg)
sims_opt, sims_fresh = None, None
for index in [self.fresh_index, self.opt_index]:
if index is not None:
index.topsims = max_results
if isinstance(doc, basestring):
# query by direct document id
docid = doc
if self.opt_index is not None and docid in self.opt_index:
sims_opt = self.opt_index.sims_by_id(docid)
if self.fresh_index is not None:
vec = self.opt_index.vec_by_id(docid)
sims_fresh = self.fresh_index.sims_by_vec(vec, normalize=False)
elif self.fresh_index is not None and docid in self.fresh_index:
sims_fresh = self.fresh_index.sims_by_id(docid)
if self.opt_index is not None:
vec = self.fresh_index.vec_by_id(docid)
sims_opt = self.opt_index.sims_by_vec(vec, normalize=False)
else:
raise ValueError("document %r not in index" % docid)
else:
if 'topics' in doc:
# user supplied vector directly => use that
vec = gensim.matutils.any2sparse(doc['topics'])
else:
# query by an arbitrary text (=tokens) inside doc['tokens']
vec = self.model.doc2vec(doc) # convert document (text) to vector
if self.opt_index is not None:
sims_opt = self.opt_index.sims_by_vec(vec)
if self.fresh_index is not None:
sims_fresh = self.fresh_index.sims_by_vec(vec)
merged = merge_sims(sims_opt, sims_fresh)
logger.debug("got %s raw similars, pruning with max_results=%s, min_score=%s" %
(len(merged), max_results, min_score))
result = []
for docid, score in merged:
if score < min_score or 0 < max_results <= len(result):
break
result.append((docid, float(score), self.payload.get(docid, None)))
return result
|
Find `max_results` most similar articles in the index, each having similarity
score of at least `min_score`. The resulting list may be shorter than `max_results`,
in case there are not enough matching documents.
`doc` is either a string (=document id, previously indexed) or a
dict containing a 'tokens' key (or a precomputed 'topics' vector). The tokens
are processed to produce a vector, which is then used as a query against the index.
The similar documents are returned in decreasing similarity order, as
`(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
is identical to what was supplied for this document during indexing.
|
entailment
|
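A usage sketch (document ids, tokens, and the returned payloads are hypothetical):
# query by the id of an already indexed document:
server.find_similar('doc1', min_score=0.5, max_results=10)
# or by arbitrary text, tokenized the same way as the indexed documents:
server.find_similar({'tokens': ['hello', 'world']})
# -> e.g. [('doc1', 0.99, 'Hello world!'), ('doc2', 0.6, None)]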
def keys(self):
"""Return ids of all indexed documents."""
result = []
if self.fresh_index is not None:
result += self.fresh_index.keys()
if self.opt_index is not None:
result += self.opt_index.keys()
return result
|
Return ids of all indexed documents.
|
entailment
|
def check_session(self):
"""
Make sure a session is open.
If it's not and autosession is turned on, create a new session automatically.
If it's not and autosession is off, raise an exception.
"""
if self.session is None:
if self.autosession:
self.open_session()
else:
msg = "must open a session before modifying %s" % self
raise RuntimeError(msg)
|
Make sure a session is open.
If it's not and autosession is turned on, create a new session automatically.
If it's not and autosession is off, raise an exception.
|
entailment
|
def open_session(self):
"""
Open a new session to modify this server.
        You can either call this function directly, or turn on autosession, which will
open/commit sessions for you transparently.
"""
if self.session is not None:
msg = "session already open; commit it or rollback before opening another one in %s" % self
logger.error(msg)
raise RuntimeError(msg)
logger.info("opening a new session")
logger.info("removing %s" % self.loc_session)
try:
shutil.rmtree(self.loc_session)
except:
logger.info("failed to delete %s" % self.loc_session)
logger.info("cloning server from %s to %s" %
(self.loc_stable, self.loc_session))
shutil.copytree(self.loc_stable, self.loc_session)
self.session = SimServer(self.loc_session, use_locks=self.use_locks)
self.lock_update.acquire()
|
Open a new session to modify this server.
You can either call this function directly, or turn on autosession, which will
open/commit sessions for you transparently.
|
entailment
|
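A sketch of the explicit session lifecycle (assuming a SessionServer with autosession turned off; `docs` is hypothetical):
server.open_session()   # clone the stable state into a writable session
server.buffer(docs)
server.index()
server.commit()         # swap the session in as the new stable state
# ... or server.rollback() to discard everything since open_session()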
def buffer(self, *args, **kwargs):
"""Buffer documents, in the current session"""
self.check_session()
result = self.session.buffer(*args, **kwargs)
return result
|
Buffer documents, in the current session
|
entailment
|
def index(self, *args, **kwargs):
"""Index documents, in the current session"""
self.check_session()
result = self.session.index(*args, **kwargs)
if self.autosession:
self.commit()
return result
|
Index documents, in the current session
|
entailment
|
def drop_index(self, keep_model=True):
"""Drop all indexed documents from the session. Optionally, drop model too."""
self.check_session()
result = self.session.drop_index(keep_model)
if self.autosession:
self.commit()
return result
|
Drop all indexed documents from the session. Optionally, drop model too.
|
entailment
|
def delete(self, docids):
"""Delete documents from the current session."""
self.check_session()
result = self.session.delete(docids)
if self.autosession:
self.commit()
return result
|
Delete documents from the current session.
|
entailment
|
def optimize(self):
"""Optimize index for faster by-document-id queries."""
self.check_session()
result = self.session.optimize()
if self.autosession:
self.commit()
return result
|
Optimize index for faster by-document-id queries.
|
entailment
|
def commit(self):
"""Commit changes made by the latest session."""
if self.session is not None:
logger.info("committing transaction in %s" % self)
tmp = self.stable
self.stable, self.session = self.session, None
self.istable = 1 - self.istable
self.write_istable()
tmp.close() # don't wait for gc, release resources manually
self.lock_update.release()
else:
logger.warning("commit called but there's no open session in %s" % self)
|
Commit changes made by the latest session.
|
entailment
|
def rollback(self):
"""Ignore all changes made in the latest session (terminate the session)."""
if self.session is not None:
logger.info("rolling back transaction in %s" % self)
self.session.close()
self.session = None
self.lock_update.release()
else:
logger.warning("rollback called but there's no open session in %s" % self)
|
Ignore all changes made in the latest session (terminate the session).
|
entailment
|
def set_autosession(self, value=None):
"""
Turn autosession (automatic committing after each modification call) on/off.
If value is None, only query the current value (don't change anything).
"""
if value is not None:
self.rollback()
self.autosession = value
return self.autosession
|
Turn autosession (automatic committing after each modification call) on/off.
If value is None, only query the current value (don't change anything).
|
entailment
|