text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def discrete(cats, name='discrete'):
    """Return a class category that shows the encoding.

    :param cats: mapping from category label (str or bytes) to encoded value.
    :param name: name to embed in the returned descriptor string.
    :returns: string of the form ``discrete([{...}, name])`` with the
        mapping JSON-encoded.
    """
    import json
    # Build a cleaned copy instead of pop/insert on the caller's dict:
    # the original mutated `cats` in place as a side effect.
    clean = {}
    for key, value in cats.items():
        if isinstance(key, bytes):
            # JSON cannot serialize bytes keys; normalize to str.
            key = key.decode('utf-8')
        clean[key] = value
    return 'discrete(' + json.dumps([clean, name]) + ')'
def clear_cache(dataset_name=None):
    """Remove a data set from the cache.

    :param dataset_name: key into the module-level ``data_resources`` table.
    """
    resource = data_resources[dataset_name]
    if 'dirs' in resource:
        # Directory-structured data set: remove files first, then the
        # (now empty) directories that held them.
        for dir_list, file_list in zip(resource['dirs'], resource['files']):
            for sub_dir, fname in zip(dir_list, file_list):
                target = os.path.join(data_path, dataset_name, sub_dir, fname)
                if os.path.exists(target):
                    logging.info("clear_cache: removing " + target)
                    os.unlink(target)
            for sub_dir in dir_list:
                target = os.path.join(data_path, dataset_name, sub_dir)
                if os.path.exists(target):
                    logging.info("clear_cache: remove directory " + target)
                    os.rmdir(target)
    else:
        # Flat data set: just delete each listed file.
        for file_list in resource['files']:
            for fname in file_list:
                target = os.path.join(data_path, dataset_name, fname)
                if os.path.exists(target):
                    logging.info("clear_cache: remove " + target)
                    os.unlink(target)
def to_arff(dataset, **kwargs):
    """Take a pods data set and write it as an ARFF file.

    :param dataset: callable returning a pods data dictionary.
    :param kwargs: forwarded to ``dataset``; also folded into the output
        file name.
    """
    pods_data = dataset(**kwargs)
    # Build a file-name suffix from the keyword argument values.
    vals = list(kwargs.values())
    for i, v in enumerate(vals):
        if isinstance(v, list):
            vals[i] = '|'.join(v)
        else:
            vals[i] = str(v)
    args = '_'.join(vals)
    n = dataset.__name__
    if len(args) > 0:
        n += '_' + args
    n = n.replace(' ', '-')
    ks = pods_data.keys()
    d = None
    if 'Y' in ks and 'X' in ks:
        d = pd.DataFrame(pods_data['X'])
        if 'Xtest' in ks:
            # pd.concat replaces DataFrame.append, deprecated since
            # pandas 1.4 and removed in 2.0.
            d = pd.concat([d, pd.DataFrame(pods_data['Xtest'])], ignore_index=True)
        if 'covariates' in ks:
            d.columns = pods_data['covariates']
        dy = pd.DataFrame(pods_data['Y'])
        if 'Ytest' in ks:
            dy = pd.concat([dy, pd.DataFrame(pods_data['Ytest'])], ignore_index=True)
        if 'response' in ks:
            dy.columns = pods_data['response']
        # Merge response columns into d, prefixing with 'y' on collision.
        for c in dy.columns:
            if c not in d.columns:
                d[c] = dy[c]
            else:
                d['y' + str(c)] = dy[c]
    elif 'Y' in ks:
        d = pd.DataFrame(pods_data['Y'])
        if 'Ytest' in ks:
            d = pd.concat([d, pd.DataFrame(pods_data['Ytest'])], ignore_index=True)
    elif 'data' in ks:
        d = pd.DataFrame(pods_data['data'])
    if d is not None:
        df2arff(d, n, pods_data)
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
    """Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run.

    :param sample_every: keep every ``sample_every``-th GPS point.
    """
    import gpxpy
    import gpxpy.gpx
    if not data_available(data_set):
        download_data(data_set)
    files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo',
             'viewranger_phone', 'viewranger_tablet']
    X = []
    for file in files:
        # `with` closes every trace file; the original opened one handle
        # per iteration but only the last was ever closed.
        with open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r') as gpx_file:
            gpx = gpxpy.parse(gpx_file)
        points = [point for track in gpx.tracks
                  for segment in track.segments
                  for point in segment.points]
        # Time is expressed in seconds since midnight 2013-08-21.
        data = [[(point.time - datetime.datetime(2013, 8, 21)).total_seconds(),
                 point.latitude, point.longitude, point.elevation]
                for point in points]
        X.append(np.asarray(data)[::sample_every, :])
    if pandas_available:
        # NOTE(review): only the first trace is wrapped as a DataFrame,
        # matching the original behavior -- confirm this is intended.
        X = pd.DataFrame(X[0], columns=['seconds', 'latitude', 'longitude', 'elevation'])
        X.set_index(keys='seconds', inplace=True)
    return data_details_return({'X': X, 'info': 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
def pmlr(volumes='all', data_set='pmlr'):
    """Abstracts from the Proceedings of Machine Learning Research.

    :param volumes: 'all' or a collection of volume numbers to fetch.
    """
    if not data_available(data_set):
        download_data(data_set)
    import yaml
    # safe_load: yaml.load without an explicit Loader is deprecated and
    # unsafe; `with` also guarantees the handle is closed.
    with open(os.path.join(data_path, data_set, 'proceedings.yaml'), 'r') as proceedings_file:
        proceedings = yaml.safe_load(proceedings_file)
    # Create a new resources entry for downloading contents of proceedings.
    data_name_full = 'pmlr_volumes'
    data_resources[data_name_full] = data_resources[data_set].copy()
    data_resources[data_name_full]['files'] = []
    data_resources[data_name_full]['dirs'] = []
    data_resources[data_name_full]['urls'] = []
    for entry in proceedings:
        if volumes == 'all' or entry['volume'] in volumes:
            file = entry['yaml'].split('/')[-1]
            dir = 'v' + str(entry['volume'])
            data_resources[data_name_full]['files'].append([file])
            data_resources[data_name_full]['dirs'].append([dir])
            data_resources[data_name_full]['urls'].append(data_resources[data_set]['urls'][0])
    Y = []
    # Download the volume data.
    if not data_available(data_name_full):
        download_data(data_name_full)
    for entry in reversed(proceedings):
        volume = entry['volume']
        if volumes == 'all' or volume in volumes:
            file = entry['yaml'].split('/')[-1]
            with open(os.path.join(data_path, data_name_full,
                                   'v' + str(volume), file), 'r') as volume_file:
                Y += yaml.safe_load(volume_file)
    if pandas_available:
        Y = pd.DataFrame(Y)
        Y['published'] = pd.to_datetime(Y['published'])
        # CSL-style nested fields are flattened into plain values.
        Y['issued'] = Y['issued'].apply(lambda x: np.datetime64(datetime.datetime(*x['date-parts'])))
        Y['author'] = Y['author'].apply(lambda x: [str(author['given']) + ' ' + str(author['family']) for author in x])
        Y['editor'] = Y['editor'].apply(lambda x: [str(editor['given']) + ' ' + str(editor['family']) for editor in x])
        # NOTE(review): renaming by positional index assumes a fixed
        # column order from yaml -- verify columns 11/14 are issued/published.
        columns = list(Y.columns)
        columns[14] = datetime64_('published')
        columns[11] = datetime64_('issued')
        Y.columns = columns
    return data_details_return({'Y': Y, 'info': 'Data is a pandas data frame containing each paper, its abstract, authors, volumes and venue.'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
    """Yeast ChIP data from Lee et al."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    # Load the tab-separated binding matrix.
    dir_path = os.path.join(data_path, data_set)
    filename = os.path.join(dir_path, 'binding_by_gene.tsv')
    S = read_csv(filename, header=1, index_col=0, sep='\t')
    # Any column whose header was auto-generated ('Unnamed...') is
    # annotation; everything else is a transcription factor.
    transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
    annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
    S = S[transcription_factors]
    return data_details_return({'annotations': annotations,
                                'Y': S,
                                'transcription_factors': transcription_factors},
                               data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
    """Ohio State University's Run1 motion capture data set.

    :param sample_every: keep every ``sample_every``-th capture frame.
    """
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        import zipfile
        download_data(data_set)
        # Context manager closes the archive; the original leaked the handle.
        with zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r') as archive:
            for name in archive.namelist():
                archive.extract(name, path)
    from . import mocap
    Y, connect = mocap.load_text_data('Aug210106', path)
    # Subsample frames; the final frame is dropped by the 0:-1 slice.
    Y = Y[0:-1:sample_every, :]
    return data_details_return({'Y': Y, 'connect': connect}, data_set)
def toy_linear_1d_classification(seed=default_seed):
    """Simple classification data in one dimension for illustrating models."""
    def sample_class(f):
        # Squash latent values through a logistic link, draw Bernoulli
        # labels, then map {0, 1} -> {-1, +1}.
        prob = 1. / (1. + np.exp(-f))
        draws = np.random.binomial(1, prob)
        return np.where(draws, 1, -1)

    np.random.seed(seed=seed)
    # Two overlapping clusters of 20 points centred at -3 and +3.
    left = np.random.normal(-3, 5, 20)
    right = np.random.normal(3, 5, 20)
    X = (np.r_[left, right])[:, None]
    latent = 2. * X
    return {'X': X,
            'Y': sample_class(latent),
            'F': latent,
            'covariates': ['X'],
            'response': [discrete({'positive': 1, 'negative': -1})],
            'seed': seed}
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed):
    """Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence"""
    if not data_available(data_set):
        download_data(data_set)
    dir_path = os.path.join(data_path, data_set)
    filename = os.path.join(dir_path, 'filtered_data.pickle')
    # Load the pickled, pre-filtered flight records.
    import pandas as pd
    data = pd.read_pickle(filename)
    # WARNING: removing year
    data.pop('Year')
    # Split the target column from the covariate matrix.
    Yall = data.pop('ArrDelay').values[:, None]
    Xall = data.values
    # Keep only as many rows as train + test require (memory!!).
    total = num_train + num_test
    Xall = Xall[:total]
    Yall = Yall[:total]
    # Shuffle once, then carve the test block off the front.
    np.random.seed(seed=seed)
    shuffled = permute(Yall.shape[0])
    train, test = shuffled[num_test:], shuffled[:num_test]
    X, Y = Xall[train], Yall[train]
    Xtest, Ytest = Xall[test], Yall[test]
    covariates = ['month', 'day of month', 'day of week', 'departure time',
                  'arrival time', 'air time', 'distance to travel',
                  'age of aircraft / years']
    response = ['delay']
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest,
                                'seed': seed,
                                'info': "Airline delay data used for demonstrating Gaussian processes for big data.",
                                'covariates': covariates,
                                'response': response}, data_set)
def olympic_sprints(data_set='rogers_girolami_data'):
    """All olympics sprint winning times for multiple output prediction."""
    X = np.zeros((0, 2))
    Y = np.zeros((0, 1))
    cats = {}
    for i, dataset in enumerate([olympic_100m_men,
                                 olympic_100m_women,
                                 olympic_200m_men,
                                 olympic_200m_women,
                                 olympic_400m_men,
                                 olympic_400m_women]):
        data = dataset()
        year = data['X']
        time = data['Y']
        # Append the event index as a second input column so all six
        # events share one stacked input matrix.
        X = np.vstack((X, np.hstack((year, np.ones_like(year) * i))))
        Y = np.vstack((Y, time))
        cats[dataset.__name__] = i
    # Fix: the original also wrote X/Y/info back into the last event's
    # returned dict after the loop; that mutation was dead code and is removed.
    return data_details_return({
        'X': X,
        'Y': Y,
        'covariates': [decimalyear('year', '%Y'), discrete(cats, 'event')],
        'response': ['time'],
        'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
        'output_info': {
            0: '100m Men',
            1: '100m Women',
            2: '200m Men',
            3: '200m Women',
            4: '400m Men',
            5: '400m Women'}
    }, data_set)
def movielens100k(data_set='movielens100k'):
    """Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
    if not data_available(data_set):
        import zipfile
        download_data(data_set)
        dir_path = os.path.join(data_path, data_set)
        # Context manager closes the archive; the original leaked the handle.
        with zipfile.ZipFile(os.path.join(dir_path, 'ml-100k.zip'), 'r') as archive:
            for name in archive.namelist():
                archive.extract(name, dir_path)
    import pandas as pd
    encoding = 'latin-1'
    movie_path = os.path.join(data_path, 'movielens100k', 'ml-100k')
    # NOTE(review): 'Children''s' is adjacent-string concatenation and
    # yields the column name 'Childrens'; kept byte-identical so existing
    # callers keying on that name still work -- confirm intent.
    items = pd.read_csv(os.path.join(movie_path, 'u.item'), index_col='index',
                        header=None, sep='|',
                        names=['index', 'title', 'date', 'empty', 'imdb_url',
                               'unknown', 'Action', 'Adventure', 'Animation',
                               'Children''s', 'Comedy', 'Crime', 'Documentary',
                               'Drama', 'Fantasy', 'Film-Noir', 'Horror',
                               'Musical', 'Mystery', 'Romance', 'Sci-Fi',
                               'Thriller', 'War', 'Western'],
                        encoding=encoding)
    users = pd.read_csv(os.path.join(movie_path, 'u.user'), index_col='index',
                        header=None, sep='|',
                        names=['index', 'age', 'sex', 'job', 'id'],
                        encoding=encoding)
    parts = ['u1.base', 'u1.test', 'u2.base', 'u2.test', 'u3.base', 'u3.test',
             'u4.base', 'u4.test', 'u5.base', 'u5.test', 'ua.base', 'ua.test',
             'ub.base', 'ub.test']
    ratings = []
    for part in parts:
        rate_part = pd.read_csv(os.path.join(movie_path, part), index_col='index',
                                header=None, sep='\t',
                                names=['user', 'item', 'rating', 'index'],
                                encoding=encoding)
        # Tag each chunk with the train/test split it came from.
        rate_part['split'] = part
        ratings.append(rate_part)
    Y = pd.concat(ratings)
    return data_details_return({'Y': Y, 'film_info': items, 'user_info': users,
                                'info': 'The Movielens 100k data'}, data_set)
def ceres(data_set='ceres'):
    """Twenty two observations of the Dwarf planet Ceres as observed by Giueseppe Piazzi and published in the September edition of Monatlicher Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planets orbit through which the planet was recovered three months later."""
    if not data_available(data_set):
        download_data(data_set)
    import pandas as pd
    # Column headers reproduce the original German names from Piazzi's table.
    column_names = ['Tag', 'Mittlere Sonnenzeit', 'Gerade Aufstig in Zeit',
                    'Gerade Aufstiegung in Graden', 'Nordlich Abweich',
                    'Geocentrische Laenger', 'Geocentrische Breite',
                    'Ort der Sonne + 20" Aberration', 'Logar. d. Distanz']
    data = pd.read_csv(os.path.join(data_path, data_set, 'ceresData.txt'),
                       index_col='Tag', header=None, sep='\t',
                       names=column_names, parse_dates=True, dayfirst=False)
    return data_details_return({'data': data}, data_set)
def access_elementusers(self, elementuser_id, access_id=None, tenant_id=None, api_version="v2.0"):
    """
    Get all accesses for a particular user

      **Parameters:**:

      - **elementuser_id**: Element User ID
      - **access_id**: (optional) Access ID
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None and self._parent_class.tenant_id:
        # Pull tenant_id from parent namespace cache.
        tenant_id = self._parent_class.tenant_id
    elif not tenant_id:
        # No value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")
    cur_ctlr = self._parent_class.controller
    # Base collection endpoint; append the access ID for a single record.
    endpoint = "/{}/api/tenants/{}/elementusers/{}/access".format(
        api_version, tenant_id, elementuser_id)
    if access_id:
        endpoint += "/{}".format(access_id)
    url = str(cur_ctlr) + endpoint
    api_logger.debug("URL = %s", url)
    return self._parent_class.rest_call(url, "get")
def logout(self, api_version="v2.0"):
    """
    Logout current session

      **Parameters:**:

      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    controller = self._parent_class.controller
    # Logout is a plain GET against the versioned endpoint.
    url = "{}/{}/api/logout".format(str(controller), api_version)
    api_logger.debug("URL = %s", url)
    return self._parent_class.rest_call(url, "get")
def use_token(self, token=None):
    """
    Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.

      **Parameters:**:

      - **token**: Static AUTH_TOKEN

    **Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API` constructor items as needed.
    """
    api_logger.info('use_token function:')
    # check token is a string.
    if not isinstance(token, (text_type, binary_type)):
        api_logger.debug('"token" was not a text-style string: {}'.format(text_type(token)))
        return False
    # Start setup of constructor.
    session = self._parent_class.expose_session()
    # clear cookies
    session.cookies.clear()
    # Static Token uses X-Auth-Token header instead of cookies.
    self._parent_class.add_headers({
        'X-Auth-Token': token
    })
    # Step 2: Get operator profile for tenant ID and other info.
    if self.interactive_update_profile_vars():
        # pull tenant detail
        if self._parent_class.tenant_id:
            # add tenant values to API() object
            if self.interactive_tenant_update_vars():
                # Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
                if self._parent_class.is_esp:
                    # ESP/MSP!
                    choose_status, chosen_client_id = self.interactive_client_choice()
                    if choose_status:
                        # attempt to login as client
                        clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
                        if clogin_resp.cgx_status:
                            # login successful, update profile and tenant info
                            c_profile = self.interactive_update_profile_vars()
                            t_profile = self.interactive_tenant_update_vars()
                            if c_profile and t_profile:
                                # successful full client login.
                                self._parent_class._password = None
                                return True
                            else:
                                if t_profile:
                                    print("ESP Client Tenant detail retrieval failed.")
                                # clear password out of memory
                                self._parent_class.email = None
                                self._parent_class._password = None
                                return False
                        else:
                            print("ESP Client Login failed.")
                            # clear password out of memory
                            self._parent_class.email = None
                            self._parent_class._password = None
                            return False
                    else:
                        print("ESP Client Choice failed.")
                        # clear password out of memory
                        self._parent_class.email = None
                        self._parent_class._password = None
                        return False
                # successful!
                # clear password out of memory
                self._parent_class._password = None
                return True
            else:
                print("Tenant detail retrieval failed.")
                # clear password out of memory
                self._parent_class.email = None
                self._parent_class._password = None
                return False
    else:
        # Profile detail retrieval failed
        self._parent_class.email = None
        self._parent_class._password = None
        return False
    # NOTE(review): reached only when the profile succeeded but no
    # tenant_id was cached -- verify this fall-through return True is intended.
    api_logger.info("EMAIL = %s", self._parent_class.email)
    api_logger.info("USER_ID = %s", self._parent_class._user_id)
    api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
    api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
    api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
    api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
    return True
def interactive_tenant_update_vars(self):
    """
    Function to update the `cloudgenix.API` object with tenant login info. Run after login or client login.

    **Returns:** Boolean on success/failure,
    """
    api_logger.info('interactive_tenant_update_vars function:')
    tenant_resp = self._parent_class.get.tenants(self._parent_class.tenant_id)
    status = tenant_resp.cgx_status
    tenant_dict = tenant_resp.cgx_content
    if not status:
        # update failed
        return False
    api_logger.debug("new tenant_dict: %s", tenant_dict)
    # Cache tenant display name, falling back to the raw ID.
    self._parent_class.tenant_name = tenant_dict.get('name', self._parent_class.tenant_id)
    # is ESP/MSP?
    self._parent_class.is_esp = tenant_dict.get('is_esp')
    # Flatten the tenant's postal address into one comma-separated line.
    address_lookup = tenant_dict.get('address', None)
    if address_lookup:
        fields = [address_lookup.get('street', "")]
        for field in ('street2', 'city', 'state', 'post_code', 'country'):
            fields.append(str(address_lookup.get(field, "")))
        # Trailing ", " matches the original concatenation exactly.
        tenant_address = ", ".join(fields) + ", "
    else:
        tenant_address = "Unknown"
    self._parent_class.address = tenant_address
    return True
def interactive_update_profile_vars(self):
    """
    Function to update the `cloudgenix.API` object with profile info. Run after login or client login.

    **Returns:** Boolean on success/failure,
    """
    profile = self._parent_class.get.profile()
    if not profile.cgx_status:
        print("Profile retrieval failed.")
        # clear password out of memory
        self._parent_class._password = None
        return False
    # Cache the operator's identity details on the API constructor.
    content = profile.cgx_content
    self._parent_class.tenant_id = content.get('tenant_id')
    self._parent_class.email = content.get('email')
    self._parent_class._user_id = content.get('id')
    self._parent_class.roles = content.get('roles', [])
    self._parent_class.token_session = content.get('token_session')
    return True
def quick_menu(self, banner, list_line_format, choice_list):
    """
    Function to display a quick menu for user input

    **Parameters:**

      - **banner:** Text to display before menu
      - **list_line_format:** Print'ing string with format spots for index + tuple values
      - **choice_list:** List of tuple values that you want returned if selected (and printed)

    **Returns:** Tuple that was selected.
    """
    chosen_index = -1
    done = False
    # Re-display the menu until a valid 1-based selection is made.
    while not done:
        print(banner)
        for item_index, item_value in enumerate(choice_list):
            print(list_line_format.format(item_index + 1, *item_value))
        menu_choice = compat_input("\nChoose a Number or (Q)uit: ")
        if str(menu_choice).lower() in ['q']:
            # exit
            print("Exiting..")
            # best effort logout
            self._parent_class.get.logout()
            sys.exit(0)
        # verify number entered
        try:
            chosen_index = int(menu_choice)
            parsed = True
        except ValueError:
            print("ERROR: ", menu_choice)
            parsed = False
        # validate number chosen
        if parsed and 1 <= chosen_index <= len(choice_list):
            done = True
        else:
            print("Invalid input, needs to be between 1 and {0}.\n".format(len(choice_list)))
    # return the choice_list tuple that matches the entry.
    return choice_list[int(chosen_index) - 1]
def check_sso_login(self, operator_email, request_id):
    """
    Login to the CloudGenix API, and see if SAML SSO has occurred.
    This function is used to check and see if SAML SSO has succeeded while waiting.

    **Parameters:**

      - **operator_email:** String with the username to log in with
      - **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.

    **Returns:** Tuple (Boolean success, Token on success, JSON response on error.)
    """
    api_logger.info('check_sso_login function:')
    payload = {
        "email": operator_email,
        "requestId": request_id
    }
    response = self._parent_class.post.login(data=payload)
    # No token in the response means SSO has not completed yet; hand the
    # raw response back to the caller for retry.
    if not response.cgx_content.get('x_auth_token'):
        return response
    # Token present: bind the session to the authentication region.
    auth_region = self._parent_class.parse_region(response)
    self._parent_class.update_region_to_controller(auth_region)
    self._parent_class.reparse_login_cookie_after_region_update(response)
    return response
def quick_confirm(prompt, default_value):
    """
    Function to display a quick confirmation for user input

    **Parameters:**

      - **prompt:** Text to display before confirm
      - **default_value:** Default value for no entry

    **Returns:** 'y', 'n', or Default value.
    """
    while True:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))
        if input_val == "":
            # Blank entry accepts the default.
            return default_value.lower()
        if input_val.lower() in ['y', 'n']:
            return input_val.lower()
        # Fix: removed a try/except ValueError wrapper -- string
        # .lower()/membership tests can never raise ValueError.
        print("ERROR: enter 'Y' or 'N'.")
def quick_int_input(prompt, default_value, min_val=1, max_val=30):
    """
    Function to display a quick question for integer user input

    **Parameters:**

      - **prompt:** Text / question to display
      - **default_value:** Default value for no entry
      - **min_val:** Lowest allowed integer
      - **max_val:** Highest allowed integer

    **Returns:** integer or default_value.
    """
    while True:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))
        if input_val == "":
            # Blank entry accepts the default.
            return default_value
        try:
            num_val = int(input_val)
        except ValueError:
            print("ERROR: must be a number.")
            continue
        if min_val <= num_val <= max_val:
            return num_val
        # Bug fix: the original formatted the builtins `min`/`max`
        # (printing their reprs) instead of the actual bounds.
        print("ERROR: must be between {0} and {1}.".format(min_val, max_val))
def quick_str_input(prompt, default_value):
    """
    Function to display a quick question for text input.

    **Parameters:**

      - **prompt:** Text / question to display
      - **default_value:** Default value for no entry

    **Returns:** text_type() or default_value.
    """
    # Fix: use the py2/py3 `compat_input` shim like the sibling quick_*
    # helpers; `raw_input` does not exist on Python 3.
    input_val = compat_input(prompt + "[{0}]: ".format(default_value))
    if input_val == "":
        # Blank entry accepts the default.
        return default_value
    # The original wrapped this in a retry loop catching ValueError, but
    # text_type() on an input string can never raise it -- simplified.
    return text_type(input_val)
def tran_hash(self, a, b, c, n):
    """implementation of the tran53 hash function"""
    # Three lookups in the TRAN substitution table, combined and folded
    # back into a single byte.
    first = TRAN[(a + n) & 255]
    second = TRAN[b] * (n + n + 1)
    third = TRAN[c ^ TRAN[n]]
    return ((first ^ second) + third) & 255
def process(self, chunk):
    """
    computes the hash of all of the trigrams in the chunk using a window
    of length 5
    """
    # Invalidate any previously computed digest; it is recomputed lazily.
    self._digest = None
    if isinstance(chunk, text_type):
        chunk = chunk.encode('utf-8')
    # chunk is a byte string
    for char in chunk:
        self.num_char += 1
        if PY3:
            # In Python 3, iterating over bytes yields integers
            c = char
        else:
            c = ord(char)
        # self.window holds up to the 4 previous byte values, most recent first.
        if len(self.window) > 1:
            # seen at least three characters
            self.acc[self.tran_hash(c, self.window[0], self.window[1], 0)] += 1
        if len(self.window) > 2:
            # seen at least four characters
            self.acc[self.tran_hash(c, self.window[0], self.window[2], 1)] += 1
            self.acc[self.tran_hash(c, self.window[1], self.window[2], 2)] += 1
        if len(self.window) > 3:
            # have a full window
            self.acc[self.tran_hash(c, self.window[0], self.window[3], 3)] += 1
            self.acc[self.tran_hash(c, self.window[1], self.window[3], 4)] += 1
            self.acc[self.tran_hash(c, self.window[2], self.window[3], 5)] += 1
            # duplicate hashes, used to maintain 8 trigrams per character
            self.acc[self.tran_hash(self.window[3], self.window[0], c, 6)] += 1
            self.acc[self.tran_hash(self.window[3], self.window[2], c, 7)] += 1
        # add current character to the window, remove the previous character
        if len(self.window) < 4:
            self.window = [c] + self.window
        else:
            self.window = [c] + self.window[:3]
def from_file(self, fname):
    """read in a file and compute digest

    :param fname: path of the file whose bytes are folded into the digest.
    """
    # `with` closes the handle even if update() raises; the original
    # leaked the file object on error.
    with open(fname, "rb") as f:
        self.update(f.read())
def compare(self, digest_2, is_hex=False):
    """
    returns difference between the nilsimsa digests between the current
    object and a given digest
    """
    # convert hex string to list of ints
    if is_hex:
        digest_2 = convert_hex_to_ints(digest_2)
    # POPC[x] is the popcount of byte x; summing the popcounts of the
    # XORed bytes counts the differing bits across both digests.
    bit_diff = sum(POPC[self.digest[i] ^ digest_2[i]]
                   for i in range(len(self.digest)))
    return 128 - bit_diff
def tenant_forgot_password_login(self, data, tenant_id=None, api_version="v2.0"):
    """
    Forgot password API

      **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None and self._parent_class.tenant_id:
        # Pull tenant_id from parent namespace cache.
        tenant_id = self._parent_class.tenant_id
    elif not tenant_id:
        # No value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")
    controller = self._parent_class.controller
    url = "{}/{}/api/tenants/{}/login/password/forgot".format(
        str(controller), api_version, tenant_id)
    api_logger.debug("URL = %s", url)
    # sensitive=True -- presumably keeps the credential payload out of
    # debug logs; verify against rest_call's implementation.
    return self._parent_class.rest_call(url, "post", data=data, sensitive=True)
def is_valid_file(parser, arg):
    """verify the validity of the given file. Never trust the End-User"""
    if os.path.exists(arg):
        return arg
    # Delegate the failure to argparse so the CLI prints a usage error.
    parser.error("File %s not found" % arg)
def getID(code_file):
    """Get the language ID of the input file language

    :param code_file: path of the source file to classify.
    :returns: Ghostbin language ID string ('' if no match found).
    """
    json_path = ghostfolder + '/' + json_file
    # Fetch the language catalogue on first use.
    if not os.path.exists(json_path):
        download_file('https://ghostbin.com/languages.json')
    lang = detect_lang(code_file)
    # Py3 fix: the original used the removed Python 2 `file()` builtin
    # and never closed the handle.
    with open(json_path) as json_fp:
        json_data = json.load(json_fp)
    ID = ''
    # Search every language group for a case-insensitive name match.
    for group in json_data:
        for entry in group['languages']:
            if entry['name'].lower() == lang.lower():
                ID = entry['id']
    print('Gotten language ID from \'languages.json\': {0}'.format(ID))
    return ID
def detect_lang(path):
    """Detect the language used in the given file."""
    blob = FileBlob(path, os.getcwd())
    # images, binary and what-have-you won't be pasted
    if not blob.is_text:
        print('File not a text file. Exiting...')
        sys.exit()
    print('Programming language of the file detected: {0}'.format(blob.language.name))
    return blob.language.name
def screenshot(self, scale=None, quality=None):
    """
    Take a screenshot of device and log in the report with timestamp,
    scale for screenshot size and quality for screenshot quality
    default scale=1.0 quality=100
    """
    output_dir = BuiltIn().get_variable_value('${OUTPUTDIR}')
    # Second-resolution timestamp doubles as the file name and link text.
    stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')
    screenshot_path = '%s%s%s.png' % (output_dir, os.sep, stamp)
    self.device.screenshot(screenshot_path, scale, quality)
    # Embed a link plus inline preview in the Robot Framework log.
    logger.info('\n<a href="%s">%s</a><br><img src="%s">'
                % (screenshot_path, stamp, screenshot_path), html=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def call(self, obj, method, *args, **selectors): """ This keyword can use object method from original python uiautomator See more details from https://github.com/xiaocong/uiautomator Example: | ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object | | Call | ${accessibility_text} | click | # Call the method of the UI object 'click' | """
def call(self, obj, method, *args, **selectors):
    """
    This keyword can use object method from original python uiautomator

    See more details from https://github.com/xiaocong/uiautomator

    Example:
    | ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
    | Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |

    :param obj: object whose method will be invoked
    :param method: name of the method to look up on *obj*
    :param args: positional arguments forwarded to the method
    :param selectors: keyword arguments forwarded to the method
    :return: whatever the invoked method returns
    """
    func = getattr(obj, method)
    # BUG FIX: positional *args were previously accepted by the signature but
    # silently dropped; forward them to the target method as well.
    return func(*args, **selectors)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_sims(oldsims, newsims, clip=None): """Merge two precomputed similarity lists, truncating the result to `clip` most similar items."""
def merge_sims(oldsims, newsims, clip=None):
    """Merge two precomputed similarity lists, truncating the result to `clip` most similar items.

    Either input may be None (treated as "no sims"). Each list holds
    (doc_id, score) pairs; the merged list is sorted best-score-first.
    """
    if oldsims is None:
        result = newsims or []
    elif newsims is None:
        result = oldsims
    else:
        # Highest similarity first.
        result = sorted(oldsims + newsims, key=lambda item: -item[1])
    if clip is not None:
        result = result[:clip]
    # BUG FIX: the merged list was computed but never returned (the function
    # implicitly returned None as written).
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def terminate(self): """Delete all files created by this index, invalidating `self`. Use with care."""
def terminate(self):
    """Delete all files created by this index, invalidating `self`. Use with care."""
    try:
        self.id2sims.terminate()
    except Exception:
        pass  # best effort -- the store may already be gone
    import glob
    # Remove every on-disk artefact sharing this index's filename prefix.
    for fname in glob.glob(self.fname + '*'):
        try:
            os.remove(fname)
            logger.info("deleted %s" % fname)
        except Exception as e:  # FIX: was Py2-only `except Exception, e` syntax
            logger.warning("failed to delete %s: %s" % (fname, e))
    # Drop all attributes so any further use of `self` fails fast.
    # FIX: iterate a snapshot -- deleting while iterating the live dict view
    # raises RuntimeError on Python 3.
    for val in list(self.__dict__.keys()):
        try:
            delattr(self, val)
        except AttributeError:
            pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_ids(self, docids): """Update id->pos mapping with new document ids."""
def update_ids(self, docids):
    """Update id->pos mapping with new document ids.

    Each docid is assigned the next sequential position; a docid that is
    already indexed is re-pointed at its new position (the old position is
    masked). `None` entries still consume a position but get no mapping.
    """
    logger.info("updating %i id mappings" % len(docids))
    for docid in docids:
        if docid is not None:
            pos = self.id2pos.get(docid, None)
            if pos is not None:
                logger.info("replacing existing document %r in %s" % (docid, self))
                del self.pos2id[pos]
            self.id2pos[docid] = self.length
            try:
                del self.id2sims[docid]
            except KeyError:
                # FIX: narrowed from a bare `except:` that hid real errors;
                # missing precomputed sims for a new doc is the expected case.
                pass
        self.length += 1
    self.id2sims.sync()
    self.update_mappings()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vec_by_id(self, docid): """Return indexed vector corresponding to document `docid`."""
def vec_by_id(self, docid):
    """Return indexed vector corresponding to document `docid`."""
    # Translate the external document id into its internal position,
    # then fetch the stored vector for that position.
    position = self.id2pos[docid]
    return self.qindex.vector_by_id(position)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge(self, other): """Merge documents from the other index. Update precomputed similarities in the process."""
other.qindex.normalize, other.qindex.num_best = False, self.topsims # update precomputed "most similar" for old documents (in case some of # the new docs make it to the top-N for some of the old documents) logger.info("updating old precomputed values") pos, lenself = 0, len(self.qindex) for chunk in self.qindex.iter_chunks(): for sims in other.qindex[chunk]: if pos in self.pos2id: # ignore masked entries (deleted, overwritten documents) docid = self.pos2id[pos] sims = self.sims2scores(sims) self.id2sims[docid] = merge_sims(self.id2sims[docid], sims, self.topsims) pos += 1 if pos % 10000 == 0: logger.info("PROGRESS: updated doc #%i/%i" % (pos, lenself)) self.id2sims.sync() logger.info("merging fresh index into optimized one") pos, docids = 0, [] for chunk in other.qindex.iter_chunks(): for vec in chunk: if pos in other.pos2id: # don't copy deleted documents self.qindex.add_documents([vec]) docids.append(other.pos2id[pos]) pos += 1 self.qindex.save() self.update_ids(docids) logger.info("precomputing most similar for the fresh index") pos, lenother = 0, len(other.qindex) norm, self.qindex.normalize = self.qindex.normalize, False topsims, self.qindex.num_best = self.qindex.num_best, self.topsims for chunk in other.qindex.iter_chunks(): for sims in self.qindex[chunk]: if pos in other.pos2id: # ignore masked entries (deleted, overwritten documents) docid = other.pos2id[pos] self.id2sims[docid] = self.sims2scores(sims) pos += 1 if pos % 10000 == 0: logger.info("PROGRESS: precomputed doc #%i/%i" % (pos, lenother)) self.qindex.normalize, self.qindex.num_best = norm, topsims self.id2sims.sync()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def doc2vec(self, doc): """Convert a single SimilarityDocument to vector."""
def doc2vec(self, doc):
    """Convert a single SimilarityDocument to vector."""
    # Tokens -> bag-of-words, then through the transformation selected at
    # training time. An unrecognized method falls through and returns None.
    bow = self.dictionary.doc2bow(doc['tokens'])
    method = self.method
    if method == 'lsi':
        return self.lsi[self.tfidf[bow]]
    if method == 'lda':
        return self.lda[bow]
    if method == 'lda_tfidf':
        return self.lda[self.tfidf[bow]]
    if method == 'logentropy':
        return self.logent[bow]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flush(self, save_index=False, save_model=False, clear_buffer=False): """Commit all changes, clear all caches."""
def flush(self, save_index=False, save_model=False, clear_buffer=False):
    """Commit all changes, clear all caches.

    :param save_index: persist the fresh/optimized indexes to disk, if present
    :param save_model: persist the semantic model to disk, if present
    :param clear_buffer: discard all buffered documents and start a new buffer
    """
    if save_index:
        if self.fresh_index is not None:
            self.fresh_index.save(self.location('index_fresh'))
        if self.opt_index is not None:
            self.opt_index.save(self.location('index_opt'))
    if save_model:
        if self.model is not None:
            self.model.save(self.location('model'))
    self.payload.commit()
    if clear_buffer:
        if hasattr(self, 'fresh_docs'):
            try:
                self.fresh_docs.terminate()  # erase all buffered documents + file on disk
            except Exception:
                # FIX: narrowed from a bare `except:`; cleanup stays best-effort
                # but no longer swallows SystemExit/KeyboardInterrupt.
                pass
        self.fresh_docs = SqliteDict(journal_mode=JOURNAL_MODE)  # buffer defaults to a random location in temp
        self.fresh_docs.sync()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close(self): """Explicitly close open file handles, databases etc."""
def close(self):
    """Explicitly close open file handles, databases etc.

    Every resource is released independently and best-effort, so a failure
    closing one handle does not prevent closing the others.
    """
    # (attribute, method) pairs in the original shutdown order.
    for attr, action in (('payload', 'close'),
                         ('model', 'close'),
                         ('fresh_index', 'close'),
                         ('opt_index', 'close'),
                         ('fresh_docs', 'terminate')):
        try:
            getattr(getattr(self, attr), action)()
        except Exception:
            # FIX: narrowed from five copy-pasted bare `except:` clauses
            # (no longer swallows SystemExit/KeyboardInterrupt).
            pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def train(self, corpus=None, method='auto', clear_buffer=True, params=None): """ Create an indexing model. Will overwrite the model if it already exists. All indexes become invalid, because documents in them use a now-obsolete representation. The model is trained on documents previously entered via `buffer`, or directly on `corpus`, if specified. """
def train(self, corpus=None, method='auto', clear_buffer=True, params=None):
    """
    Create an indexing model. Will overwrite the model if it already exists.
    All indexes become invalid, because documents in them use a
    now-obsolete representation.

    The model is trained on documents previously entered via `buffer`,
    or directly on `corpus`, if specified.

    :param corpus: optional iterable of documents to train on (replaces the buffer)
    :param method: 'lsi', 'logentropy', ... or 'auto' to pick based on corpus size
    :param clear_buffer: drop the training buffer once the model is built
    :param params: extra keyword parameters passed to the model constructor
    :raises ValueError: if there are no documents to train on
    """
    if corpus is not None:
        # use the supplied corpus only (erase existing buffer, if any)
        self.flush(clear_buffer=True)
        self.buffer(corpus)
    if not self.fresh_docs:
        msg = "train called but no training corpus specified for %s" % self
        logger.error(msg)
        raise ValueError(msg)
    if method == 'auto':
        numdocs = len(self.fresh_docs)
        if numdocs < 1000:
            # FIX: log through the module-level `logger` (as done everywhere
            # else in this module) instead of the root logger.
            logger.warning("too few training documents; using simple log-entropy model instead of latent semantic indexing")
            method = 'logentropy'
        else:
            method = 'lsi'
    if params is None:
        params = {}
    self.model = SimModel(self.fresh_docs, method=method, params=params)
    self.flush(save_model=True, clear_buffer=clear_buffer)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(self, corpus=None, clear_buffer=True): """ Permanently index all documents previously added via `buffer`, or directly index documents from `corpus`, if specified. The indexing model must already exist (see `train`) before this function is called. """
# An indexing model must exist first -- see `train`.
if not self.model:
    msg = 'must initialize model for %s before indexing documents' % self.basename
    logger.error(msg)
    raise AttributeError(msg)

if corpus is not None:
    # use the supplied corpus only (erase existing buffer, if any)
    self.flush(clear_buffer=True)
    self.buffer(corpus)

if not self.fresh_docs:
    msg = "index called but no indexing corpus specified for %s" % self
    logger.error(msg)
    raise ValueError(msg)

if not self.fresh_index:
    logger.info("starting a new fresh index for %s" % self)
    self.fresh_index = SimIndex(self.location('index_fresh'), self.model.num_features)
self.fresh_index.index_documents(self.fresh_docs, self.model)
# Re-indexed documents must mask any previous version of themselves that
# still lives in the optimized index.
if self.opt_index is not None:
    self.opt_index.delete(self.fresh_docs.keys())

logger.info("storing document payloads")
for docid in self.fresh_docs:
    payload = self.fresh_docs[docid].get('payload', None)
    if payload is None:
        # HACK: exit on first doc without a payload (=assume all docs have payload, or none does)
        break
    self.payload[docid] = payload
self.flush(save_index=True, clear_buffer=clear_buffer)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def drop_index(self, keep_model=True): """Drop all indexed documents. If `keep_model` is False, also dropped the model."""
def drop_index(self, keep_model=True):
    """Drop all indexed documents. If `keep_model` is False, also dropped the model."""
    modelstr = "" if keep_model else "and model "
    logger.info("deleting similarity index " + modelstr + "from %s" % self.basename)

    # delete indexes
    for index in [self.fresh_index, self.opt_index]:
        if index is not None:
            index.terminate()
    self.fresh_index, self.opt_index = None, None

    # delete payload
    if self.payload is not None:
        self.payload.close()
        fname = self.location('payload')
        try:
            if os.path.exists(fname):
                os.remove(fname)
                logger.info("deleted %s" % fname)
        except Exception:
            # FIX: was Py2-only `except Exception, e` syntax; the bound
            # exception was never used in the log message anyway.
            logger.warning("failed to delete %s" % fname)
    self.payload = SqliteDict(self.location('payload'), autocommit=True, journal_mode=JOURNAL_MODE)

    # optionally, delete the model as well
    if not keep_model and self.model is not None:
        self.model.close()
        fname = self.location('model')
        try:
            if os.path.exists(fname):
                os.remove(fname)
                logger.info("deleted %s" % fname)
        except Exception:
            # FIX: same Py2-only except syntax as above.
            logger.warning("failed to delete %s" % fname)
        self.model = None
    self.flush(save_index=True, save_model=True, clear_buffer=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, docids): """Delete specified documents from the index."""
def delete(self, docids):
    """Delete specified documents from the index."""
    logger.info("asked to drop %i documents" % len(docids))
    # Remove the ids from whichever of the two indexes currently exist.
    for idx in (self.opt_index, self.fresh_index):
        if idx is not None:
            idx.delete(docids)
    self.flush(save_index=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_similar(self, doc, min_score=0.0, max_results=100): """ Find `max_results` most similar articles in the index, each having similarity score of at least `min_score`. The resulting list may be shorter than `max_results`, in case there are not enough matching documents. `doc` is either a string (=document id, previously indexed) or a dict containing a 'tokens' key. These tokens are processed to produce a vector, which is then used as a query against the index. The similar documents are returned in decreasing similarity order, as `(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned is identical to what was supplied for this document during indexing. """
logger.debug("received query call with %r" % doc)
if self.is_locked():
    msg = "cannot query while the server is being updated"
    logger.error(msg)
    raise RuntimeError(msg)
sims_opt, sims_fresh = None, None
# Cap the number of raw hits each index may return for this query.
for index in [self.fresh_index, self.opt_index]:
    if index is not None:
        index.topsims = max_results
# NOTE(review): `basestring` is Python 2 only -- this module predates Py3.
if isinstance(doc, basestring):
    # query by direct document id
    docid = doc
    if self.opt_index is not None and docid in self.opt_index:
        sims_opt = self.opt_index.sims_by_id(docid)
        if self.fresh_index is not None:
            # Re-query the *other* index with this doc's stored vector.
            vec = self.opt_index.vec_by_id(docid)
            sims_fresh = self.fresh_index.sims_by_vec(vec, normalize=False)
    elif self.fresh_index is not None and docid in self.fresh_index:
        sims_fresh = self.fresh_index.sims_by_id(docid)
        if self.opt_index is not None:
            vec = self.fresh_index.vec_by_id(docid)
            sims_opt = self.opt_index.sims_by_vec(vec, normalize=False)
    else:
        raise ValueError("document %r not in index" % docid)
else:
    if 'topics' in doc:
        # user supplied vector directly => use that
        vec = gensim.matutils.any2sparse(doc['topics'])
    else:
        # query by an arbitrary text (=tokens) inside doc['tokens']
        vec = self.model.doc2vec(doc)  # convert document (text) to vector
    if self.opt_index is not None:
        sims_opt = self.opt_index.sims_by_vec(vec)
    if self.fresh_index is not None:
        sims_fresh = self.fresh_index.sims_by_vec(vec)
# Combine hits from both indexes, best score first.
merged = merge_sims(sims_opt, sims_fresh)
logger.debug("got %s raw similars, pruning with max_results=%s, min_score=%s" % (len(merged), max_results, min_score))
result = []
for docid, score in merged:
    # Stop at the first sub-threshold score (list is sorted) or once full;
    # max_results <= 0 means "no limit".
    if score < min_score or 0 < max_results <= len(result):
        break
    result.append((docid, float(score), self.payload.get(docid, None)))
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def keys(self): """Return ids of all indexed documents."""
def keys(self):
    """Return ids of all indexed documents."""
    # Concatenate ids from the fresh and optimized indexes (either may be
    # absent), fresh index first -- same order as before.
    all_ids = []
    for index in (self.fresh_index, self.opt_index):
        if index is not None:
            all_ids.extend(index.keys())
    return all_ids
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_session(self): """ Make sure a session is open. If it's not and autosession is turned on, create a new session automatically. If it's not and autosession is off, raise an exception. """
def check_session(self):
    """
    Make sure a session is open.

    If it's not and autosession is turned on, create a new session
    automatically. If it's not and autosession is off, raise an exception.
    """
    if self.session is not None:
        return  # a session is already open -- nothing to do
    if self.autosession:
        self.open_session()
    else:
        raise RuntimeError("must open a session before modifying %s" % self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open_session(self): """ Open a new session to modify this server. You can either call this fnc directly, or turn on autosession which will open/commit sessions for you transparently. """
def open_session(self):
    """
    Open a new session to modify this server.

    You can either call this fnc directly, or turn on autosession which will
    open/commit sessions for you transparently.

    :raises RuntimeError: if a session is already open
    """
    if self.session is not None:
        msg = "session already open; commit it or rollback before opening another one in %s" % self
        logger.error(msg)
        raise RuntimeError(msg)

    logger.info("opening a new session")
    logger.info("removing %s" % self.loc_session)
    try:
        shutil.rmtree(self.loc_session)
    except OSError:
        # FIX: narrowed from a bare `except:`; rmtree failures surface as
        # OS errors (e.g. the directory does not exist yet).
        logger.info("failed to delete %s" % self.loc_session)
    # Work on a private copy of the stable server state.
    logger.info("cloning server from %s to %s" % (self.loc_stable, self.loc_session))
    shutil.copytree(self.loc_stable, self.loc_session)
    self.session = SimServer(self.loc_session, use_locks=self.use_locks)
    self.lock_update.acquire()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buffer(self, *args, **kwargs): """Buffer documents, in the current session"""
def buffer(self, *args, **kwargs):
    """Buffer documents, in the current session"""
    # Ensure a session exists (may auto-open one), then delegate to it.
    self.check_session()
    return self.session.buffer(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(self, *args, **kwargs): """Index documents, in the current session"""
def index(self, *args, **kwargs):
    """Index documents, in the current session"""
    self.check_session()
    outcome = self.session.index(*args, **kwargs)
    # With autosession on, every modification is committed immediately.
    if self.autosession:
        self.commit()
    return outcome
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def drop_index(self, keep_model=True): """Drop all indexed documents from the session. Optionally, drop model too."""
def drop_index(self, keep_model=True):
    """Drop all indexed documents from the session. Optionally, drop model too."""
    self.check_session()
    outcome = self.session.drop_index(keep_model)
    # Autosession commits the change immediately.
    if self.autosession:
        self.commit()
    return outcome
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, docids): """Delete documents from the current session."""
def delete(self, docids):
    """Delete documents from the current session."""
    self.check_session()
    outcome = self.session.delete(docids)
    # Autosession commits the change immediately.
    if self.autosession:
        self.commit()
    return outcome
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def optimize(self): """Optimize index for faster by-document-id queries."""
def optimize(self):
    """Optimize index for faster by-document-id queries."""
    self.check_session()
    outcome = self.session.optimize()
    # Autosession commits the change immediately.
    if self.autosession:
        self.commit()
    return outcome
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def commit(self): """Commit changes made by the latest session."""
def commit(self):
    """Commit changes made by the latest session."""
    if self.session is None:
        logger.warning("commit called but there's no open session in %s" % self)
        return
    logger.info("committing transaction in %s" % self)
    previous_stable = self.stable
    # Promote the session to stable and flip the a/b directory flag.
    self.stable, self.session = self.session, None
    self.istable = 1 - self.istable
    self.write_istable()
    previous_stable.close()  # don't wait for gc, release resources manually
    self.lock_update.release()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def terminate(self): """Delete all files created by this server, invalidating `self`. Use with care."""
def terminate(self):
    """Delete all files created by this server, invalidating `self`. Use with care."""
    logger.info("deleting entire server %s" % self)
    self.close()
    try:
        shutil.rmtree(self.basedir)
        logger.info("deleted server under %s" % self.basedir)
        # delete everything from self, so that using this object fails results
        # in an error as quickly as possible
        # FIX: iterate a snapshot -- deleting while iterating the live dict
        # view raises RuntimeError on Python 3.
        for val in list(self.__dict__.keys()):
            try:
                delattr(self, val)
            except AttributeError:
                pass
    except Exception as e:  # FIX: was Py2-only `except Exception, e` syntax
        logger.warning("failed to delete SessionServer: %s" % (e))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_similar(self, *args, **kwargs): """ Find similar articles. With autosession off, use the index state *before* current session started, so that changes made in the session will not be visible here. With autosession on, close the current session first (so that session changes *are* committed and visible). """
def find_similar(self, *args, **kwargs):
    """
    Find similar articles.

    With autosession off, use the index state *before* current session started,
    so that changes made in the session will not be visible here. With
    autosession on, close the current session first (so that session changes
    *are* committed and visible).
    """
    if self.autosession and self.session is not None:
        # with autosession on, commit the pending transaction first
        self.commit()
    return self.stable.find_similar(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def profile(self, ctx, platform, name):
    '''Fetch a profile.'''
    # Look the player up on the requested platform, then report their
    # solo-mode kill count back to the channel.
    player = await self.client.get_player(platform, name)
    solo_stats = await player.get_solos()
    kills = solo_stats.kills.value
    await ctx.send("# of kills in solos for {}: {}".format(name, kills))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_chunks(data, chunk_size=DEFAULT_CHUNK_SIZE): """Yield 'chunk_size' items from 'data' at a time."""
def generate_chunks(data, chunk_size=DEFAULT_CHUNK_SIZE):
    """Yield 'chunk_size' items from 'data' at a time."""
    values = iter(repeated.getvalues(data))
    # iter(callable, sentinel) keeps slicing fixed-size chunks off the
    # iterator and stops at the first empty chunk (exhaustion).
    for chunk in iter(lambda: list(itertools.islice(values, chunk_size)), []):
        yield chunk
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE): """Repeatedly call fold and merge on data and then finalize. Arguments: data: Input for the fold function. reducer: The IReducer to use. chunk_size: How many items should be passed to fold at a time? Returns: Return value of finalize. """
def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE):
    """Repeatedly call fold and merge on data and then finalize.

    Arguments:
        data: Input for the fold function.
        reducer: The IReducer to use.
        chunk_size: How many items should be passed to fold at a time?

    Returns:
        Return value of finalize.
    """
    if not chunk_size:
        # No chunking requested: fold everything in one go.
        return finalize(reducer, fold(reducer, data))

    # Folding chunk-by-chunk bounds memory use (e.g. reducing a large file)
    # while staying much faster than calling fold per element.
    chunks = generate_chunks(data, chunk_size)
    accumulator = fold(reducer, next(chunks))
    for piece in chunks:
        accumulator = merge(reducer, accumulator, fold(reducer, piece))
    return finalize(reducer, accumulator)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def conditions(self): """The if-else pairs."""
def conditions(self):
    """The if-else pairs.

    Children alternate condition, value, condition, value, ...; yield them
    as (condition, value) tuples. A trailing unpaired child is ignored,
    exactly as with the original stepped-range iteration.
    """
    # FIX: dropped the third-party `six.moves.range` indirection -- the
    # stepped pairing needs nothing beyond builtin slicing and zip, and the
    # yielded pairs are identical.
    for pair in zip(self.children[0::2], self.children[1::2]):
        yield pair
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_noargs(self, **options): """Send Report E-mails."""
# Aggregate the last 24 hours of every metric, grouped by category, and
# email the rendered report to the site admins.
r = get_r()
since = datetime.utcnow() - timedelta(days=1)
metrics = {}
categories = r.metric_slugs_by_category()
for category_name, slug_list in categories.items():
    metrics[category_name] = []
    for slug in slug_list:
        metric_values = r.get_metric_history(slug, since=since)
        metrics[category_name].append(
            (slug, metric_values)
        )
# metrics is now:
# --------------
# { Category : [
#     ('foo', [('m:foo:2012-07-18', 1), ('m:foo:2012-07-19, 2), ...])
#   ],
#   ...
# }
# Render both a plain-text body and an HTML alternative from templates.
template = "redis_metrics/email/report.{fmt}"
data = {
    'today': since,
    'metrics': metrics,
}
message = render_to_string(template.format(fmt='txt'), data)
message_html = render_to_string(template.format(fmt='html'), data)
msg = EmailMultiAlternatives(
    subject="Redis Metrics Report",
    body=message,
    from_email=settings.DEFAULT_FROM_EMAIL,
    to=[email for name, email in settings.ADMINS]
)
msg.attach_alternative(message_html, "text/html")
msg.send()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_tasks(self, value): """ Adds tasks to the existing set of tasks of the Stage :argument: set of tasks """
def add_tasks(self, value):
    """
    Adds tasks to the existing set of tasks of the Stage

    :argument: set of tasks
    """
    # Only entities that pass validation make it into the stage.
    for task in self._validate_entities(value):
        self._tasks.add(task)
    self._task_count = len(self._tasks)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Convert current Stage into a dictionary :return: python dictionary """
def to_dict(self):
    """
    Convert current Stage into a dictionary

    :return: python dictionary
    """
    # Keys mirror the attribute names consumed by from_dict().
    return {
        'uid': self._uid,
        'name': self._name,
        'state': self._state,
        'state_history': self._state_history,
        'parent_pipeline': self._p_pipeline,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(self, d): """ Create a Stage from a dictionary. The change is in inplace. :argument: python dictionary :return: None """
def from_dict(self, d):
    """
    Create a Stage from a dictionary. The change is in inplace.

    :argument: python dictionary
    :return: None
    """
    if 'uid' in d:
        if d['uid']:
            self._uid = d['uid']

    if 'name' in d:
        if d['name']:
            self._name = d['name']

    if 'state' in d:
        # NOTE(review): `unicode` and the keyword-argument exception
        # signatures assume this project's Python 2 era custom
        # TypeError/ValueError classes -- confirm before porting.
        if isinstance(d['state'], str) or isinstance(d['state'], unicode):
            if d['state'] in states._stage_state_values.keys():
                self._state = d['state']
            else:
                # FIX: was `actual_value=value` -- `value` is undefined in
                # this scope and raised a NameError instead of the intended
                # ValueError reporting the offending state.
                raise ValueError(obj=self._uid,
                                 attribute='state',
                                 expected_value=states._stage_state_values.keys(),
                                 actual_value=d['state'])
        else:
            raise TypeError(entity='state', expected_type=str,
                            actual_type=type(d['state']))
    else:
        self._state = states.INITIAL

    if 'state_history' in d:
        if isinstance(d['state_history'], list):
            self._state_history = d['state_history']
        else:
            raise TypeError(entity='state_history', expected_type=list,
                            actual_type=type(d['state_history']))

    if 'parent_pipeline' in d:
        if isinstance(d['parent_pipeline'], dict):
            self._p_pipeline = d['parent_pipeline']
        else:
            raise TypeError(entity='parent_pipeline', expected_type=dict,
                            actual_type=type(d['parent_pipeline']))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _make_spec_file(self): """Generates the text of an RPM spec file. Returns: A list of strings containing the lines of text. """
# Note that bdist_rpm can be an old style class. if issubclass(BdistRPMCommand, object): spec_file = super(BdistRPMCommand, self)._make_spec_file() else: spec_file = bdist_rpm._make_spec_file(self) if sys.version_info[0] < 3: python_package = "python" else: python_package = "python3" description = [] summary = "" in_description = False python_spec_file = [] for line in spec_file: if line.startswith("Summary: "): summary = line elif line.startswith("BuildRequires: "): line = "BuildRequires: {0:s}-setuptools".format(python_package) elif line.startswith("Requires: "): if python_package == "python3": line = line.replace("python", "python3") elif line.startswith("%description"): in_description = True elif line.startswith("%files"): line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format( python_package) elif line.startswith("%prep"): in_description = False python_spec_file.append( "%package -n {0:s}-%{{name}}".format(python_package)) python_spec_file.append("{0:s}".format(summary)) python_spec_file.append("") python_spec_file.append( "%description -n {0:s}-%{{name}}".format(python_package)) python_spec_file.extend(description) elif in_description: # Ignore leading white lines in the description. if not description and not line: continue description.append(line) python_spec_file.append(line) return python_spec_file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolve(self, name): """Call IStructured.resolve across all scopes and return first hit."""
def resolve(self, name):
    """Call IStructured.resolve across all scopes and return first hit."""
    # Innermost scope wins, hence the reversed iteration order.
    for current_scope in reversed(self.scopes):
        try:
            return structured.resolve(current_scope, name)
        except (KeyError, AttributeError):
            pass  # not defined here -- keep walking outward
    raise AttributeError(name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reflect(self, name): """Reflect 'name' starting with local scope all the way up to global. This method will attempt both static and runtime reflection. This is the recommended way of using reflection. Returns: Type of 'name', or protocol.AnyType. Caveat: The type of 'name' does not necessarily have to be an instance of Python's type - it depends on what the host application returns through the reflection API. For example, Rekall uses objects generated at runtime to simulate a native (C/C++) type system. """
def reflect(self, name):
    """Reflect 'name' starting with local scope all the way up to global.

    This method will attempt both static and runtime reflection. This is
    the recommended way of using reflection.

    Returns:
        Type of 'name', or protocol.AnyType if no scope can reflect it.
    """
    # Return whatever the most local scope defines this as, or bubble all
    # the way to the top.
    for current in reversed(self.scopes):
        # Classes are reflected statically; anything else at runtime.
        if isinstance(current, type):
            reflector = structured.reflect_static_member
        else:
            reflector = structured.reflect_runtime_member
        try:
            found = reflector(current, name)
        except (NotImplementedError, KeyError, AttributeError):
            continue
        if found is not None:
            return found
    return protocol.AnyType
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reflect_runtime_member(self, name): """Reflect 'name' using ONLY runtime reflection. You most likely want to use ScopeStack.reflect instead. Returns: Type of 'name', or protocol.AnyType. """
def reflect_runtime_member(self, name):
    """Reflect 'name' using ONLY runtime reflection.

    You most likely want to use ScopeStack.reflect instead.

    Returns:
        Type of 'name', or protocol.AnyType.
    """
    # Walk scopes innermost-first; first scope that can reflect wins.
    for current in reversed(self.scopes):
        try:
            return structured.reflect_runtime_member(current, name)
        except (NotImplementedError, KeyError, AttributeError):
            pass
    return protocol.AnyType
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reflect_static_member(cls, name): """Reflect 'name' using ONLY static reflection. You most likely want to use ScopeStack.reflect instead. Returns: Type of 'name', or protocol.AnyType. """
def reflect_static_member(cls, name):
    """Reflect 'name' using ONLY static reflection.

    You most likely want to use ScopeStack.reflect instead.

    Returns:
        Type of 'name', or protocol.AnyType.
    """
    # Walk scopes innermost-first; first scope that can reflect wins.
    for current in reversed(cls.scopes):
        try:
            return structured.reflect_static_member(current, name)
        except (NotImplementedError, KeyError, AttributeError):
            pass
    return protocol.AnyType
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_hostmap(profile):
    '''
    Derive a pilot-to-hostname map from the given profile: every 'hostname'
    event maps the emitting entity's uid to the host name carried in the
    event's message field.
    '''
    # FIXME: This should be replaced by proper hostname logging
    #        in `pilot.resource_details`.
    return {entry[ru.UID]: entry[ru.MSG]
            for entry in profile
            if entry[ru.EVENT] == 'hostname'}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_hostmap_deprecated(profiles):
    '''
    This method mangles combine_profiles and get_hostmap, and is deprecated.
    At this point it only returns the hostmap.

    Args:
        profiles: dict mapping profile file name -> list of profile rows.

    Returns:
        dict mapping pilot uid -> 'host:ip' string.
    '''
    hostmap = dict()  # map pilot IDs to host names

    # NOTE: '.items()' works on both py2 and py3; the previous
    #       '.iteritems()' was py2-only and raised AttributeError on py3.
    for pname, prof in profiles.items():

        if not len(prof):
            continue

        if not prof[0][ru.MSG]:
            continue

        # The first row's MSG carries the sync info as
        # 'host:ip:...:...:...' - keep host and ip as the host id.
        host, ip, _, _, _ = prof[0][ru.MSG].split(':')
        host_id = '%s:%s' % (host, ip)

        # A pilot is located via the PMGR_ACTIVE advance event in its
        # agent_0 profile; the first such event determines the host.
        for row in prof:

            if 'agent_0.prof' in pname    and \
                row[ru.EVENT] == 'advance' and \
                row[ru.STATE] == rps.PMGR_ACTIVE:
                hostmap[row[ru.UID]] = host_id
                break

    return hostmap
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def categorize_metrics(self): """Called only on a valid form, this method will place the chosen metrics in the given category."""
# Pull the validated values out of the form and hand them to the
# backend, which re-seeds the category with exactly these metrics.
cleaned = self.cleaned_data
self.r.reset_category(cleaned['category_name'], cleaned['metrics'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match(self, f, *args): """Match grammar function 'f' against next token and set 'self.matched'. Arguments: f: A grammar function - see efilter.parsers.common.grammar. Must return TokenMatch or None. args: Passed to 'f', if any. Returns: Instance of efilter.parsers.common.grammar.TokenMatch or None. Comment: If a match is returned, it will also be stored in self.matched. """
try: match = f(self.tokenizer, *args) except StopIteration: # The grammar function might have tried to access more tokens than # are available. That's not really an error, it just means it didn't # match. return if match is None: return if not isinstance(match, grammar.TokenMatch): raise TypeError("Invalid grammar function %r returned %r." % (f, match)) self.matched = match return match
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reject(self, f, *args): """Like 'match', but throw a parse error if 'f' matches. This is useful when a parser wants to be strict about specific things being prohibited. For example, DottySQL bans the use of SQL keywords as variable names. """
# If the prohibited grammar function matches, report the offending
# token as a parse error.
if self.match(f, *args):
    offending = self.peek(0)
    raise errors.EfilterParseError(
        query=self.tokenizer.source,
        token=offending,
        message="Was not expecting a %s here." % offending.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expect(self, f, *args): """Like 'accept' but throws a parse error if 'f' doesn't match."""
match = self.accept(f, *args) if match: return match try: func_name = f.func_name except AttributeError: func_name = "<unnamed grammar function>" start, end = self.current_position() raise errors.EfilterParseError( query=self.tokenizer.source, start=start, end=end, message="Was expecting %s here." % (func_name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_var(expr, vars): """Returns the value of the var named in the expression."""
# Resolve the var in the current scope; translate low-level lookup
# failures into descriptive efilter errors rooted at this expression.
try:
    return Result(structured.resolve(vars, expr.value), ())
except (KeyError, AttributeError) as e:
    # Raise a better exception for accessing a non-existent member.
    raise errors.EfilterKeyError(root=expr, key=expr.value, message=e,
                                 query=expr.source)
except (TypeError, ValueError) as e:
    # Raise a better exception for what is probably a null pointer error.
    if vars.locals is None:
        raise errors.EfilterNoneError(
            root=expr, query=expr.source,
            message="Trying to access member %r of a null." % expr.value)
    else:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="%r (vars: %r)" % (e, vars))
except NotImplementedError as e:
    # The scope object does not support member resolution at all.
    raise errors.EfilterError(
        root=expr, query=expr.source,
        message="Trying to access member %r of an instance of %r." %
        (expr.value, type(vars)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_repeat(expr, vars): """Build a repeated value from subexpressions."""
try: result = repeated.meld(*[solve(x, vars).value for x in expr.children]) return Result(result, ()) except TypeError: raise errors.EfilterTypeError( root=expr, query=expr.source, message="All values in a repeated value must be of the same type.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_tuple(expr, vars): """Build a tuple from subexpressions."""
result = tuple(solve(x, vars).value for x in expr.children) return Result(result, ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_ifelse(expr, vars): """Evaluate conditions and return the one that matches."""
for condition, result in expr.conditions(): if boolean.asbool(solve(condition, vars).value): return solve(result, vars) return solve(expr.default(), vars)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_map(expr, vars): """Solves the map-form, by recursively calling its RHS with new vars. let-forms are binary expressions. The LHS should evaluate to an IAssociative that can be used as new vars with which to solve a new query, of which the RHS is the root. In most cases, the LHS will be a Var (var). Typically, map-forms result from the dotty "dot" (.) operator. For example, the query "User.name" will translate to a map-form with the var "User" on LHS and a var to "name" on the RHS. With top-level vars being something like {"User": {"name": "Bob"}}, the Var on the LHS will evaluate to {"name": "Bob"}, which subdict will then be used on the RHS as new vars, and that whole form will evaluate to "Bob". """
lhs_values, _ = __solve_for_repeated(expr.lhs, vars) def lazy_map(): try: for lhs_value in repeated.getvalues(lhs_values): yield solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value except errors.EfilterNoneError as error: error.root = expr raise return Result(repeated.lazy(lazy_map), ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_let(expr, vars): """Solves a let-form by calling RHS with nested scope."""
lhs_value = solve(expr.lhs, vars).value if not isinstance(lhs_value, structured.IStructured): raise errors.EfilterTypeError( root=expr.lhs, query=expr.original, message="The LHS of 'let' must evaluate to an IStructured. Got %r." % (lhs_value,)) return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_filter(expr, vars): """Filter values on the LHS by evaluating RHS with each value. Returns any LHS values for which RHS evaluates to a true value. """
lhs_values, _ = __solve_for_repeated(expr.lhs, vars) def lazy_filter(): for lhs_value in repeated.getvalues(lhs_values): if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value: yield lhs_value return Result(repeated.lazy(lazy_filter), ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_sort(expr, vars): """Sort values on the LHS by the value they yield when passed to RHS."""
lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0]) sort_expression = expr.rhs def _key_func(x): return solve(sort_expression, __nest_scope(expr.lhs, vars, x)).value results = ordered.ordered(lhs_values, key_func=_key_func) return Result(repeated.meld(*results), ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_each(expr, vars): """Return True if RHS evaluates to a true value with each state of LHS. If LHS evaluates to a normal IAssociative object then this is the same as a regular let-form, except the return value is always a boolean. If LHS evaluates to a repeated var (see efilter.protocols.repeated) of IAssociative objects then RHS will be evaluated with each state and True will be returned only if each result is true. """
lhs_values, _ = __solve_for_repeated(expr.lhs, vars) for lhs_value in repeated.getvalues(lhs_values): result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)) if not result.value: # Each is required to return an actual boolean. return result._replace(value=False) return Result(True, ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_cast(expr, vars): """Get cast LHS to RHS."""
lhs = solve(expr.lhs, vars).value t = solve(expr.rhs, vars).value if t is None: raise errors.EfilterTypeError( root=expr, query=expr.source, message="Cannot find type named %r." % expr.rhs.value) if not isinstance(t, type): raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="%r is not a type and cannot be used with 'cast'." % (t,)) try: cast_value = t(lhs) except TypeError: raise errors.EfilterTypeError( root=expr, query=expr.source, message="Invalid cast %s -> %s." % (type(lhs), t)) return Result(cast_value, ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve_isinstance(expr, vars): """Typecheck whether LHS is type on the RHS."""
lhs = solve(expr.lhs, vars) try: t = solve(expr.rhs, vars).value except errors.EfilterKeyError: t = None if t is None: raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="Cannot find type named %r." % expr.rhs.value) if not isinstance(t, type): raise errors.EfilterTypeError( root=expr.rhs, query=expr.source, message="%r is not a type and cannot be used with 'isa'." % (t,)) return Result(protocol.implements(lhs.value, t), ())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_version(mod_root): """ A VERSION file containing the version string is created in mod_root during installation. That file is used at runtime to get the version information. """
# Derive the version string from ./VERSION plus git metadata, write it
# (and the sdist name) into mod_root for runtime inspection, and - during
# pip/easy_install packaging stages - build and stash the sdist tarball.
try:
    version_base = None
    version_detail = None

    # get version from './VERSION'
    src_root = os.path.dirname(__file__)
    if not src_root:
        src_root = '.'

    with open(src_root + '/VERSION', 'r') as f:
        version_base = f.readline().strip()

    # attempt to get version detail information from git
    # We only do that though if we are in a repo root dir,
    # ie. if 'git rev-parse --show-prefix' returns an empty string --
    # otherwise we get confused if the ve lives beneath another repository,
    # and the pip version used uses an install tmp dir in the ve space
    # instead of /tmp (which seems to happen with some pip/setuptools
    # versions).
    p = sp.Popen('cd %s ; '
                 'test -z `git rev-parse --show-prefix` || exit -1; '
                 'tag=`git describe --tags --always` 2>/dev/null ; '
                 'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; '
                 'echo $tag@$branch' % src_root,
                 stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
    version_detail = str(p.communicate()[0].strip())
    version_detail = version_detail.replace('detached from ', 'detached-')

    # remove all non-alphanumeric (and then some) chars
    version_detail = re.sub('[/ ]+', '-', version_detail)
    version_detail = re.sub('[^a-zA-Z0-9_+@.-]+', '', version_detail)

    # Any git failure marker means we fall back to the base version only.
    if p.returncode != 0 or \
        version_detail == '@' or \
        'git-error' in version_detail or \
        'not-a-git-repo' in version_detail or \
        'not-found' in version_detail or \
        'fatal' in version_detail:
        version = version_base
    elif '@' not in version_base:
        version = '%s-%s' % (version_base, version_detail)
    else:
        version = version_base

    # make sure the version files exist for the runtime version inspection
    path = '%s/%s' % (src_root, mod_root)
    with open(path + "/VERSION", "w") as f:
        f.write(version + "\n")

    # Normalize the sdist file name to the chars setuptools produces.
    # NOTE(review): 'name' is presumably a module-level global holding the
    #               package name - confirm against the enclosing setup.py.
    sdist_name = "%s-%s.tar.gz" % (name, version)
    sdist_name = sdist_name.replace('/', '-')
    sdist_name = sdist_name.replace('@', '-')
    sdist_name = sdist_name.replace('#', '-')
    sdist_name = sdist_name.replace('_', '-')

    if '--record' in sys.argv or \
        'bdist_egg' in sys.argv or \
        'bdist_wheel' in sys.argv:
        # pip install stage 2 or easy_install stage 1
        #
        # pip install will untar the sdist in a tmp tree. In that tmp
        # tree, we won't be able to derive git version tags -- so we pack
        # the formerly derived version as ./VERSION
        shutil.move("VERSION", "VERSION.bak")        # backup version
        shutil.copy("%s/VERSION" % path, "VERSION")  # use full version instead
        os.system("python setup.py sdist")           # build sdist
        shutil.copy('dist/%s' % sdist_name,
                    '%s/%s' % (mod_root, sdist_name))  # copy into tree
        shutil.move("VERSION.bak", "VERSION")        # restore version

    # Record the sdist name for runtime inspection as well.
    with open(path + "/SDIST", "w") as f:
        f.write(sdist_name + "\n")

    return version_base, version_detail, sdist_name

except Exception as e:
    raise RuntimeError('Could not extract/set version: %s' % e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def isgood(name): """ Whether name should be installed """
if not isbad(name): if name.endswith('.py') or name.endswith('.json') or name.endswith('.tar'): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def meld(*values): """Return the repeated value, or the first value if there's only one. This is a convenience function, equivalent to calling getvalue(repeated(x)) to get x. This function skips over instances of None in values (None is not allowed in repeated variables). Examples: meld("foo", "bar") # => ListRepetition("foo", "bar") meld("foo", "foo") # => ListRepetition("foo", "foo") meld("foo", None) # => "foo" meld(None) # => None """
values = [x for x in values if x is not None] if not values: return None result = repeated(*values) if isrepeating(result): return result return getvalue(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getvalue(x): """Return the single value of x or raise TypeError if more than one value."""
if isrepeating(x): raise TypeError( "Ambiguous call to getvalue for %r which has more than one value." % x) for value in getvalues(x): return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Convert current Task into a dictionary :return: python dictionary """
# Flatten this Task into plain python types so it can be stored or
# transmitted and later reconstructed.
task_desc_as_dict = {

    # identity and lifecycle
    'uid': self._uid,
    'name': self._name,
    'state': self._state,
    'state_history': self._state_history,

    # execution description
    'pre_exec': self._pre_exec,
    'executable': self._executable,
    'arguments': self._arguments,
    'post_exec': self._post_exec,
    'cpu_reqs': self._cpu_reqs,
    'gpu_reqs': self._gpu_reqs,
    'lfs_per_process': self._lfs_per_process,

    # data staging directives
    'upload_input_data': self._upload_input_data,
    'copy_input_data': self._copy_input_data,
    'link_input_data': self._link_input_data,
    'move_input_data': self._move_input_data,
    'copy_output_data': self._copy_output_data,
    'move_output_data': self._move_output_data,
    'download_output_data': self._download_output_data,

    # runtime results
    'stdout': self._stdout,
    'stderr': self._stderr,
    'exit_code': self._exit_code,
    'path': self._path,
    'tag': self._tag,

    # hierarchy back-references
    'parent_stage': self._p_stage,
    'parent_pipeline': self._p_pipeline,

}

return task_desc_as_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def keyword(tokens, expected): """Case-insensitive keyword match."""
try: token = next(iter(tokens)) except StopIteration: return if token and token.name == "symbol" and token.value.lower() == expected: return TokenMatch(None, token.value, (token,))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def multi_keyword(tokens, keyword_parts): """Match a case-insensitive keyword consisting of multiple tokens."""
tokens = iter(tokens) matched_tokens = [] limit = len(keyword_parts) for idx in six.moves.range(limit): try: token = next(tokens) except StopIteration: return if (not token or token.name != "symbol" or token.value.lower() != keyword_parts[idx]): return matched_tokens.append(token) return TokenMatch(None, token.value, matched_tokens)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prefix(tokens, operator_table): """Match a prefix of an operator."""
operator, matched_tokens = operator_table.prefix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def infix(tokens, operator_table): """Match an infix of an operator."""
operator, matched_tokens = operator_table.infix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def suffix(tokens, operator_table): """Match a suffix of an operator."""
operator, matched_tokens = operator_table.suffix.match(tokens) if operator: return TokenMatch(operator, None, matched_tokens)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_tokens(expected_tokens): """Generate a grammar function that will match 'expected_tokens' only."""
if isinstance(expected_tokens, Token):
    # Match a single token.
    def _grammar_func(tokens):
        try:
            next_token = next(iter(tokens))
        except StopIteration:
            # End of input - no match.
            return

        if next_token == expected_tokens:
            return TokenMatch(None, next_token.value, (next_token,))

elif isinstance(expected_tokens, tuple):
    # Match multiple tokens.
    match_len = len(expected_tokens)

    def _grammar_func(tokens):
        # Take exactly as many tokens as expected and compare wholesale.
        upcoming = tuple(itertools.islice(tokens, match_len))
        if upcoming == expected_tokens:
            return TokenMatch(None, None, upcoming)

else:
    # Note: this is raised when the factory itself is called with a bad
    # argument, not during matching.
    raise TypeError(
        "'expected_tokens' must be an instance of Token or a tuple "
        "thereof. Got %r." % expected_tokens)

return _grammar_func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expression(self, previous_precedence=0): """An expression is an atom or an infix expression. Grammar (sort of, actually a precedence-climbing parser): expression = atom [ binary_operator expression ] . Args: previous_precedence: What operator precedence should we start with? """
lhs = self.atom() return self.operator(lhs, previous_precedence)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def accept_operator(self, precedence): """Accept the next binary operator only if it's of higher precedence."""
match = grammar.infix(self.tokens) if not match: return if match.operator.precedence < precedence: return # The next thing is an operator that we want. Now match it for real. return self.tokens.accept(grammar.infix)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def operator(self, lhs, min_precedence): """Climb operator precedence as long as there are operators. This function implements a basic precedence climbing parser to deal with binary operators in a sane fashion. The outer loop will keep spinning as long as the next token is an operator with a precedence of at least 'min_precedence', parsing operands as atoms (which, in turn, recurse into 'expression' which recurses back into 'operator'). This supports both left- and right-associativity. The only part of the code that's not a regular precedence-climber deals with mixfix operators. A mixfix operator in DottySQL consists of an infix part and a suffix (they are still binary, they just have a terminator). """
# Spin as long as the next token is an operator of higher # precedence. (This may not do anything, which is fine.) while self.accept_operator(precedence=min_precedence): operator = self.tokens.matched.operator # If we're parsing a mixfix operator we can keep going until # the suffix. if operator.suffix: rhs = self.expression() self.tokens.expect(common_grammar.match_tokens(operator.suffix)) rhs.end = self.tokens.matched.end elif operator.name == ".": # The dot operator changes the meaning of RHS. rhs = self.dot_rhs() else: # The right hand side is an atom, which might turn out to be # an expression. Isn't recursion exciting? rhs = self.atom() # Keep going as long as the next token is an infix operator of # higher precedence. next_min_precedence = operator.precedence if operator.assoc == "left": next_min_precedence += 1 while self.tokens.match(grammar.infix): if (self.tokens.matched.operator.precedence < next_min_precedence): break rhs = self.operator(rhs, self.tokens.matched.operator.precedence) lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end, source=self.original) return lhs