Columns:
  _id               stringlengths   2 - 7
  title             stringlengths   1 - 88
  partition         stringclasses   3 values
  text              stringlengths   75 - 19.8k
  language          stringclasses   1 value
  meta_information  dict
q18300
create_supercut
train
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output the finished video file
    to the output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file'])
                and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end'])
                 for c in composition]

    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)

    print("[+] Writing output file.")
    final_clip.to_videofile(outputfile,
                            codec="libx264",
                            temp_audiofile='temp-audio.m4a',
                            remove_temp=True,
                            audio_codec='aac')
python
{ "resource": "" }
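A minimal usage sketch for create_supercut above; the file names, clip boundaries, and padding value are made up, and moviepy plus the module's own helpers (demo_supercut, VideoFileClip, concatenate) are assumed to be importable.

# Hypothetical composition list; times are in seconds.
composition = [
    {'file': 'interview.mp4', 'start': 12.5, 'end': 14.0},
    {'file': 'interview.mp4', 'start': 47.2, 'end': 49.1},
]
create_supercut(composition, 'supercut.mp4', padding=0.1)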
q18301
create_supercut_in_batches
train
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
        except:
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
            next

    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile,
                       codec="libx264",
                       temp_audiofile='temp-audio.m4a',
                       remove_temp=True,
                       audio_codec='aac')

    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)

    cleanup_log_files(outputfile)
python
{ "resource": "" }
q18302
search_line
train
def search_line(line, search, searchtype):
    """Return True if search term is found in given line, False otherwise."""
    if searchtype == 're' or searchtype == 'word':
        return re.search(search, line)  # , re.IGNORECASE)
    elif searchtype == 'pos':
        return searcher.search_out(line, search)
    elif searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
python
{ "resource": "" }
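A small sketch of search_line in its regular-expression mode; the 'pos' and 'hyper' modes are skipped here because they depend on the module's searcher helper. Note that the function actually returns the re.search match object (truthy) rather than a literal True.

print(bool(search_line("the quick brown fox", r"qu\w+", "re")))   # True
print(bool(search_line("the quick brown fox", r"zebra", "re")))   # False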
q18303
get_subtitle_files
train
def get_subtitle_files(inputfile):
    """Return a list of subtitle files."""
    srts = []

    for f in inputfile:
        filename = f.split('.')
        filename[-1] = 'srt'
        srt = '.'.join(filename)
        if os.path.isfile(srt):
            srts.append(srt)

    if len(srts) == 0:
        print("[!] No subtitle files were found.")
        return False

    return srts
python
{ "resource": "" }
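A hedged sketch of get_subtitle_files with made-up video names; the result depends on which .srt files actually exist next to the inputs.

videos = ['talk.mp4', 'lecture.webm']
subtitles = get_subtitle_files(videos)
# e.g. ['talk.srt', 'lecture.srt'] if both exist, or False (with a warning) if none do
print(subtitles)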
q18304
get_vtt_files
train
def get_vtt_files(inputfile):
    """Return a list of vtt files."""
    vtts = []

    for f in inputfile:
        filename = f.split('.')
        filename = '.'.join(filename[0:-1])
        vtt = glob(filename + '*.vtt')
        if len(vtt) > 0:
            vtts.append({'vtt': vtt[0], 'video': f})

    if len(vtts) == 0:
        print("[!] No vtt files were found.")
        return False

    return vtts
python
{ "resource": "" }
q18305
videogrep
train
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0,
              padding=0, test=False, randomize=False, sync=0,
              use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or
    transcript, create a supercut around that instance, and output a new video
    file comprised of those supercuts.
    """
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    foundSearchTerm = False

    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)

    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")

        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding

        if maxclips > 0:
            composition = composition[:maxclips]

        if randomize is True:
            random.shuffle(composition)

        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
python
{ "resource": "" }
q18306
basic_color
train
def basic_color(code):
    """
    16 colors supported
    """
    def inner(text, rl=False):
        """
        Every raw_input with color sequences should be called with
        rl=True to avoid readline messing up the length calculation
        """
        c = code
        if rl:
            return "\001\033[%sm\002%s\001\033[0m\002" % (c, text)
        else:
            return "\033[%sm%s\033[0m" % (c, text)
    return inner
python
{ "resource": "" }
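Usage sketch for basic_color: the returned closure wraps text in a 16-color ANSI escape sequence, and the rl=True variant adds the \001/\002 markers that keep readline's length accounting correct.

red = basic_color('31')                       # 31 is the standard ANSI code for red
print(red("error: something went wrong"))     # colored output on ANSI terminals
prompt = red("name> ", rl=True)               # readline-safe form for raw_input/input prompts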
q18307
term_color
train
def term_color(code):
    """
    256 colors supported
    """
    def inner(text, rl=False):
        """
        Every raw_input with color sequences should be called with
        rl=True to avoid readline messing up the length calculation
        """
        c = code
        if rl:
            return "\001\033[38;5;%sm\002%s\001\033[0m\002" % (c, text)
        else:
            return "\033[38;5;%sm%s\033[0m" % (c, text)
    return inner
python
{ "resource": "" }
q18308
color_func
train
def color_func(func_name):
    """
    Call color function based on name
    """
    if str(func_name).isdigit():
        return term_color(int(func_name))
    return globals()[func_name]
python
{ "resource": "" }
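Sketch of the color_func dispatch: digit strings are routed to term_color (256-color palette), anything else is looked up in the module's globals, so a name like 'red' only resolves if such a function (for example one built with basic_color) exists at module level.

orange = color_func('214')    # numeric -> term_color(214)
print(orange("warning"))
red = color_func('red')       # name lookup; assumes a module-level `red` color function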
q18309
MPlayer._send_command
train
def _send_command(self, cmd, expect=None): """Send a command to MPlayer. cmd: the command string expect: expect the output starts with a certain string The result, if any, is returned as a string. """ if not self.is_alive: raise NotPlayingError() logger.debug("Send command to mplayer: " + cmd) cmd = cmd + "\n" # In Py3k, TypeErrors will be raised because cmd is a string but stdin # expects bytes. In Python 2.x on the other hand, UnicodeEncodeErrors # will be raised if cmd is unicode. In both cases, encoding the string # will fix the problem. try: self.sub_proc.stdin.write(cmd) except (TypeError, UnicodeEncodeError): self.sub_proc.stdin.write(cmd.encode('utf-8', 'ignore')) time.sleep(0.1) # wait for mplayer (better idea?) # Expect a response for 'get_property' only if not expect: return while True: try: output = self.sub_proc.stdout.readline().rstrip() output = output.decode('utf-8') except IOError: return None # print output split_output = output.split('=') # print(split_output) if len(split_output) == 2 and split_output[0].strip() == expect: # We found it value = split_output[1] return value.strip()
python
{ "resource": "" }
q18310
EntitySpec.stream
train
def stream( self, accountID, **kwargs ): """ Get a stream of Transactions for an Account starting from when the request is made. Args: accountID: Account Identifier Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts/{accountID}/transactions/stream' ) request.set_path_param( 'accountID', accountID ) request.set_stream(True) class Parser(): def __init__(self, ctx): self.ctx = ctx def __call__(self, line): j = json.loads(line.decode('utf-8')) type = j.get("type") if type is None: return ("unknown", j) elif type == "HEARTBEAT": return ( "transaction.TransactionHeartbeat", self.ctx.transaction.TransactionHeartbeat.from_dict( j, self.ctx ) ) transaction = self.ctx.transaction.Transaction.from_dict( j, self.ctx ) return ( "transaction.Transaction", transaction ) request.set_line_parser( Parser(self.ctx) ) response = self.ctx.request(request) return response
python
{ "resource": "" }
q18311
EntitySpec.price
train
def price( self, instrument, **kwargs ): """ Fetch a price for an instrument. Accounts are not associated in any way with this endpoint. Args: instrument: Name of the Instrument time: The time at which the desired price is in effect. The current price is returned if no time is provided. Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/instruments/{instrument}/price' ) request.set_path_param( 'instrument', instrument ) request.set_param( 'time', kwargs.get('time') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('price') is not None: parsed_body['price'] = \ self.ctx.pricing_common.Price.from_dict( jbody['price'], self.ctx ) elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18312
EntitySpec.list
train
def list( self, accountID, **kwargs ): """ Get a list of Orders for an Account Args: accountID: Account Identifier ids: List of Order IDs to retrieve state: The state to filter the requested Orders by instrument: The instrument to filter the requested orders by count: The maximum number of Orders to return beforeID: The maximum Order ID to return. If not provided the most recent Orders in the Account are returned Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts/{accountID}/orders' ) request.set_path_param( 'accountID', accountID ) request.set_param( 'ids', kwargs.get('ids') ) request.set_param( 'state', kwargs.get('state') ) request.set_param( 'instrument', kwargs.get('instrument') ) request.set_param( 'count', kwargs.get('count') ) request.set_param( 'beforeID', kwargs.get('beforeID') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('orders') is not None: parsed_body['orders'] = [ self.ctx.order.Order.from_dict(d, self.ctx) for d in jbody.get('orders') ] if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18313
EntitySpec.cancel
train
def cancel( self, accountID, orderSpecifier, **kwargs ): """ Cancel a pending Order in an Account Args: accountID: Account Identifier orderSpecifier: The Order Specifier Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'PUT', '/v3/accounts/{accountID}/orders/{orderSpecifier}/cancel' ) request.set_path_param( 'accountID', accountID ) request.set_path_param( 'orderSpecifier', orderSpecifier ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('orderCancelTransaction') is not None: parsed_body['orderCancelTransaction'] = \ self.ctx.transaction.OrderCancelTransaction.from_dict( jbody['orderCancelTransaction'], self.ctx ) if jbody.get('relatedTransactionIDs') is not None: parsed_body['relatedTransactionIDs'] = \ jbody.get('relatedTransactionIDs') if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('orderCancelRejectTransaction') is not None: parsed_body['orderCancelRejectTransaction'] = \ self.ctx.transaction.OrderCancelRejectTransaction.from_dict( jbody['orderCancelRejectTransaction'], self.ctx ) if jbody.get('relatedTransactionIDs') is not None: parsed_body['relatedTransactionIDs'] = \ jbody.get('relatedTransactionIDs') if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18314
EntitySpec.market
train
def market(self, accountID, **kwargs): """ Shortcut to create a Market Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=MarketOrderRequest(**kwargs) )
python
{ "resource": "" }
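A hedged sketch of the market-order shortcut above; it assumes a configured v20 Context named ctx and that MarketOrderRequest accepts instrument/units keyword arguments, and the account ID shown is made up.

response = ctx.order.market(
    "101-001-1234567-001",     # hypothetical account ID
    instrument="EUR_USD",
    units=100,                 # positive units = buy, negative = sell
)
print(response.status)         # HTTP status of the create-order request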
q18315
EntitySpec.limit
train
def limit(self, accountID, **kwargs): """ Shortcut to create a Limit Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=LimitOrderRequest(**kwargs) )
python
{ "resource": "" }
q18316
EntitySpec.limit_replace
train
def limit_replace(self, accountID, orderID, **kwargs): """ Shortcut to replace a pending Limit Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Limit Order to replace kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.replace( accountID, orderID, order=LimitOrderRequest(**kwargs) )
python
{ "resource": "" }
q18317
EntitySpec.stop
train
def stop(self, accountID, **kwargs): """ Shortcut to create a Stop Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=StopOrderRequest(**kwargs) )
python
{ "resource": "" }
q18318
EntitySpec.stop_replace
train
def stop_replace(self, accountID, orderID, **kwargs): """ Shortcut to replace a pending Stop Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Stop Order to replace kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.replace( accountID, orderID, order=StopOrderRequest(**kwargs) )
python
{ "resource": "" }
q18319
EntitySpec.market_if_touched
train
def market_if_touched(self, accountID, **kwargs): """ Shortcut to create a MarketIfTouched Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=MarketIfTouchedOrderRequest(**kwargs) )
python
{ "resource": "" }
q18320
EntitySpec.market_if_touched_replace
train
def market_if_touched_replace(self, accountID, orderID, **kwargs): """ Shortcut to replace a pending MarketIfTouched Order in an Account Args: accountID : The ID of the Account orderID : The ID of the MarketIfTouched Order to replace kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.replace( accountID, orderID, order=MarketIfTouchedOrderRequest(**kwargs) )
python
{ "resource": "" }
q18321
EntitySpec.take_profit
train
def take_profit(self, accountID, **kwargs): """ Shortcut to create a Take Profit Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a TakeProfitOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=TakeProfitOrderRequest(**kwargs) )
python
{ "resource": "" }
q18322
EntitySpec.take_profit_replace
train
def take_profit_replace(self, accountID, orderID, **kwargs): """ Shortcut to replace a pending Take Profit Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Take Profit Order to replace kwargs : The arguments to create a TakeProfitOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.replace( accountID, orderID, order=TakeProfitOrderRequest(**kwargs) )
python
{ "resource": "" }
q18323
EntitySpec.stop_loss
train
def stop_loss(self, accountID, **kwargs): """ Shortcut to create a Stop Loss Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=StopLossOrderRequest(**kwargs) )
python
{ "resource": "" }
q18324
EntitySpec.stop_loss_replace
train
def stop_loss_replace(self, accountID, orderID, **kwargs): """ Shortcut to replace a pending Stop Loss Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Stop Loss Order to replace kwargs : The arguments to create a StopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.replace( accountID, orderID, order=StopLossOrderRequest(**kwargs) )
python
{ "resource": "" }
q18325
EntitySpec.trailing_stop_loss
train
def trailing_stop_loss(self, accountID, **kwargs): """ Shortcut to create a Trailing Stop Loss Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a TrailingStopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request """ return self.create( accountID, order=TrailingStopLossOrderRequest(**kwargs) )
python
{ "resource": "" }
q18326
EntitySpec.trailing_stop_loss_replace
train
def trailing_stop_loss_replace(self, accountID, orderID, **kwargs):
    """
    Shortcut to replace a pending Trailing Stop Loss Order in an Account

    Args:
        accountID : The ID of the Account
        orderID : The ID of the Trailing Stop Loss Order to replace
        kwargs : The arguments to create a TrailingStopLossOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    return self.replace(
        accountID,
        orderID,
        order=TrailingStopLossOrderRequest(**kwargs)
    )
python
{ "resource": "" }
q18327
Context.set_token
train
def set_token(self, token):
    """
    Set the token for the v20 context

    Args:
        token: The token used to access the v20 REST api
    """
    self.token = token
    self.set_header(
        'Authorization',
        "Bearer {}".format(token)
    )
python
{ "resource": "" }
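Illustrative sketch only: the Context constructor arguments shown here are assumptions (check the v20 package for the real signature); the point is that set_token simply installs an Authorization header used by all later requests.

import v20   # assumed import of the package that defines Context

ctx = v20.Context("api-fxpractice.oanda.com", 443)     # hypothetical endpoint and port
ctx.set_token("my-personal-access-token")
# every request now carries: Authorization: Bearer my-personal-access-token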
q18328
Context.set_datetime_format
train
def set_datetime_format(self, format):
    """
    Set the Accept-Datetime-Format header to an acceptable value

    Args:
        format: UNIX or RFC3339
    """
    if format not in ["UNIX", "RFC3339"]:
        return

    self.datetime_format = format

    self.set_header("Accept-Datetime-Format", self.datetime_format)
python
{ "resource": "" }
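Sketch assuming a Context instance ctx as in the previous sketch: only the two accepted values take effect, anything else is silently ignored.

ctx.set_datetime_format("RFC3339")    # timestamps returned as RFC3339 strings
ctx.set_datetime_format("ISO8601")    # not accepted -> header left unchanged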
q18329
Context.request
train
def request(self, request): """ Perform an HTTP request through the context Args: request: A v20.request.Request object Returns: A v20.response.Response object """ url = "{}{}".format(self._base_url, request.path) timeout = self.poll_timeout if request.stream is True: timeout = self.stream_timeout try: http_response = self._session.request( request.method, url, headers=self._headers, params=request.params, data=request.body, stream=request.stream, timeout=timeout ) except requests.exceptions.ConnectionError: raise V20ConnectionError(url) except requests.exceptions.ConnectTimeout: raise V20Timeout(url, "connect") except requests.exceptions.ReadTimeout: raise V20Timeout(url, "read") request.headers = http_response.request.headers response = Response( request, request.method, http_response.url, http_response.status_code, http_response.reason, http_response.headers ) if request.stream: response.set_line_parser( request.line_parser ) response.set_lines( http_response.iter_lines( self.stream_chunk_size ) ) else: response.set_raw_body(http_response.text) return response
python
{ "resource": "" }
q18330
EntitySpec.list
train
def list( self, **kwargs ): """ Get a list of all Accounts authorized for the provided token. Args: Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts' ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('accounts') is not None: parsed_body['accounts'] = [ self.ctx.account.AccountProperties.from_dict(d, self.ctx) for d in jbody.get('accounts') ] elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18331
EntitySpec.instruments
train
def instruments( self, accountID, **kwargs ): """ Get the list of tradeable instruments for the given Account. The list of tradeable instruments is dependent on the regulatory division that the Account is located in, thus should be the same for all Accounts owned by a single user. Args: accountID: Account Identifier instruments: List of instruments to query specifically. Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts/{accountID}/instruments' ) request.set_path_param( 'accountID', accountID ) request.set_param( 'instruments', kwargs.get('instruments') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('instruments') is not None: parsed_body['instruments'] = [ self.ctx.primitives.Instrument.from_dict(d, self.ctx) for d in jbody.get('instruments') ] if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18332
EntitySpec.get_info
train
def get_info( self, userSpecifier, **kwargs ): """ Fetch the user information for the specified user. This endpoint is intended to be used by the user themself to obtain their own information. Args: userSpecifier: The User Specifier Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/users/{userSpecifier}' ) request.set_path_param( 'userSpecifier', userSpecifier ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('userInfo') is not None: parsed_body['userInfo'] = \ self.ctx.user.UserInfo.from_dict( jbody['userInfo'], self.ctx ) elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "403": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18333
EntitySpec.get
train
def get( self, accountID, **kwargs ): """ Get pricing information for a specified list of Instruments within an Account. Args: accountID: Account Identifier instruments: List of Instruments to get pricing for. since: Date/Time filter to apply to the response. Only prices and home conversions (if requested) with a time later than this filter (i.e. the price has changed after the since time) will be provided, and are filtered independently. includeUnitsAvailable: Flag that enables the inclusion of the unitsAvailable field in the returned Price objects. includeHomeConversions: Flag that enables the inclusion of the homeConversions field in the returned response. An entry will be returned for each currency in the set of all base and quote currencies present in the requested instruments list. Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/accounts/{accountID}/pricing' ) request.set_path_param( 'accountID', accountID ) request.set_param( 'instruments', kwargs.get('instruments') ) request.set_param( 'since', kwargs.get('since') ) request.set_param( 'includeUnitsAvailable', kwargs.get('includeUnitsAvailable') ) request.set_param( 'includeHomeConversions', kwargs.get('includeHomeConversions') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('prices') is not None: parsed_body['prices'] = [ self.ctx.pricing.ClientPrice.from_dict(d, self.ctx) for d in jbody.get('prices') ] if jbody.get('homeConversions') is not None: parsed_body['homeConversions'] = [ self.ctx.pricing.HomeConversions.from_dict(d, self.ctx) for d in jbody.get('homeConversions') ] if jbody.get('time') is not None: parsed_body['time'] = \ jbody.get('time') elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
python
{ "resource": "" }
q18334
OutputHandler.output_for_skipping_run_set
train
def output_for_skipping_run_set(self, runSet, reason=None): ''' This function writes a simple message to terminal and logfile, when a run set is skipped. There is no message about skipping a run set in the xml-file. ''' # print to terminal util.printOut("\nSkipping run set" + (" '" + runSet.name + "'" if runSet.name else "") + (" " + reason if reason else "") ) # write into txt_file runSetInfo = "\n\n" if runSet.name: runSetInfo += runSet.name + "\n" runSetInfo += "Run set {0} of {1}: skipped {2}\n".format( runSet.index, len(self.benchmark.run_sets), reason or "") self.txt_file.append(runSetInfo)
python
{ "resource": "" }
q18335
OutputHandler.writeRunSetInfoToLog
train
def writeRunSetInfoToLog(self, runSet): """ This method writes the information about a run set into the txt_file. """ runSetInfo = "\n\n" if runSet.name: runSetInfo += runSet.name + "\n" runSetInfo += "Run set {0} of {1} with options '{2}' and propertyfile '{3}'\n\n".format( runSet.index, len(self.benchmark.run_sets), " ".join(runSet.options), runSet.propertyfile) titleLine = self.create_output_line(runSet, "inputfile", "status", "cpu time", "wall time", "host", self.benchmark.columns, True) runSet.simpleLine = "-" * (len(titleLine)) runSetInfo += titleLine + "\n" + runSet.simpleLine + "\n" # write into txt_file self.txt_file.append(runSetInfo)
python
{ "resource": "" }
q18336
OutputHandler.runs_to_xml
train
def runs_to_xml(self, runSet, runs, blockname=None): """ This function creates the XML structure for a list of runs """ # copy benchmarkinfo, limits, columntitles, systeminfo from xml_header runsElem = util.copy_of_xml_element(self.xml_header) runsElem.set("options", " ".join(runSet.options)) if blockname is not None: runsElem.set("block", blockname) runsElem.set("name", ((runSet.real_name + ".") if runSet.real_name else "") + blockname) elif runSet.real_name: runsElem.set("name", runSet.real_name) # collect XMLelements from all runs for run in runs: runsElem.append(run.xml) return runsElem
python
{ "resource": "" }
q18337
OutputHandler.add_values_to_run_xml
train
def add_values_to_run_xml(self, run): """ This function adds the result values to the XML representation of a run. """ runElem = run.xml for elem in list(runElem): runElem.remove(elem) self.add_column_to_xml(runElem, 'status', run.status) self.add_column_to_xml(runElem, 'cputime', run.cputime) self.add_column_to_xml(runElem, 'walltime', run.walltime) self.add_column_to_xml(runElem, '@category', run.category) # hidden self.add_column_to_xml(runElem, '', run.values) for column in run.columns: self.add_column_to_xml(runElem, column.title, column.value) # Sort child elements by hidden and title attributes runElem[:] = sorted(runElem, key=lambda elem : (elem.get('hidden', ''), elem.get('title')))
python
{ "resource": "" }
q18338
OutputHandler.add_values_to_run_set_xml
train
def add_values_to_run_set_xml(self, runSet, cputime, walltime, energy):
    """
    This function adds the result values to the XML representation of a runSet.
    """
    self.add_column_to_xml(runSet.xml, 'cputime', cputime)
    self.add_column_to_xml(runSet.xml, 'walltime', walltime)
    energy = intel_cpu_energy.format_energy_results(energy)
    for energy_key, energy_value in energy.items():
        self.add_column_to_xml(runSet.xml, energy_key, energy_value)
python
{ "resource": "" }
q18339
OutputHandler.format_sourcefile_name
train
def format_sourcefile_name(self, fileName, runSet):
    '''
    Formats the file name of a program for printing on console.
    '''
    if fileName.startswith(runSet.common_prefix):
        fileName = fileName[len(runSet.common_prefix):]
    return fileName.ljust(runSet.max_length_of_filename + 4)
python
{ "resource": "" }
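Self-contained sketch of format_sourcefile_name using a stand-in for the runSet object; since self is unused, the unbound method can be exercised directly on the OutputHandler class defined in these records.

from types import SimpleNamespace

run_set = SimpleNamespace(common_prefix='/benchmarks/tasks/', max_length_of_filename=20)
name = OutputHandler.format_sourcefile_name(None, '/benchmarks/tasks/loop.c', run_set)
print(repr(name))    # 'loop.c' padded with spaces to 24 characters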
q18340
OutputHandler._write_pretty_result_xml_to_file
train
def _write_pretty_result_xml_to_file(self, xml, filename):
    """Writes a nicely formatted XML file with DOCTYPE, and compressed if necessary."""
    if self.compress_results:
        actual_filename = filename + ".bz2"
        # Use BZ2File directly or our hack for Python 3.2
        open_func = bz2.BZ2File if hasattr(bz2.BZ2File, 'writable') else util.BZ2FileHack
    else:
        # write content to temp file first to prevent losing data
        # in existing file if writing fails
        actual_filename = filename + ".tmp"
        open_func = open

    with io.TextIOWrapper(open_func(actual_filename, 'wb'), encoding='utf-8') as file:
        rough_string = ET.tostring(xml, encoding='unicode')
        reparsed = minidom.parseString(rough_string)
        doctype = minidom.DOMImplementation().createDocumentType(
            'result', RESULT_XML_PUBLIC_ID, RESULT_XML_SYSTEM_ID)
        reparsed.insertBefore(doctype, reparsed.documentElement)
        reparsed.writexml(file, indent="", addindent=" ", newl="\n", encoding="utf-8")

    if self.compress_results:
        # try to delete uncompressed file (would have been overwritten in no-compress-mode)
        try:
            os.remove(filename)
        except OSError:
            pass
        self.all_created_files.discard(filename)
        self.all_created_files.add(actual_filename)
    else:
        os.rename(actual_filename, filename)
        self.all_created_files.add(filename)

    return filename
python
{ "resource": "" }
q18341
get_extract_value_function
train
def get_extract_value_function(column_identifier): """ returns a function that extracts the value for a column. """ def extract_value(run_result): pos = None for i, column in enumerate(run_result.columns): if column.title == column_identifier: pos = i break if pos is None: sys.exit('CPU time missing for task {0}.'.format(run_result.task_id[0])) return Util.to_decimal(run_result.values[pos]) return extract_value
python
{ "resource": "" }
q18342
check_memory_size
train
def check_memory_size(memLimit, num_of_threads, memoryAssignment, my_cgroups): """Check whether the desired amount of parallel benchmarks fits in the memory. Implemented are checks for memory limits via cgroup controller "memory" and memory bank restrictions via cgroup controller "cpuset", as well as whether the system actually has enough memory installed. @param memLimit: the memory limit in bytes per run @param num_of_threads: the number of parallel benchmark executions @param memoryAssignment: the allocation of memory banks to runs (if not present, all banks are assigned to all runs) """ try: # Check amount of memory allowed via cgroups. def check_limit(actualLimit): if actualLimit < memLimit: sys.exit("Cgroups allow only {} bytes of memory to be used, cannot execute runs with {} bytes of memory.".format(actualLimit, memLimit)) elif actualLimit < memLimit * num_of_threads: sys.exit("Cgroups allow only {} bytes of memory to be used, not enough for {} benchmarks with {} bytes each. Please reduce the number of threads".format(actualLimit, num_of_threads, memLimit)) if not os.path.isdir('/sys/devices/system/node/'): logging.debug("System without NUMA support in Linux kernel, ignoring memory assignment.") return if cgroups.MEMORY in my_cgroups: # We use the entries hierarchical_*_limit in memory.stat and not memory.*limit_in_bytes # because the former may be lower if memory.use_hierarchy is enabled. for key, value in my_cgroups.get_key_value_pairs(cgroups.MEMORY, 'stat'): if key == 'hierarchical_memory_limit' or key == 'hierarchical_memsw_limit': check_limit(int(value)) # Get list of all memory banks, either from memory assignment or from system. if not memoryAssignment: if cgroups.CPUSET in my_cgroups: allMems = my_cgroups.read_allowed_memory_banks() else: allMems = _get_memory_banks_listed_in_dir('/sys/devices/system/node/') memoryAssignment = [allMems] * num_of_threads # "fake" memory assignment: all threads on all banks else: allMems = set(itertools.chain(*memoryAssignment)) memSizes = dict((mem, _get_memory_bank_size(mem)) for mem in allMems) except ValueError as e: sys.exit("Could not read memory information from kernel: {0}".format(e)) # Check whether enough memory is allocatable on the assigned memory banks. # As the sum of the sizes of the memory banks is at most the total size of memory in the system, # and we do this check always even if the banks are not restricted, # this also checks whether the system has actually enough memory installed. usedMem = collections.Counter() for mems_of_run in memoryAssignment: totalSize = sum(memSizes[mem] for mem in mems_of_run) if totalSize < memLimit: sys.exit("Memory banks {} do not have enough memory for one run, only {} bytes available.".format(mems_of_run, totalSize)) usedMem[tuple(mems_of_run)] += memLimit if usedMem[tuple(mems_of_run)] > totalSize: sys.exit("Memory banks {} do not have enough memory for all runs, only {} bytes available. Please reduce the number of threads.".format(mems_of_run, totalSize))
python
{ "resource": "" }
q18343
_get_memory_bank_size
train
def _get_memory_bank_size(memBank):
    """Get the size of a memory bank in bytes."""
    fileName = '/sys/devices/system/node/node{0}/meminfo'.format(memBank)
    size = None
    with open(fileName) as f:
        for line in f:
            if 'MemTotal' in line:
                size = line.split(':')[1].strip()
                if size[-3:] != ' kB':
                    raise ValueError('"{}" in file {} is not a memory size.'.format(size, fileName))
                size = int(size[:-3]) * 1024  # kernel uses KiB but names them kB, convert to Byte
                logging.debug("Memory bank %s has size %s bytes.", memBank, size)
                return size
    raise ValueError('Failed to read total memory from {}.'.format(fileName))
python
{ "resource": "" }
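A standalone sketch of the size parsing that _get_memory_bank_size performs, run on a canned meminfo-style line so no /sys access is needed.

line = "Node 0 MemTotal:       16384000 kB"
size = line.split(':')[1].strip()
assert size[-3:] == ' kB'
size_bytes = int(size[:-3]) * 1024    # kernel reports KiB but labels it kB
print(size_bytes)                     # 16777216000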
q18344
Tool.determine_result
train
def determine_result(self, returncode, returnsignal, output, isTimeout): """ Returns a BenchExec result status based on the output of SMACK """ splitout = "\n".join(output) if 'SMACK found no errors' in splitout: return result.RESULT_TRUE_PROP errmsg = re.search(r'SMACK found an error(:\s+([^\.]+))?\.', splitout) if errmsg: errtype = errmsg.group(2) if errtype: if 'invalid pointer dereference' == errtype: return result.RESULT_FALSE_DEREF elif 'invalid memory deallocation' == errtype: return result.RESULT_FALSE_FREE elif 'memory leak' == errtype: return result.RESULT_FALSE_MEMTRACK elif 'memory cleanup' == errtype: return result.RESULT_FALSE_MEMCLEANUP elif 'integer overflow' == errtype: return result.RESULT_FALSE_OVERFLOW else: return result.RESULT_FALSE_REACH return result.RESULT_UNKNOWN
python
{ "resource": "" }
q18345
ContainerExecutor._get_result_files_base
train
def _get_result_files_base(self, temp_dir):
    """Given the temp directory that is created for each run, return the path to the directory
    where files created by the tool are stored."""
    if not self._use_namespaces:
        return super(ContainerExecutor, self)._get_result_files_base(temp_dir)
    else:
        return os.path.join(temp_dir, "temp")
python
{ "resource": "" }
q18346
ContainerExecutor.execute_run
train
def execute_run(self, args, workingDir=None, output_dir=None, result_files_patterns=[], rootDir=None, environ=os.environ.copy()): """ This method executes the command line and waits for the termination of it, handling all setup and cleanup. @param args: the command line to run @param rootDir: None or a root directory that contains all relevant files for starting a new process @param workingDir: None or a directory which the execution should use as working directory @param output_dir: the directory where to write result files (required if result_files_pattern) @param result_files_patterns: a list of patterns of files to retrieve as result files """ # preparations temp_dir = None if rootDir is None: temp_dir = tempfile.mkdtemp(prefix="Benchexec_run_") pid = None returnvalue = 0 logging.debug('Starting process.') try: pid, result_fn = self._start_execution(args=args, stdin=None, stdout=None, stderr=None, env=environ, root_dir=rootDir, cwd=workingDir, temp_dir=temp_dir, cgroups=Cgroup({}), output_dir=output_dir, result_files_patterns=result_files_patterns, child_setup_fn=util.dummy_fn, parent_setup_fn=util.dummy_fn, parent_cleanup_fn=util.dummy_fn) with self.SUB_PROCESS_PIDS_LOCK: self.SUB_PROCESS_PIDS.add(pid) returnvalue, unused_ru_child, unused = result_fn() # blocks until process has terminated finally: # cleanup steps that need to get executed even in case of failure logging.debug('Process terminated, exit code %s.', returnvalue) with self.SUB_PROCESS_PIDS_LOCK: self.SUB_PROCESS_PIDS.discard(pid) if temp_dir is not None: logging.debug('Cleaning up temporary directory.') util.rmtree(temp_dir, onerror=util.log_rmtree_error) # cleanup steps that are only relevant in case of success return util.ProcessExitCode.from_raw(returnvalue)
python
{ "resource": "" }
q18347
ContainerExecutor._setup_root_filesystem
train
def _setup_root_filesystem(self, root_dir): """Setup the filesystem layout in the given root directory. Create a copy of the existing proc- and dev-mountpoints in the specified root directory. Afterwards we chroot into it. @param root_dir: The path of the root directory that is used to execute the process. """ root_dir = root_dir.encode() # Create an empty proc folder into the root dir. The grandchild still needs a view of # the old /proc, therefore we do not mount a fresh /proc here. proc_base = os.path.join(root_dir, b"proc") util.makedirs(proc_base, exist_ok=True) dev_base = os.path.join(root_dir, b"dev") util.makedirs(dev_base, exist_ok=True) # Create a copy of the host's dev- and proc-mountpoints. # They are marked as private in order to not being changed # by existing mounts during run execution. container.make_bind_mount(b"/dev/", dev_base, recursive=True, private=True) container.make_bind_mount(b"/proc/", proc_base, recursive=True, private=True) os.chroot(root_dir)
python
{ "resource": "" }
q18348
ContainerExecutor._transfer_output_files
train
def _transfer_output_files(self, tool_output_dir, working_dir, output_dir, patterns): """Transfer files created by the tool in the container to the output directory. @param tool_output_dir: The directory under which all tool output files are created. @param working_dir: The absolute working directory of the tool in the container. @param output_dir: the directory where to write result files @param patterns: a list of patterns of files to retrieve as result files """ assert output_dir and patterns if any(os.path.isabs(pattern) for pattern in patterns): base_dir = tool_output_dir else: base_dir = tool_output_dir + working_dir def transfer_file(abs_file): assert abs_file.startswith(base_dir) # We ignore (empty) directories, because we create them for hidden dirs etc. # We ignore device nodes, because overlayfs creates them. # We also ignore all other files (symlinks, fifos etc.), # because they are probably irrelevant, and just handle regular files. file = os.path.join("/", os.path.relpath(abs_file, base_dir)) if (os.path.isfile(abs_file) and not os.path.islink(abs_file) and not container.is_container_system_config_file(file)): target = output_dir + file logging.debug("Transferring output file %s to %s", abs_file, target) try: os.makedirs(os.path.dirname(target)) except EnvironmentError: pass # exist_ok=True not supported on Python 2 try: # move is more efficient than copy in case both abs_file and target # are on the same filesystem, and it avoids matching the file again # with the next pattern. shutil.move(abs_file, target) except EnvironmentError as e: logging.warning("Could not retrieve output file '%s': %s", file, e) for pattern in patterns: if os.path.isabs(pattern): pattern = tool_output_dir + pattern else: pattern = tool_output_dir + os.path.join(working_dir, pattern) # normalize pattern for preventing directory traversal attacks: for abs_file in util.maybe_recursive_iglob(os.path.normpath(pattern), recursive=True): # Recursive matching is only supported starting with Python 3.5, # so we allow the user to match directories and transfer them recursively. if os.path.isdir(abs_file): for root, unused_dirs, files in os.walk(abs_file): for file in files: transfer_file(os.path.join(root, file)) else: transfer_file(abs_file)
python
{ "resource": "" }
q18349
parse_table_definition_file
train
def parse_table_definition_file(file):
    '''
    Read and parse the XML of a table-definition file.
    @return: an ElementTree object for the table definition
    '''
    logging.info("Reading table definition from '%s'...", file)
    if not os.path.isfile(file):
        logging.error("File '%s' does not exist.", file)
        exit(1)

    try:
        tableGenFile = ElementTree.ElementTree().parse(file)
    except IOError as e:
        logging.error('Could not read result file %s: %s', file, e)
        exit(1)
    except ElementTree.ParseError as e:
        logging.error('Table file %s is invalid: %s', file, e)
        exit(1)
    if 'table' != tableGenFile.tag:
        logging.error("Table file %s is invalid: Its root element is not named 'table'.", file)
        exit(1)

    return tableGenFile
python
{ "resource": "" }
q18350
load_results_from_table_definition
train
def load_results_from_table_definition(table_definition, table_definition_file, options): """ Load all results in files that are listed in the given table-definition file. @return: a list of RunSetResult objects """ default_columns = extract_columns_from_table_definition_file(table_definition, table_definition_file) columns_relevant_for_diff = _get_columns_relevant_for_diff(default_columns) results = [] for tag in table_definition: if tag.tag == 'result': columns = extract_columns_from_table_definition_file(tag, table_definition_file) or default_columns run_set_id = tag.get('id') for resultsFile in get_file_list_from_result_tag(tag, table_definition_file): results.append(parallel.submit( load_result, resultsFile, options, run_set_id, columns, columns_relevant_for_diff)) elif tag.tag == 'union': results.append(parallel.submit( handle_union_tag, tag, table_definition_file, options, default_columns, columns_relevant_for_diff)) return [future.result() for future in results]
python
{ "resource": "" }
q18351
load_results_with_table_definition
train
def load_results_with_table_definition(result_files, table_definition, table_definition_file, options):
    """
    Load results from given files with column definitions taken from a table-definition file.
    @return: a list of RunSetResult objects
    """
    columns = extract_columns_from_table_definition_file(table_definition, table_definition_file)
    columns_relevant_for_diff = _get_columns_relevant_for_diff(columns)

    return load_results(
        result_files,
        options=options,
        columns=columns,
        columns_relevant_for_diff=columns_relevant_for_diff)
python
{ "resource": "" }
q18352
extract_columns_from_table_definition_file
train
def extract_columns_from_table_definition_file(xmltag, table_definition_file): """ Extract all columns mentioned in the result tag of a table definition file. """ def handle_path(path): """Convert path from a path relative to table-definition file.""" if not path or path.startswith("http://") or path.startswith("https://"): return path return os.path.join(os.path.dirname(table_definition_file), path) columns = list() for c in xmltag.findall('column'): scale_factor = c.get("scaleFactor") display_unit = c.get("displayUnit") source_unit = c.get("sourceUnit") new_column = Column(c.get("title"), c.text, c.get("numberOfDigits"), handle_path(c.get("href")), None, display_unit, source_unit, scale_factor, c.get("relevantForDiff"), c.get("displayTitle")) columns.append(new_column) return columns
python
{ "resource": "" }
q18353
_get_columns_relevant_for_diff
train
def _get_columns_relevant_for_diff(columns_to_show):
    """
    Extract columns that are relevant for the diff table.

    @param columns_to_show: (list) A list of columns that should be shown
    @return: (set) Set of columns that are relevant for the diff table. If
             none is marked relevant, the column named "status" will be
             returned in the set.
    """
    cols = set([col.title for col in columns_to_show if col.relevant_for_diff])
    if len(cols) == 0:
        return set(
            [col.title for col in columns_to_show if col.title == "status"])
    else:
        return cols
python
{ "resource": "" }
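Self-contained sketch with a minimal stand-in for the Column objects, showing the fallback to the "status" column when nothing is marked relevant.

from types import SimpleNamespace

cols = [SimpleNamespace(title="status", relevant_for_diff=False),
        SimpleNamespace(title="cputime", relevant_for_diff=False)]
print(_get_columns_relevant_for_diff(cols))    # {'status'}  (fallback)

cols[1].relevant_for_diff = True
print(_get_columns_relevant_for_diff(cols))    # {'cputime'}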
q18354
get_task_id
train
def get_task_id(task, base_path_or_url):
    """
    Return a unique identifier for a given task.
    @param task: the XML element that represents a task
    @return a tuple with filename of task as first element
    """
    name = task.get('name')
    if base_path_or_url:
        if Util.is_url(base_path_or_url):
            name = urllib.parse.urljoin(base_path_or_url, name)
        else:
            name = os.path.normpath(os.path.join(os.path.dirname(base_path_or_url), name))
    task_id = [name,
               task.get('properties'),
               task.get('runset'),
               ]
    return tuple(task_id)
python
{ "resource": "" }
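Sketch of get_task_id on an in-memory XML element; it assumes the table-generator module defining the function (and its Util helper) is importable, and the file names are made up.

from xml.etree import ElementTree

task = ElementTree.fromstring('<run name="tasks/loop.c" properties="unreach-call"/>')
print(get_task_id(task, None))
# -> ('tasks/loop.c', 'unreach-call', None)
print(get_task_id(task, '/results/run.2019.xml'))
# -> ('/results/tasks/loop.c', 'unreach-call', None) on POSIX paths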
q18355
load_tool
train
def load_tool(result): """ Load the module with the tool-specific code. """ def load_tool_module(tool_module): if not tool_module: logging.warning('Cannot extract values from log files for benchmark results %s ' '(missing attribute "toolmodule" on tag "result").', Util.prettylist(result.attributes['name'])) return None try: logging.debug('Loading %s', tool_module) return __import__(tool_module, fromlist=['Tool']).Tool() except ImportError as ie: logging.warning( 'Missing module "%s", cannot extract values from log files (ImportError: %s).', tool_module, ie) except AttributeError: logging.warning( 'The module "%s" does not define the necessary class Tool, ' 'cannot extract values from log files.', tool_module) return None tool_module = result.attributes['toolmodule'][0] if 'toolmodule' in result.attributes else None if tool_module in loaded_tools: return loaded_tools[tool_module] else: result = load_tool_module(tool_module) loaded_tools[tool_module] = result return result
python
{ "resource": "" }
q18356
load_results
train
def load_results(result_files, options, run_set_id=None, columns=None,
                 columns_relevant_for_diff=set()):
    """Version of load_result for multiple input files that will be loaded concurrently."""
    return parallel.map(
        load_result,
        result_files,
        itertools.repeat(options),
        itertools.repeat(run_set_id),
        itertools.repeat(columns),
        itertools.repeat(columns_relevant_for_diff))
python
{ "resource": "" }
q18357
load_result
train
def load_result(result_file, options, run_set_id=None, columns=None, columns_relevant_for_diff=set()): """ Completely handle loading a single result file. @param result_file the file to parse @param options additional options @param run_set_id the identifier of the run set @param columns the list of columns @param columns_relevant_for_diff a set of columns that is relevant for the diff table @return a fully ready RunSetResult instance or None """ xml = parse_results_file(result_file, run_set_id=run_set_id, ignore_errors=options.ignore_errors) if xml is None: return None result = RunSetResult.create_from_xml( result_file, xml, columns=columns, all_columns=options.all_columns, columns_relevant_for_diff=columns_relevant_for_diff) result.collect_data(options.correct_only) return result
python
{ "resource": "" }
q18358
parse_results_file
train
def parse_results_file(resultFile, run_set_id=None, ignore_errors=False): ''' This function parses an XML file that contains the results of the execution of a run set. It returns the "result" XML tag. @param resultFile: The file name of the XML file that contains the results. @param run_set_id: An optional identifier of this set of results. ''' logging.info(' %s', resultFile) url = Util.make_url(resultFile) parse = ElementTree.ElementTree().parse try: with Util.open_url_seekable(url, mode='rb') as f: try: try: resultElem = parse(gzip.GzipFile(fileobj=f)) except IOError: f.seek(0) try: resultElem = parse(bz2.BZ2File(f)) except TypeError: # Python 3.2 does not support giving a file-like object to BZ2File resultElem = parse(io.BytesIO(bz2.decompress(f.read()))) except IOError: f.seek(0) resultElem = parse(f) except IOError as e: logging.error('Could not read result file %s: %s', resultFile, e) exit(1) except ElementTree.ParseError as e: logging.error('Result file %s is invalid: %s', resultFile, e) exit(1) if resultElem.tag not in ['result', 'test']: logging.error("XML file with benchmark results seems to be invalid.\n" "The root element of the file is not named 'result' or 'test'.\n" "If you want to run a table-definition file,\n" "you should use the option '-x' or '--xml'.") exit(1) if ignore_errors and 'error' in resultElem.attrib: logging.warning('Ignoring file "%s" because of error: %s', resultFile, resultElem.attrib['error']) return None if run_set_id is not None: for sourcefile in _get_run_tags_from_xml(resultElem): sourcefile.set('runset', run_set_id) insert_logfile_names(resultFile, resultElem) return resultElem
python
{ "resource": "" }
q18359
merge_task_lists
train
def merge_task_lists(runset_results, tasks): """ Set the filelists of all RunSetResult elements so that they contain the same files in the same order. For missing files a dummy element is inserted. """ for runset in runset_results: # create mapping from id to RunResult object # Use reversed list such that the first instance of equal tasks end up in dic dic = dict([(run_result.task_id, run_result) for run_result in reversed(runset.results)]) runset.results = [] # clear and repopulate results for task in tasks: run_result = dic.get(task) if run_result is None: logging.info(" No result for task '%s' in '%s'.", task[0], Util.prettylist(runset.attributes['filename'])) # create an empty dummy element run_result = RunResult(task, None, result.CATEGORY_MISSING, None, None, runset.columns, [None]*len(runset.columns)) runset.results.append(run_result)
python
{ "resource": "" }
q18360
get_rows
train
def get_rows(runSetResults):
    """
    Create list of rows with all data. Each row consists of several RunResults.
    """
    rows = []
    for task_results in zip(*[runset.results for runset in runSetResults]):
        rows.append(Row(task_results))

    return rows
python
{ "resource": "" }
q18361
filter_rows_with_differences
train
def filter_rows_with_differences(rows): """ Find all rows with differences in the status column. """ if not rows: # empty table return [] if len(rows[0].results) == 1: # table with single column return [] def get_index_of_column(name, cols): for i in range(0, len(cols)): if cols[i].title == name: return i return -1 def all_equal_result(listOfResults): relevant_columns = set() for res in listOfResults: for relevant_column in res.columns_relevant_for_diff: relevant_columns.add(relevant_column) if len(relevant_columns) == 0: relevant_columns.add("status") status = [] for col in relevant_columns: # It's necessary to search for the index of a column every time # because they can differ between results status.append( set( res.values[get_index_of_column(col, res.columns)] for res in listOfResults)) return reduce(lambda x, y: x and (len(y) <= 1), status, True) rowsDiff = [row for row in rows if not all_equal_result(row.results)] if len(rowsDiff) == 0: logging.info("---> NO DIFFERENCE FOUND IN SELECTED COLUMNS") elif len(rowsDiff) == len(rows): logging.info("---> DIFFERENCES FOUND IN ALL ROWS, NO NEED TO CREATE DIFFERENCE TABLE") return [] return rowsDiff
python
{ "resource": "" }
q18362
select_relevant_id_columns
train
def select_relevant_id_columns(rows):
    """
    Find out which of the entries in Row.id are equal for all given rows.
    @return: A list of True/False values according to whether the i-th part of the id is always equal.
    """
    relevant_id_columns = [True]  # first column (file name) is always relevant
    if rows:
        prototype_id = rows[0].id
        for column in range(1, len(prototype_id)):
            def id_equal_to_prototype(row):
                return row.id[column] == prototype_id[column]

            relevant_id_columns.append(not all(map(id_equal_to_prototype, rows)))
    return relevant_id_columns
python
{ "resource": "" }
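A minimal sketch of how select_relevant_id_columns above behaves, using a hypothetical namedtuple stand-in for the real Row objects (which are defined elsewhere in the table generator):

from collections import namedtuple

FakeRow = namedtuple('FakeRow', 'id')  # stand-in for illustration only

rows = [FakeRow(id=('a.c', 'prop1', '64bit')),
        FakeRow(id=('b.c', 'prop1', '32bit'))]
# The property part is identical in all rows, the bit-ness differs:
print(select_relevant_id_columns(rows))   # expected: [True, False, True]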
q18363
get_regression_count
train
def get_regression_count(rows, ignoreFlappingTimeouts):  # for options.dump_counts
    """Count the number of regressions, i.e., differences in status of the two right-most results
    where the new one is not "better" than the old one.
    Any change in status between error, unknown, and wrong result is a regression.
    Different kinds of errors or wrong results are also a regression.
    """
    def status_is(run_result, status):
        # startswith is used because status can be "TIMEOUT (TRUE)" etc., which count as "TIMEOUT"
        return run_result.status and run_result.status.startswith(status)

    def any_status_is(run_results, status):
        for run_result in run_results:
            if status_is(run_result, status):
                return True
        return False

    regressions = 0
    for row in rows:
        if len(row.results) < 2:
            return 0  # no regressions at all with only one run

        # "new" and "old" are the latest two results
        new = row.results[-1]
        old = row.results[-2]

        if new.category == result.CATEGORY_CORRECT:
            continue  # no regression if result is correct

        if new.status == old.status:
            continue  # no regression if result is the same as before
        if status_is(new, 'TIMEOUT') and status_is(old, 'TIMEOUT'):
            continue  # no regression if both are some form of timeout
        if status_is(new, 'OUT OF MEMORY') and status_is(old, 'OUT OF MEMORY'):
            continue  # same for OOM

        if (ignoreFlappingTimeouts and status_is(new, 'TIMEOUT')
                and any_status_is(row.results[:-2], 'TIMEOUT')):
            continue  # flapping timeout because any of the older results is also a timeout

        regressions += 1
    return regressions
python
{ "resource": "" }
q18364
RunSetResult.collect_data
train
def collect_data(self, correct_only):
    """
    Load the actual result values from the XML file and the log files.
    This may take some time if many log files have to be opened and parsed.
    """
    self.results = []

    def get_value_from_logfile(lines, identifier):
        """
        This method searches for values in lines of the content.
        It uses a tool-specific method to do so.
        """
        return load_tool(self).get_value_from_output(lines, identifier)

    # Opening the ZIP archive with the logs for every run is too slow, we cache it.
    log_zip_cache = {}
    try:
        for xml_result, result_file in self._xml_results:
            self.results.append(RunResult.create_from_xml(
                xml_result, get_value_from_logfile, self.columns, correct_only,
                log_zip_cache, self.columns_relevant_for_diff, result_file))
    finally:
        for file in log_zip_cache.values():
            file.close()

    for column in self.columns:
        column_values = (run_result.values[run_result.columns.index(column)]
                         for run_result in self.results)
        column.type, column.unit, column.source_unit, column.scale_factor = \
            get_column_type(column, column_values)

    del self._xml_results
python
{ "resource": "" }
q18365
Row.set_relative_path
train
def set_relative_path(self, common_prefix, base_dir):
    """
    Generate the output representation of this row:
    its file name relative to the common prefix of all rows.
    """
    self.short_filename = self.filename.replace(common_prefix, '', 1)
python
{ "resource": "" }
q18366
Tool._version_newer_than
train
def _version_newer_than(self, vers):
    """
    Determine whether the version is greater than some given version
    """
    v = self.version(self.executable())
    vers_num = v[:v.index('-')]
    if not vers_num[0].isdigit():
        # this is the old version which is "older" than any given version
        return False

    v1 = list(map(int, vers_num.split('.')))
    v2 = list(map(int, vers.split('.')))
    assert len(v1) == 3
    assert len(v2) == 3

    if v1[0] > v2[0]:
        return True
    elif v1[0] == v2[0]:
        if v1[1] == v2[1]:
            return v1[2] >= v2[2]
        elif v1[1] > v2[1]:
            return True
    return False
python
{ "resource": "" }
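For well-formed three-part versions, the comparison in _version_newer_than above is equivalent to comparing integer tuples; a standalone sketch that does not use the Tool class (version strings are made up):

def as_tuple(vers):
    # illustration only; the method above extracts the version from the tool's own output
    return tuple(map(int, vers.split('.')))

print(as_tuple('1.6.2') >= as_tuple('1.6.0'))   # True, i.e. "newer than" 1.6.0
print(as_tuple('1.5.9') >= as_tuple('1.6.0'))   # False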
q18367
_get_user_account_info
train
def _get_user_account_info(user):
    """Get the user account info from the passwd database. Only works on Linux.
    @param user The name of a user account or a numeric uid prefixed with '#'
    @return a tuple that corresponds to the members of the passwd structure
    @raise KeyError: If user account is unknown
    @raise ValueError: If uid is not a valid number
    """
    import pwd  # Import here to avoid problems on other platforms
    if user[0] == '#':
        return pwd.getpwuid(int(user[1:]))
    else:
        return pwd.getpwnam(user)
python
{ "resource": "" }
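A usage sketch for _get_user_account_info above (Linux only; the account names are examples):

info = _get_user_account_info('root')    # look up by account name
print(info.pw_uid, info.pw_dir)          # typically: 0 /root
info = _get_user_account_info('#0')      # same account, addressed by uid with '#' prefix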
q18368
_reduce_file_size_if_necessary
train
def _reduce_file_size_if_necessary(fileName, maxSize):
    """
    This function shrinks a file.
    We remove only the middle part of a file,
    the file-start and the file-end remain unchanged.
    """
    fileSize = os.path.getsize(fileName)

    if maxSize is None:
        logging.debug("Size of logfile '%s' is %s bytes, size limit disabled.", fileName, fileSize)
        return  # disabled, nothing to do

    if fileSize < (maxSize + 500):
        logging.debug("Size of logfile '%s' is %s bytes, nothing to do.", fileName, fileSize)
        return

    logging.warning("Logfile '%s' is too big (size %s bytes). Removing lines.", fileName, fileSize)
    util.shrink_text_file(fileName, maxSize, _LOG_SHRINK_MARKER)
python
{ "resource": "" }
q18369
_try_join_cancelled_thread
train
def _try_join_cancelled_thread(thread):
    """Join a thread, but if the thread doesn't terminate for some time, ignore it
    instead of waiting infinitely."""
    thread.join(10)
    if thread.is_alive():
        logging.warning("Thread %s did not terminate within grace period after cancellation",
                        thread.name)
python
{ "resource": "" }
q18370
RunExecutor._init_cgroups
train
def _init_cgroups(self):
    """
    This function initializes the cgroups for the limitations and measurements.
    """
    self.cgroups = find_my_cgroups()

    for subsystem in self._cgroup_subsystems:
        self.cgroups.require_subsystem(subsystem)
        if subsystem not in self.cgroups:
            sys.exit('Required cgroup subsystem "{}" is missing.'.format(subsystem))

    # Feature is still experimental, do not warn loudly
    self.cgroups.require_subsystem(BLKIO, log_method=logging.debug)
    if BLKIO not in self.cgroups:
        logging.debug('Cannot measure I/O without blkio cgroup.')

    self.cgroups.require_subsystem(CPUACCT)
    if CPUACCT not in self.cgroups:
        logging.warning('Without cpuacct cgroups, cputime measurement and limit '
                        'might not work correctly if subprocesses are started.')

    self.cgroups.require_subsystem(FREEZER)
    if FREEZER not in self.cgroups:
        if self._user is not None:
            # In sudo mode, we absolutely need at least one cgroup subsystem
            # to be able to find the process where we need to send signals to
            sys.exit('Cannot reliably kill sub-processes without freezer cgroup,'
                     + ' this is necessary if --user is specified.'
                     + ' Please enable this cgroup or do not specify --user.')
        else:
            logging.warning('Cannot reliably kill sub-processes without freezer cgroup.')

    self.cgroups.require_subsystem(MEMORY)
    if MEMORY not in self.cgroups:
        logging.warning('Cannot measure memory consumption without memory cgroup.')
    else:
        if systeminfo.has_swap() and (
                not self.cgroups.has_value(MEMORY, 'memsw.max_usage_in_bytes')):
            logging.warning(
                'Kernel misses feature for accounting swap memory, but machine has swap. '
                'Memory usage may be measured inaccurately. '
                'Please set swapaccount=1 on your kernel command line or disable swap with '
                '"sudo swapoff -a".')

    self.cgroups.require_subsystem(CPUSET)
    self.cpus = None  # to indicate that we cannot limit cores
    self.memory_nodes = None  # to indicate that we cannot limit memory nodes
    if CPUSET in self.cgroups:
        # Read available cpus/memory nodes:
        try:
            self.cpus = util.parse_int_list(self.cgroups.get_value(CPUSET, 'cpus'))
        except ValueError as e:
            logging.warning("Could not read available CPU cores from kernel: %s", e.strerror)
        logging.debug("List of available CPU cores is %s.", self.cpus)

        try:
            self.memory_nodes = util.parse_int_list(self.cgroups.get_value(CPUSET, 'mems'))
        except ValueError as e:
            logging.warning("Could not read available memory nodes from kernel: %s", e.strerror)
        logging.debug("List of available memory nodes is %s.", self.memory_nodes)
python
{ "resource": "" }
q18371
RunExecutor._build_cmdline
train
def _build_cmdline(self, args, env={}):
    """
    Build the final command line for executing the given command,
    using sudo if necessary.
    """
    if self._user is None:
        return super(RunExecutor, self)._build_cmdline(args, env)
    result = _SUDO_ARGS + [self._user]
    for var, value in env.items():
        result.append(var + '=' + value)
    return result + ['--'] + args
python
{ "resource": "" }
q18372
RunExecutor._kill_process
train
def _kill_process(self, pid, cgroups=None, sig=signal.SIGKILL):
    """
    Try to send signal to given process, either directly or with sudo.
    Because we cannot send signals to the sudo process itself,
    this method checks whether the target is the sudo process
    and redirects the signal to sudo's child in this case.
    """
    if self._user is not None:
        if not cgroups:
            cgroups = find_cgroups_of_process(pid)
        # In case we started a tool with sudo, we cannot kill the started
        # process itself, because sudo always runs as root.
        # So if we are asked to kill the started process itself (the first
        # process in the cgroup), we instead kill the child of sudo
        # (the second process in the cgroup).
        pids = cgroups.get_all_tasks(FREEZER)
        try:
            if pid == next(pids):
                pid = next(pids)
        except StopIteration:
            # pids seems to not have enough values
            pass
        finally:
            pids.close()
    self._kill_process0(pid, sig)
python
{ "resource": "" }
q18373
RunExecutor._listdir
train
def _listdir(self, path):
    """Return the list of files in a directory, assuming that our user can read it."""
    if self._user is None:
        return os.listdir(path)
    else:
        args = self._build_cmdline(['/bin/ls', '-1A', path])
        return subprocess.check_output(args, stderr=DEVNULL).decode('utf-8', errors='ignore').split('\n')
python
{ "resource": "" }
q18374
RunExecutor._setup_cgroups
train
def _setup_cgroups(self, my_cpus, memlimit, memory_nodes, cgroup_values):
    """
    This method creates the CGroups for the following execution.
    @param my_cpus: None or a list of the CPU cores to use
    @param memlimit: None or memory limit in bytes
    @param memory_nodes: None or a list of memory nodes of a NUMA system to use
    @param cgroup_values: dict of additional values to set
    @return cgroups: a map of all the necessary cgroups for the following execution.
                     Please add the process of the following execution to all those cgroups!
    """
    logging.debug("Setting up cgroups for run.")

    # Setup cgroups, need a single call to create_cgroup() for all subsystems
    subsystems = [BLKIO, CPUACCT, FREEZER, MEMORY] + self._cgroup_subsystems
    if my_cpus is not None or memory_nodes is not None:
        subsystems.append(CPUSET)
    subsystems = [s for s in subsystems if s in self.cgroups]

    cgroups = self.cgroups.create_fresh_child_cgroup(*subsystems)

    logging.debug("Created cgroups %s.", cgroups)

    # First, set user-specified values such that they get overridden by our settings if necessary.
    for ((subsystem, option), value) in cgroup_values.items():
        try:
            cgroups.set_value(subsystem, option, value)
        except EnvironmentError as e:
            cgroups.remove()
            sys.exit('{} for setting cgroup option {}.{} to "{}" (error code {}).'
                     .format(e.strerror, subsystem, option, value, e.errno))
        logging.debug('Cgroup value %s.%s was set to "%s", new value is now "%s".',
                      subsystem, option, value, cgroups.get_value(subsystem, option))

    # Setup cpuset cgroup if necessary to limit the CPU cores/memory nodes to be used.
    if my_cpus is not None:
        my_cpus_str = ','.join(map(str, my_cpus))
        cgroups.set_value(CPUSET, 'cpus', my_cpus_str)
        my_cpus_str = cgroups.get_value(CPUSET, 'cpus')
        logging.debug('Using cpu cores [%s].', my_cpus_str)

    if memory_nodes is not None:
        cgroups.set_value(CPUSET, 'mems', ','.join(map(str, memory_nodes)))
        memory_nodesStr = cgroups.get_value(CPUSET, 'mems')
        logging.debug('Using memory nodes [%s].', memory_nodesStr)

    # Setup memory limit
    if memlimit is not None:
        limit = 'limit_in_bytes'
        cgroups.set_value(MEMORY, limit, memlimit)

        swap_limit = 'memsw.limit_in_bytes'
        # We need swap limit because otherwise the kernel just starts swapping
        # out our process if the limit is reached.
        # Some kernels might not have this feature,
        # which is ok if there is actually no swap.
        if not cgroups.has_value(MEMORY, swap_limit):
            if systeminfo.has_swap():
                sys.exit('Kernel misses feature for accounting swap memory, but machine has swap. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".')
        else:
            try:
                cgroups.set_value(MEMORY, swap_limit, memlimit)
            except IOError as e:
                if e.errno == errno.ENOTSUP:
                    # kernel responds with operation unsupported if this is disabled
                    sys.exit('Memory limit specified, but kernel does not allow limiting swap memory. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".')
                raise e

        memlimit = cgroups.get_value(MEMORY, limit)
        logging.debug('Effective memory limit is %s bytes.', memlimit)

    if MEMORY in cgroups:
        try:
            # Note that this disables swapping completely according to
            # https://www.kernel.org/doc/Documentation/cgroups/memory.txt
            # (unlike setting the global swappiness to 0).
            # Our process might get killed because of this.
            cgroups.set_value(MEMORY, 'swappiness', '0')
        except IOError as e:
            logging.warning('Could not disable swapping for benchmarked process: %s', e)

    return cgroups
python
{ "resource": "" }
q18375
RunExecutor._create_temp_dir
train
def _create_temp_dir(self):
    """Create a temporary directory for the run."""
    if self._user is None:
        base_dir = tempfile.mkdtemp(prefix="BenchExec_run_")
    else:
        create_temp_dir = self._build_cmdline([
            'python', '-c',
            'import tempfile;'
            'print(tempfile.mkdtemp(prefix="BenchExec_run_"))'
        ])
        base_dir = subprocess.check_output(create_temp_dir).decode().strip()
    return base_dir
python
{ "resource": "" }
q18376
RunExecutor._cleanup_temp_dir
train
def _cleanup_temp_dir(self, base_dir):
    """Delete given temporary directory and all its contents."""
    if self._should_cleanup_temp_dir:
        logging.debug('Cleaning up temporary directory %s.', base_dir)
        if self._user is None:
            util.rmtree(base_dir, onerror=util.log_rmtree_error)
        else:
            rm = subprocess.Popen(self._build_cmdline(['rm', '-rf', '--', base_dir]),
                                  stderr=subprocess.PIPE)
            rm_output = rm.stderr.read().decode()
            rm.stderr.close()
            if rm.wait() != 0 or rm_output:
                logging.warning("Failed to clean up temp directory %s: %s.",
                                base_dir, rm_output)
    else:
        logging.info("Skipping cleanup of temporary directory %s.", base_dir)
python
{ "resource": "" }
q18377
RunExecutor._setup_environment
train
def _setup_environment(self, environments):
    """Return map with desired environment variables for run."""
    # If keepEnv is set or sudo is used, start from a fresh environment,
    # otherwise with the current one.
    # keepEnv specifies variables to copy from the current environment,
    # newEnv specifies variables to set to a new value,
    # additionalEnv specifies variables where some value should be appended, and
    # clearEnv specifies variables to delete.
    if self._user is not None or environments.get("keepEnv", None) is not None:
        run_environment = {}
    else:
        run_environment = os.environ.copy()
    for key, value in environments.get("keepEnv", {}).items():
        if key in os.environ:
            run_environment[key] = os.environ[key]
    for key, value in environments.get("newEnv", {}).items():
        run_environment[key] = value
    for key, value in environments.get("additionalEnv", {}).items():
        run_environment[key] = os.environ.get(key, "") + value
    for key in environments.get("clearEnv", {}).keys():  # iterate over variable names to remove
        run_environment.pop(key, None)

    logging.debug("Using additional environment %s.", environments)
    return run_environment
python
{ "resource": "" }
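The environments argument of _setup_environment above is a dict of dicts; a sketch of what a caller might pass (the variable names and values are made up, and executor stands in for a RunExecutor instance):

environments = {
    "newEnv":        {"LANG": "C"},               # set to a fixed value
    "additionalEnv": {"PATH": ":/opt/tool/bin"},  # appended to the current value
    "keepEnv":       {"HOME": None},              # copied from the current environment
}
run_environment = executor._setup_environment(environments)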
q18378
RunExecutor._setup_output_file
train
def _setup_output_file(self, output_filename, args, write_header=True):
    """Open and prepare output file."""
    # write command line into outputFile
    # (without environment variables, they are documented by benchexec)
    try:
        output_file = open(output_filename, 'w')  # override existing file
    except IOError as e:
        sys.exit(e)
    if write_header:
        output_file.write(' '.join(map(util.escape_string_shell, self._build_cmdline(args)))
                          + '\n\n\n' + '-' * 80 + '\n\n\n')
        output_file.flush()
    return output_file
python
{ "resource": "" }
q18379
RunExecutor._setup_cgroup_time_limit
train
def _setup_cgroup_time_limit(self, hardtimelimit, softtimelimit, walltimelimit,
                             cgroups, cores, pid_to_kill):
    """Start time-limit handler.
    @return None or the time-limit handler for calling cancel()
    """
    # hard time limit with cgroups is optional (additionally enforced by ulimit)
    cgroup_hardtimelimit = hardtimelimit if CPUACCT in cgroups else None

    if any([cgroup_hardtimelimit, softtimelimit, walltimelimit]):
        # Start a timer to periodically check timelimit
        timelimitThread = _TimelimitThread(cgroups=cgroups,
                                           hardtimelimit=cgroup_hardtimelimit,
                                           softtimelimit=softtimelimit,
                                           walltimelimit=walltimelimit,
                                           pid_to_kill=pid_to_kill,
                                           cores=cores,
                                           callbackFn=self._set_termination_reason,
                                           kill_process_fn=self._kill_process)
        timelimitThread.start()
        return timelimitThread
    return None
python
{ "resource": "" }
q18380
RunExecutor._setup_cgroup_memory_limit
train
def _setup_cgroup_memory_limit(self, memlimit, cgroups, pid_to_kill):
    """Start memory-limit handler.
    @return None or the memory-limit handler for calling cancel()
    """
    if memlimit is not None:
        try:
            oomThread = oomhandler.KillProcessOnOomThread(
                cgroups=cgroups, pid_to_kill=pid_to_kill,
                callbackFn=self._set_termination_reason,
                kill_process_fn=self._kill_process)
            oomThread.start()
            return oomThread
        except OSError as e:
            logging.critical("OSError %s during setup of OomEventListenerThread: %s.",
                             e.errno, e.strerror)
    return None
python
{ "resource": "" }
q18381
RunExecutor._setup_ulimit_time_limit
train
def _setup_ulimit_time_limit(self, hardtimelimit, cgroups):
    """Setup time limit with ulimit for the current process."""
    if hardtimelimit is not None:
        # Also use ulimit for CPU time limit as a fallback if cgroups don't work.
        if CPUACCT in cgroups:
            # Use a slightly higher limit to ensure cgroups get used
            # (otherwise we cannot detect the timeout properly).
            ulimit = hardtimelimit + _ULIMIT_DEFAULT_OVERHEAD
        else:
            ulimit = hardtimelimit
        resource.setrlimit(resource.RLIMIT_CPU, (ulimit, ulimit))
python
{ "resource": "" }
q18382
RunExecutor._setup_file_hierarchy_limit
train
def _setup_file_hierarchy_limit(
        self, files_count_limit, files_size_limit, temp_dir, cgroups, pid_to_kill):
    """Start thread that enforces any file-hierarchy limits."""
    if files_count_limit is not None or files_size_limit is not None:
        file_hierarchy_limit_thread = FileHierarchyLimitThread(
            self._get_result_files_base(temp_dir),
            files_count_limit=files_count_limit,
            files_size_limit=files_size_limit,
            cgroups=cgroups,
            pid_to_kill=pid_to_kill,
            callbackFn=self._set_termination_reason,
            kill_process_fn=self._kill_process)
        file_hierarchy_limit_thread.start()
        return file_hierarchy_limit_thread
    return None
python
{ "resource": "" }
q18383
RunExecutor._get_cgroup_measurements
train
def _get_cgroup_measurements(self, cgroups, ru_child, result):
    """
    This method calculates the exact results for time and memory measurements.
    It is not important to call this method as soon as possible after the run.
    """
    logging.debug("Getting cgroup measurements.")

    cputime_wait = ru_child.ru_utime + ru_child.ru_stime if ru_child else 0
    cputime_cgroups = None
    if CPUACCT in cgroups:
        # We want to read the value from the cgroup.
        # The documentation warns about outdated values.
        # So we read twice with 0.1s time difference,
        # and continue reading as long as the values differ.
        # This has never happened except when interrupting the script with Ctrl+C,
        # but just try to be on the safe side here.
        tmp = cgroups.read_cputime()
        tmp2 = None
        while tmp != tmp2:
            time.sleep(0.1)
            tmp2 = tmp
            tmp = cgroups.read_cputime()
        cputime_cgroups = tmp

        # Usually cputime_cgroups seems to be 0.01s greater than cputime_wait.
        # Furthermore, cputime_wait might miss some subprocesses,
        # therefore we expect cputime_cgroups to be always greater (and more correct).
        # However, sometimes cputime_wait is a little bit bigger than cputime_cgroups.
        # For small values, this is probably because cputime_wait counts since fork,
        # whereas cputime_cgroups counts only after cgroups.add_task()
        # (so overhead from runexecutor is correctly excluded in cputime_cgroups).
        # For large values, a difference may also indicate a problem with cgroups,
        # for example another process moving our benchmarked process between cgroups,
        # thus we warn if the difference is substantial and take the larger cputime_wait value.
        if cputime_wait > 0.5 and (cputime_wait * 0.95) > cputime_cgroups:
            logging.warning(
                'Cputime measured by wait was %s, cputime measured by cgroup was only %s, '
                'perhaps measurement is flawed.', cputime_wait, cputime_cgroups)
            result['cputime'] = cputime_wait
        else:
            result['cputime'] = cputime_cgroups

        for (core, coretime) in enumerate(cgroups.get_value(CPUACCT, 'usage_percpu').split(" ")):
            try:
                coretime = int(coretime)
                if coretime != 0:
                    result['cputime-cpu'+str(core)] = coretime/1000000000  # nano-seconds to seconds
            except (OSError, ValueError) as e:
                logging.debug("Could not read CPU time for core %s from kernel: %s", core, e)
    else:
        # For backwards compatibility, we report cputime_wait on systems without cpuacct cgroup.
        # TODO We might remove this for BenchExec 2.0.
        result['cputime'] = cputime_wait

    if MEMORY in cgroups:
        # This measurement reads the maximum number of bytes of RAM+Swap the process used.
        # For more details, c.f. the kernel documentation:
        # https://www.kernel.org/doc/Documentation/cgroups/memory.txt
        memUsageFile = 'memsw.max_usage_in_bytes'
        if not cgroups.has_value(MEMORY, memUsageFile):
            memUsageFile = 'max_usage_in_bytes'
        if not cgroups.has_value(MEMORY, memUsageFile):
            logging.warning('Memory-usage is not available due to missing files.')
        else:
            try:
                result['memory'] = int(cgroups.get_value(MEMORY, memUsageFile))
            except IOError as e:
                if e.errno == errno.ENOTSUP:
                    # kernel responds with operation unsupported if this is disabled
                    logging.critical(
                        "Kernel does not track swap memory usage, cannot measure memory usage."
                        " Please set swapaccount=1 on your kernel command line.")
                else:
                    raise e

    if BLKIO in cgroups:
        blkio_bytes_file = 'throttle.io_service_bytes'
        if cgroups.has_value(BLKIO, blkio_bytes_file):
            bytes_read = 0
            bytes_written = 0
            for blkio_line in cgroups.get_file_lines(BLKIO, blkio_bytes_file):
                try:
                    dev_no, io_type, bytes_amount = blkio_line.split(' ')
                    if io_type == "Read":
                        bytes_read += int(bytes_amount)
                    elif io_type == "Write":
                        bytes_written += int(bytes_amount)
                except ValueError:
                    pass  # There are irrelevant lines in this file with a different structure
            result['blkio-read'] = bytes_read
            result['blkio-write'] = bytes_written

    logging.debug(
        'Resource usage of run: walltime=%s, cputime=%s, cgroup-cputime=%s, memory=%s',
        result.get('walltime'), cputime_wait, cputime_cgroups, result.get('memory', None))
python
{ "resource": "" }
q18384
RunExecutor.check_for_new_files_in_home
train
def check_for_new_files_in_home(self):
    """Check that the user account's home directory now does not contain more files than
    when this instance was created, and warn otherwise.
    Does nothing if no user account was given to RunExecutor.
    @return set of newly created files
    """
    if not self._user:
        return None
    try:
        created_files = set(self._listdir(self._home_dir)).difference(self._home_dir_content)
    except (subprocess.CalledProcessError, IOError):
        # Probably home directory does not exist
        created_files = []
    if created_files:
        logging.warning('The tool created the following files in %s, '
                        'this may influence later runs:\n\t%s',
                        self._home_dir, '\n\t'.join(created_files))
    return created_files
python
{ "resource": "" }
q18385
sethostname
train
def sethostname(name):
    """Set the host name of the machine."""
    # TODO: replace with socket.sethostname, which is available from Python 3.3
    name = name.encode()
    _libc.sethostname(name, len(name))
python
{ "resource": "" }
q18386
check_cgroup_availability
train
def check_cgroup_availability(wait=1):
    """
    Basic utility to check the availability and permissions of cgroups.
    This will log some warnings for the user if necessary.
    On some systems, daemons such as cgrulesengd might interfere with the cgroups
    of a process soon after it was started. Thus this function starts a process,
    waits a configurable amount of time, and checks whether the cgroups have been changed.
    @param wait: a non-negative int that is interpreted as seconds to wait during the check
    @raise SystemExit: if cgroups are not usable
    """
    logging.basicConfig(format="%(levelname)s: %(message)s")
    runexecutor = RunExecutor()
    my_cgroups = runexecutor.cgroups

    if not (CPUACCT in my_cgroups and
            CPUSET in my_cgroups and
            # FREEZER in my_cgroups and  # For now, we do not require freezer
            MEMORY in my_cgroups):
        sys.exit(1)

    with tempfile.NamedTemporaryFile(mode='rt') as tmp:
        runexecutor.execute_run(['sh', '-c', 'sleep {0}; cat /proc/self/cgroup'.format(wait)],
                                tmp.name,
                                memlimit=1024*1024,  # set memlimit to force check for swapaccount
                                # set cores and memory_nodes to force usage of CPUSET
                                cores=util.parse_int_list(my_cgroups.get_value(CPUSET, 'cpus')),
                                memory_nodes=my_cgroups.read_allowed_memory_banks())
        lines = []
        for line in tmp:
            line = line.strip()
            if line and not line == "sh -c 'sleep {0}; cat /proc/self/cgroup'".format(wait) \
                    and not all(c == '-' for c in line):
                lines.append(line)
    task_cgroups = find_my_cgroups(lines)

    fail = False
    for subsystem in CPUACCT, CPUSET, MEMORY, FREEZER:
        if subsystem in my_cgroups:
            if not task_cgroups[subsystem].startswith(os.path.join(my_cgroups[subsystem], 'benchmark_')):
                logging.warning('Task was in cgroup %s for subsystem %s, '
                                'which is not the expected sub-cgroup of %s. '
                                'Maybe some other program is interfering with cgroup management?',
                                task_cgroups[subsystem], subsystem, my_cgroups[subsystem])
                fail = True
    if fail:
        sys.exit(1)
python
{ "resource": "" }
q18387
Tool.allInText
train
def allInText(self, words, text):
    """
    This function checks if all the words appear in the given order in the text.
    """
    index = 0
    for word in words:
        index = text.find(word, index)
        if index == -1:
            return False
        index += len(word)  # continue searching after the matched word
    return True
python
{ "resource": "" }
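A usage sketch for allInText above; here tool stands in for an instance of a tool-info module, and the log text is made up:

output = "Checking property...\nVERIFICATION RESULT: FALSE. Property violation found."
print(tool.allInText(['VERIFICATION RESULT', 'FALSE'], output))   # True, both appear in this order
print(tool.allInText(['FALSE', 'TRUE'], output))                  # False, 'TRUE' does not follow 'FALSE'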
q18388
printOut
train
def printOut(value, end='\n'):
    """
    This function prints the given String immediately and flushes the output.
    """
    sys.stdout.write(value)
    sys.stdout.write(end)
    sys.stdout.flush()
python
{ "resource": "" }
q18389
is_code
train
def is_code(filename):
    """
    This function returns True, if a line of the file contains bracket '{'.
    """
    with open(filename, "r") as file:
        for line in file:
            # ignore comments and empty lines
            if not is_comment(line) \
                    and '{' in line:  # <-- simple indicator for code
                if '${' not in line:  # <-- ${abc} variable to substitute
                    return True
    return False
python
{ "resource": "" }
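A self-contained sketch for is_code above; it assumes the companion helper is_comment from the same module recognizes the comment line, and uses a throwaway temp file:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.c', delete=False) as tmp:
    tmp.write('// just a comment\nint main() { return 0; }\n')
print(is_code(tmp.name))   # True: the second line contains '{' and is not a comment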
q18390
get_list_from_xml
train
def get_list_from_xml(elem, tag="option", attributes=["name"]):
    '''
    This function searches for all "option"-tags and returns a list with all attributes and texts.
    '''
    return flatten(([option.get(attr) for attr in attributes] + [option.text]
                    for option in elem.findall(tag)), exclude=[None])
python
{ "resource": "" }
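A small illustration of get_list_from_xml above; it relies on the flatten helper from the same module, and the XML snippet is made up:

from xml.etree import ElementTree

elem = ElementTree.fromstring(
    '<rundefinition>'
    '<option name="-heap">2000M</option>'
    '<option name="-noout"/>'
    '</rundefinition>')
print(get_list_from_xml(elem))   # ['-heap', '2000M', '-noout'] (None texts are excluded)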
q18391
parse_int_list
train
def parse_int_list(s):
    """
    Parse a comma-separated string of integers.
    The string may additionally contain ranges such as "1-5",
    which will be expanded into "1,2,3,4,5".
    """
    result = []
    for item in s.split(','):
        item = item.strip().split('-')
        if len(item) == 1:
            result.append(int(item[0]))
        elif len(item) == 2:
            start, end = item
            result.extend(range(int(start), int(end)+1))
        else:
            raise ValueError("invalid range: '{0}'".format(s))
    return result
python
{ "resource": "" }
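A minimal usage sketch for parse_int_list above (the input strings are illustrative):

print(parse_int_list('0,2-4,7'))   # [0, 2, 3, 4, 7]
print(parse_int_list('0-1'))       # [0, 1]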
q18392
split_number_and_unit
train
def split_number_and_unit(s):
    """Parse a string that consists of an integer number and an optional unit.
    @param s a non-empty string that starts with an int and is followed by some letters
    @return a pair of the number (as int) and the unit
    """
    if not s:
        raise ValueError('empty value')
    s = s.strip()
    pos = len(s)
    while pos and not s[pos-1].isdigit():
        pos -= 1
    number = int(s[:pos])
    unit = s[pos:].strip()
    return (number, unit)
python
{ "resource": "" }
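A quick illustration of the pair returned by split_number_and_unit above:

print(split_number_and_unit('200 MB'))   # (200, 'MB')
print(split_number_and_unit('15'))       # (15, '')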
q18393
parse_memory_value
train
def parse_memory_value(s):
    """Parse a string that contains a number of bytes, optionally with a unit like MB.
    @return the number of bytes encoded by the string
    """
    number, unit = split_number_and_unit(s)
    if not unit or unit == 'B':
        return number
    elif unit == 'kB':
        return number * _BYTE_FACTOR
    elif unit == 'MB':
        return number * _BYTE_FACTOR * _BYTE_FACTOR
    elif unit == 'GB':
        return number * _BYTE_FACTOR * _BYTE_FACTOR * _BYTE_FACTOR
    elif unit == 'TB':
        return number * _BYTE_FACTOR * _BYTE_FACTOR * _BYTE_FACTOR * _BYTE_FACTOR
    else:
        raise ValueError('unknown unit: {} (allowed are B, kB, MB, GB, and TB)'.format(unit))
python
{ "resource": "" }
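A small sketch of parse_memory_value above; it assumes _BYTE_FACTOR = 1000 (SI prefixes), which is how the surrounding module appears to use it:

print(parse_memory_value('16'))     # 16 (plain bytes)
print(parse_memory_value('2MB'))    # 2000000
print(parse_memory_value('3 GB'))   # 3000000000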
q18394
parse_timespan_value
train
def parse_timespan_value(s):
    """Parse a string that contains a time span, optionally with a unit like s.
    @return the number of seconds encoded by the string
    """
    number, unit = split_number_and_unit(s)
    if not unit or unit == "s":
        return number
    elif unit == "min":
        return number * 60
    elif unit == "h":
        return number * 60 * 60
    elif unit == "d":
        return number * 24 * 60 * 60
    else:
        raise ValueError('unknown unit: {} (allowed are s, min, h, and d)'.format(unit))
python
{ "resource": "" }
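Illustrative calls to parse_timespan_value above:

print(parse_timespan_value('90'))     # 90 (plain seconds)
print(parse_timespan_value('15min'))  # 900
print(parse_timespan_value('2h'))     # 7200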
q18395
expand_filename_pattern
train
def expand_filename_pattern(pattern, base_dir):
    """
    Expand a file name pattern containing wildcards, environment variables etc.

    @param pattern: The pattern string to expand.
    @param base_dir: The directory where relative paths are based on.
    @return: A list of file names (possibly empty).
    """
    # 'join' ignores base_dir, if expandedPattern is absolute.
    # 'normpath' replaces 'A/foo/../B' with 'A/B', for pretty printing only
    pattern = os.path.normpath(os.path.join(base_dir, pattern))

    # expand tilde and variables
    pattern = os.path.expandvars(os.path.expanduser(pattern))

    # expand wildcards
    fileList = glob.glob(pattern)

    return fileList
python
{ "resource": "" }
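A hypothetical call to expand_filename_pattern above (the paths do not refer to real files, and the result depends on the local file system):

xml_files = expand_filename_pattern('benchmarks/*.xml', '/home/user/tables')
# e.g. ['/home/user/tables/benchmarks/a.xml', '/home/user/tables/benchmarks/b.xml']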
q18396
substitute_vars
train
def substitute_vars(template, replacements):
    """Replace certain keys with respective values in a string.
    @param template: the string in which replacements should be made
    @param replacements: a dict or a list of pairs of keys and values
    """
    result = template
    for (key, value) in replacements:
        result = result.replace('${' + key + '}', value)
    if '${' in result:
        logging.warning("A variable was not replaced in '%s'.", result)
    return result
python
{ "resource": "" }
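A usage sketch for substitute_vars above with a list of key/value pairs (the variable names are only examples):

template = '${benchmark_name}.${inputfile_name}.log'
pairs = [('benchmark_name', 'test-set'), ('inputfile_name', 'a.c')]
print(substitute_vars(template, pairs))   # 'test-set.a.c.log'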
q18397
rmtree
train
def rmtree(path, ignore_errors=False, onerror=None):
    """Same as shutil.rmtree, but supports directories without write or execute permissions."""
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    for root, dirs, unused_files in os.walk(path):
        for directory in dirs:
            try:
                abs_directory = os.path.join(root, directory)
                os.chmod(abs_directory, stat.S_IRWXU)
            except EnvironmentError as e:
                onerror(os.chmod, abs_directory, e)
    shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
python
{ "resource": "" }
q18398
copy_all_lines_from_to
train
def copy_all_lines_from_to(inputFile, outputFile):
    """Copy all lines from an input file object to an output file object."""
    currentLine = inputFile.readline()
    while currentLine:
        outputFile.write(currentLine)
        currentLine = inputFile.readline()
python
{ "resource": "" }
q18399
write_file
train
def write_file(content, *path):
    """
    Simply write some content to a file, overwriting the file if it already exists.
    """
    with open(os.path.join(*path), "w") as file:
        return file.write(content)
python
{ "resource": "" }