text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maybe_obj(str_or_obj): """If argument is not a string, return it. Otherwise import the dotted name and return that. """
def maybe_obj(str_or_obj):
    """If argument is not a string, return it.

    Otherwise import the dotted name and return that.
    """
    if not isinstance(str_or_obj, six.string_types):
        return str_or_obj

    # Import the longest importable prefix of the dotted path.
    parts = str_or_obj.split(".")
    mod = None
    modname = None
    for part in parts:
        modname = part if modname is None else "%s.%s" % (modname, part)
        try:
            mod = __import__(modname)
        except ImportError:
            if mod is None:
                # Not even the first component could be imported.
                raise
            break

    # Walk the remaining components as attribute lookups from the
    # top-level module (__import__ returns the top-level package).
    obj = mod
    for part in parts[1:]:
        obj = getattr(obj, part)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_menu(): """Generate a new list of menus."""
def generate_menu():
    """Generate a new list of menus."""
    # Deep-copy the configured menus so dynamic menu functions can never
    # mutate the settings object itself.
    menu = Menu(list(copy.deepcopy(settings.WAFER_MENUS)))
    for func in settings.WAFER_DYNAMIC_MENUS:
        # Entries may be dotted names or callables; resolve either way.
        maybe_obj(func)(menu)
    return menu
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def lock(self): ''' Try to get locked the file - the function will wait until the file is unlocked if 'wait' was defined as locktype - the funciton will raise AlreadyLocked exception if 'lock' was defined as locktype ''' # Open file self.__fd = open(self.__lockfile, "w") # Get it locked if self.__locktype == "wait": # Try to get it locked until ready fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX) elif self.__locktype == "lock": # Try to get the locker if can not raise an exception try: fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB) except IOError: raise AlreadyLocked("File is already locked")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _make_handler(state_token, done_function):
    '''
    Makes a handler class to use inside the basic python HTTP server.

    state_token is the expected state token.
    done_function is a function that is called, with the code passed to it.
    '''
    class LocalServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):

        def error_response(self, msg):
            # BUG FIX: logging.warn() does not accept arbitrary keyword
            # arguments, so the original call raised TypeError instead of
            # logging. Pass the values as lazy positional format args.
            logging.warning('Error response: %s. %s', msg, self.path)
            self.send_response(400)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            self.wfile.write(msg)

        def do_GET(self):
            parsed = urlparse.urlparse(self.path)
            if len(parsed.query) == 0 or parsed.path != '/callback':
                self.error_response(
                    'We encountered a problem with your request.')
                return
            params = urlparse.parse_qs(parsed.query)
            # BUG FIX: use .get() so a request without 'state' or 'code'
            # produces a 400 error response instead of a KeyError/500.
            if params.get('state') != [state_token]:
                self.error_response(
                    'Attack detected: state tokens did not match!')
                return
            if len(params.get('code', [])) != 1:
                self.error_response('Wrong number of "code" query parameters.')
                return
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            self.wfile.write(
                "courseraoauth2client: we have captured Coursera's response "
                "code. Feel free to close this browser window now and return "
                "to your terminal. Thanks!")
            # Hand the authorization code back to the caller.
            done_function(params['code'][0])

    return LocalServerHandler
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def configuration():
    'Loads configuration from the file system.'
    # Built-in defaults; any value may be overridden by the config files
    # read below. NOTE(review): option lines are written flush-left here
    # because ConfigParser treats indented lines as continuations —
    # confirm against the original file layout.
    defaults = '''
[oauth2]
hostname = localhost
port = 9876
api_endpoint = https://api.coursera.org
auth_endpoint = https://accounts.coursera.org/oauth2/v1/auth
token_endpoint = https://accounts.coursera.org/oauth2/v1/token
verify_tls = True
token_cache_base = ~/.coursera
[manage_graders]
client_id = NS8qaSX18X_Eu0pyNbLsnA
client_secret = bUqKqGywnGXEJPFrcd4Jpw
scopes = view_profile manage_graders
[manage_research_exports]
client_id = sDHC8Nfp-b1XMbzZx8Wa4w
client_secret = pgD4adDd7lm-ksfG7UazUA
scopes = view_profile manage_research_exports
'''
    cfg = ConfigParser.SafeConfigParser()
    cfg.readfp(io.BytesIO(defaults))
    # Later files override earlier ones; missing files are ignored.
    cfg.read([
        '/etc/coursera/courseraoauth2client.cfg',
        os.path.expanduser('~/.coursera/courseraoauth2client.cfg'),
        'courseraoauth2client.cfg',
    ])
    return cfg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _load_token_cache(self):
    'Reads the local fs cache for pre-authorized access tokens'
    try:
        logging.debug('About to read from local file cache file %s',
                      self.token_cache_file)
        with open(self.token_cache_file, 'rb') as f:
            cached = cPickle.load(f)
        # Guard-clause form: reject malformed cache contents first.
        if not self._check_token_cache_type(cached):
            logging.warn('Found unexpected value in cache. %s', cached)
            return None
        logging.debug('Loaded from file system: %s', cached)
        return cached
    except IOError:
        # A missing cache file is normal on first run.
        logging.debug(
            'Did not find file: %s on the file system.',
            self.token_cache_file)
        return None
    except:
        # Best-effort cache: any other failure just means "no cache".
        logging.info(
            'Encountered exception loading from the file system.',
            exc_info=True)
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _save_token_cache(self, new_cache):
    'Write out to the filesystem a cache of the OAuth2 information.'
    logging.debug('Looking to write to local authentication cache...')
    # Refuse to persist values that do not look like a token cache dict.
    if not self._check_token_cache_type(new_cache):
        logging.error('Attempt to save a bad value: %s', new_cache)
        return
    try:
        logging.debug('About to write to fs cache file: %s',
                      self.token_cache_file)
        with open(self.token_cache_file, 'wb') as f:
            cPickle.dump(new_cache, f, protocol=cPickle.HIGHEST_PROTOCOL)
        logging.debug('Finished dumping cache_value to fs cache file.')
    except:
        # Caching is best-effort: never let a cache failure break auth.
        logging.exception(
            'Could not successfully cache OAuth2 secrets on the file '
            'system.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _check_token_cache_type(self, cache_value):
    '''
    Checks that cache_value has the shape of a cached token entry: a
    dict with a string 'token', a float 'expires', and — if present —
    a string 'refresh'. Returns True if the checks pass, False
    otherwise.
    '''
    def is_string(name):
        value = cache_value[name]
        # str covers Python 3; unicode covers Python 2 text.
        return isinstance(value, str) or isinstance(value, unicode)

    if not isinstance(cache_value, dict):
        return False
    if 'token' not in cache_value or 'expires' not in cache_value:
        return False
    if not is_string('token'):
        return False
    if not isinstance(cache_value['expires'], float):
        return False
    # 'refresh' is optional, but must be a string when present.
    return 'refresh' not in cache_value or is_string('refresh')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _authorize_new_tokens(self):
    '''
    Stands up a new localhost http server and retrieves new OAuth2
    access tokens from the Coursera OAuth2 server.
    '''
    logging.info('About to request new OAuth2 tokens from Coursera.')
    # Attempt to request new tokens from Coursera via the browser.
    state_token = uuid.uuid4().hex
    authorization_url = self._build_authorizaton_url(state_token)
    sys.stdout.write(
        'Please visit the following URL to authorize this app:\n')
    sys.stdout.write('\t%s\n\n' % authorization_url)
    if _platform == 'darwin':
        # OS X -- leverage the 'open' command present on all modern macs
        sys.stdout.write(
            'Mac OS X detected; attempting to auto-open the url '
            'in your default browser...\n')
        try:
            subprocess.check_call(['open', authorization_url])
        except:
            # BUG FIX: logging.exception() does not accept arbitrary
            # keyword arguments; the original `url=...` call raised
            # TypeError. Pass the url as a lazy %-format argument.
            logging.exception('Could not call `open %s`.',
                              authorization_url)
    if self.local_webserver_port is not None:
        # Boot up a local webserver to retrieve the response.
        server_address = ('', self.local_webserver_port)
        code_holder = CodeHolder()
        local_server = BaseHTTPServer.HTTPServer(
            server_address, _make_handler(state_token, code_holder))
        while not code_holder.has_code():
            local_server.handle_request()
        coursera_code = code_holder.code
    else:
        # No local port configured: fall back to manual copy/paste.
        coursera_code = raw_input('Please enter the code from Coursera: ')
    form_data = {
        'code': coursera_code,
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'redirect_uri': self._redirect_uri,
        'grant_type': 'authorization_code',
    }
    return self._request_tokens_from_token_endpoint(form_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _exchange_refresh_tokens(self):
    'Exchanges a refresh token for an access token'
    # Without a cached refresh token there is nothing to exchange.
    if self.token_cache is None or 'refresh' not in self.token_cache:
        return None
    refresh_token = self.token_cache['refresh']
    refresh_form = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
        'client_id': self.client_id,
        'client_secret': self.client_secret,
    }
    try:
        tokens = self._request_tokens_from_token_endpoint(refresh_form)
    except OAuth2Exception:
        logging.exception(
            'Encountered an exception during refresh token flow.')
        return None
    # The token endpoint does not re-issue the refresh token; carry the
    # existing one forward so future refreshes keep working.
    tokens['refresh'] = refresh_token
    return tokens
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def foreignkey(element, exceptions):
    '''
    function to determine if each select field needs a create button
    or not: True when the field is queryset-backed (a foreign key)
    and its label is set and not listed in exceptions.
    '''
    label = element.field.__dict__['label']
    # On Python 2, normalise lazy labels to unicode; on Python 3 the
    # unicode builtin is gone, so leave the label untouched.
    try:
        label = unicode(label)
    except NameError:
        pass
    if not label or label in exceptions:
        return False
    return "_queryset" in element.field.__dict__
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deserialize_by_field(value, field): """ Some types get serialized to JSON, as strings. If we know what they are supposed to be, we can deserialize them """
def deserialize_by_field(value, field):
    """ Some types get serialized to JSON, as strings. If we know what
    they are supposed to be, we can deserialize them """
    # Dispatch on the form-field type; first matching parser wins,
    # anything unrecognised passes through unchanged.
    if isinstance(field, forms.DateTimeField):
        return parse_datetime(value)
    if isinstance(field, forms.DateField):
        return parse_date(value)
    if isinstance(field, forms.TimeField):
        return parse_time(value)
    return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def main():
    "Boots up the command line tool"
    logging.captureWarnings(True)
    args = build_parser().parse_args()
    # Configure logging
    args.setup_logging(args)
    # Dispatch into the appropriate subcommand function.
    try:
        return args.func(args)
    except SystemExit:
        # argparse and subcommands may exit on purpose; let that through.
        raise
    except:
        # Catch-all boundary: log the traceback and exit non-zero.
        logging.exception('Problem when running command. Sorry!')
        sys.exit(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def objectatrib(instance, atrib):
    '''
    this filter is going to be useful to execute an object method or
    get an object attribute dynamically.

    The path in ``atrib`` may use either ``__`` or ``.`` as a
    separator (e.g. ``"user__profile__get_name"``). For each step, a
    key lookup is used for dicts; otherwise the attribute is fetched
    and, when calling it succeeds, the call's result is used.
    '''
    # Normalise the double-underscore separator to dots, then walk.
    path = atrib.replace("__", ".").split(".")
    obj = instance
    for name in path:
        # BUG FIX: isinstance() instead of type(obj) == dict, so dict
        # subclasses (OrderedDict, defaultdict, ...) are indexed rather
        # than failing the attribute path.
        if isinstance(obj, dict):
            obj = obj[name]
        else:
            try:
                obj = getattr(obj, name)()
            except Exception:
                # Not callable (or the call failed): use the raw value.
                obj = getattr(obj, name)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def upload_path(instance, filename):
    '''
    This method is created to return the path to upload files.
    This path must be different from any other to avoid problems:
    modelname/YYYY/MM/DD/<stem><random digits>.<ext>
    '''
    # get the model name
    model_name = model_inspect(instance)['modelname']
    # Current date split into year / month / day strings.
    curr_year, curr_month, curr_day = (
        datetime.now().strftime("%Y-%m-%d").split("-"))
    # Split the extension off and append a random decimal-digit suffix
    # so two uploads of the same file never collide.
    pieces = filename.split(".")
    stem = "".join(pieces[:-1])
    extension = pieces[-1]
    random_digits = str(random.random()).split(".")[1]
    unique_name = ".".join([stem + random_digits, extension])
    # the path is built using the current date and the modelname
    return "/".join(
        [model_name, curr_year, curr_month, curr_day, unique_name])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def remove_getdisplay(field_name):
    '''
    for string 'get_FIELD_NAME_display' return 'FIELD_NAME'
    '''
    prefix = 'get_'
    suffix = '_display'
    # Strip the wrapper only when both ends match; otherwise the name
    # is returned untouched.
    if field_name.startswith(prefix) and field_name.endswith(suffix):
        return field_name[len(prefix):-len(suffix)]
    return field_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def append(self, filename_in_zip, file_contents):
    '''
    Appends a file with name filename_in_zip and contents of
    file_contents to the in-memory zip.

    Returns self so calls can be chained; the buffer is rewound to the
    start afterwards so it is ready to be read back out.
    '''
    # BUG FIX: the original seek(-1, io.SEEK_END) raises ValueError on
    # an empty buffer (negative seek position). Seeking to the end is
    # sufficient; ZipFile repositions itself in append mode anyway.
    self.in_memory_zip.seek(0, io.SEEK_END)
    # Get a handle to the in-memory zip in append mode
    zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
    # Write the file to the in-memory zip
    zf.writestr(filename_in_zip, file_contents)
    # Mark the files as having been created on Windows so that
    # Unix permissions are not inferred as 0000
    for zfile in zf.filelist:
        zfile.create_system = 0
    # Close the ZipFile (flushes the central directory)
    zf.close()
    # Rewind the file
    self.in_memory_zip.seek(0)
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def writetofile(self, filename):
    '''Writes the in-memory zip to a file.

    BUG FIX: open in binary mode — self.read() yields the raw zip
    bytes, and text mode would fail on Python 3 (and corrupt line
    endings on Windows under Python 2). The with-block also guarantees
    the handle is closed even if write() raises.
    '''
    with open(filename, "wb") as f:
        f.write(self.read())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sponsor_image_url(sponsor, name): """Returns the corresponding url from the sponsors images"""
def sponsor_image_url(sponsor, name):
    """Returns the corresponding url from the sponsors images"""
    # PERF FIX: the original ran the queryset twice (exists() then
    # filter().first()). A single .first() call does both jobs and
    # still resolves multiple matches by always taking the first one.
    sponsor_file = sponsor.files.filter(name=name).first()
    if sponsor_file is not None:
        return sponsor_file.item.url
    return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sponsor_tagged_image(sponsor, tag): """returns the corresponding url from the tagged image list."""
def sponsor_tagged_image(sponsor, tag):
    """returns the corresponding url from the tagged image list."""
    # PERF FIX: single query instead of the original exists()+filter()
    # pair; .first() resolves multiple matches to the first one.
    tagged = sponsor.files.filter(tag_name=tag).first()
    if tagged is not None:
        return tagged.tagged_file.item.url
    return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ifusergroup(parser, token): """ Check to see if the currently logged in user belongs to a specific group. Requires the Django authentication contrib app and middleware. """
def ifusergroup(parser, token):
    """ Check to see if the currently logged in user belongs to a specific group. Requires the Django authentication contrib app and middleware. """
    try:
        # Everything after the tag name is a group name.
        groups = list(token.split_contents()[1:])
    except ValueError:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.")
    nodelist_true = parser.parse(('else', 'endifusergroup'))
    token = parser.next_token()
    if token.contents == 'else':
        # Consume the else branch up to the closing tag.
        nodelist_false = parser.parse(('endifusergroup',))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    return GroupCheckNode(groups, nodelist_true, nodelist_false)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def OpenHandle(self):
    '''Gets a handle for use with other vSphere Guest API functions. The guest library handle provides a context for accessing information about the virtual machine. Virtual machine statistics and state data are associated with a particular guest library handle, so using one handle does not affect the data associated with another handle.'''
    # Reuse a previously cached handle when present.
    if hasattr(self, 'handle'):
        return self.handle
    else:
        handle = c_void_p()
        ret = vmGuestLib.VMGuestLib_OpenHandle(byref(handle))
        if ret != VMGUESTLIB_ERROR_SUCCESS:
            raise VMGuestLibException(ret)
        # NOTE(review): the freshly opened handle is returned but never
        # stored on self.handle, so repeated calls open new handles while
        # CloseHandle() only releases a cached one — confirm whether
        # self.handle should be assigned here.
        return handle
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def CloseHandle(self):
    '''Releases a handle acquired with VMGuestLib_OpenHandle'''
    # Nothing to do when no handle was ever cached.
    if not hasattr(self, 'handle'):
        return
    ret = vmGuestLib.VMGuestLib_CloseHandle(self.handle.value)
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    # Drop the cached handle so a later OpenHandle() starts fresh.
    del self.handle
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def UpdateInfo(self):
    '''Updates information about the virtual machine. This information is associated with the VMGuestLibHandle.

    VMGuestLib_UpdateInfo requires similar CPU resources to a system call and therefore can affect performance. If you are concerned about performance, minimize the number of calls to VMGuestLib_UpdateInfo.

    If your program uses multiple threads, each thread must use a different handle. Otherwise, you must implement a locking scheme around update calls. The vSphere Guest API does not implement internal locking around access with a handle.'''
    result = vmGuestLib.VMGuestLib_UpdateInfo(self.handle.value)
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetSessionId(self):
    '''Retrieves the VMSessionID for the current session. Call this function after calling VMGuestLib_UpdateInfo. If VMGuestLib_UpdateInfo has never been called, VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO.'''
    session_id = c_void_p()
    result = vmGuestLib.VMGuestLib_GetSessionId(self.handle.value, byref(session_id))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return session_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetCpuLimitMHz(self):
    '''Retrieves the upperlimit of processor use in MHz available to the virtual machine. For information about setting the CPU limit, see "Limits and Reservations" on page 14.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetCpuLimitMHz(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetCpuReservationMHz(self):
    '''Retrieves the minimum processing power in MHz reserved for the virtual machine. For information about setting a CPU reservation, see "Limits and Reservations" on page 14.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetCpuShares(self):
    '''Retrieves the number of CPU shares allocated to the virtual machine. For information about how an ESX server uses CPU shares to manage virtual machine priority, see the vSphere Resource Management Guide.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetCpuShares(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetElapsedMs(self):
    '''Retrieves the number of milliseconds that have passed in the virtual machine since it last started running on the server. The count of elapsed time restarts each time the virtual machine is powered on, resumed, or migrated using VMotion. This value counts milliseconds, regardless of whether the virtual machine is using processing power during that time. You can combine this value with the CPU time used by the virtual machine (VMGuestLib_GetCpuUsedMs) to estimate the effective virtual machine CPU speed. cpuUsedMs is a subset of this value.'''
    # 64-bit counter: millisecond totals overflow 32 bits quickly.
    value = c_uint64()
    result = vmGuestLib.VMGuestLib_GetElapsedMs(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetHostProcessorSpeed(self):
    '''Retrieves the speed of the ESX system's physical CPU in MHz.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetHostProcessorSpeed(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemActiveMB(self):
    '''Retrieves the amount of memory the virtual machine is actively using its estimated working set size.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemActiveMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemLimitMB(self):
    '''Retrieves the upper limit of memory that is available to the virtual machine. For information about setting a memory limit, see "Limits and Reservations" on page 14.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemLimitMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemMappedMB(self):
    '''Retrieves the amount of memory that is allocated to the virtual machine. Memory that is ballooned, swapped, or has never been accessed is excluded.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemOverheadMB(self):
    '''Retrieves the amount of "overhead" memory associated with this virtual machine that is currently consumed on the host system. Overhead memory is additional memory that is reserved for data structures required by the virtualization layer.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemOverheadMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemReservationMB(self):
    '''Retrieves the minimum amount of memory that is reserved for the virtual machine. For information about setting a memory reservation, see "Limits and Reservations" on page 14.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemReservationMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemShares(self):
    '''Retrieves the number of memory shares allocated to the virtual machine. For information about how an ESX server uses memory shares to manage virtual machine priority, see the vSphere Resource Management Guide.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemShares(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemSwappedMB(self):
    '''Retrieves the amount of memory that has been reclaimed from this virtual machine by transparently swapping guest memory to disk.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemSwappedMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemTargetSizeMB(self):
    '''Retrieves the size of the target memory allocation for this virtual machine.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemTargetSizeMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def GetMemUsedMB(self):
    '''Retrieves the estimated amount of physical host memory currently consumed for this virtual machine's physical memory.'''
    value = c_uint()
    result = vmGuestLib.VMGuestLib_GetMemUsedMB(self.handle.value, byref(value))
    if result != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(result)
    return value.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def wafer_form_helper(context, helper_name):
    '''
    Find the specified Crispy FormHelper and instantiate it.
    Handy when you are crispyifying other apps' forms.
    '''
    request = context.request
    module_name, class_name = helper_name.rsplit('.', 1)
    # Import the module on first use; afterwards pull it from sys.modules.
    if module_name not in sys.modules:
        __import__(module_name)
    helper_class = getattr(sys.modules[module_name], class_name)
    return helper_class(request=request)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def page_menus(root_menu): """Add page menus."""
def page_menus(root_menu):
    """Add page menus."""
    for page in Page.objects.filter(include_in_menu=True):
        path = page.get_path()
        # Pages nested below the top level hang off their root page's menu;
        # top-level pages go straight onto the root menu.
        parent_menu = path[0] if len(path) > 1 else None
        try:
            root_menu.add_item(
                page.name, page.get_absolute_url(), menu=parent_menu)
        except MenuError as e:
            logger.error("Bad menu item %r for page with slug %r."
                         % (e, page.slug))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def redirect_profile(request):
    """Send a freshly logged-in user to their own profile page.

    Anonymous users are bounced to the login view with this view as the
    ``next`` destination.
    """
    if not request.user.is_authenticated:
        return redirect_to_login(next=reverse(redirect_profile))
    return HttpResponseRedirect(
        reverse('wafer_user_profile', args=(request.user.username,)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reviewed_badge(user, talk): """Returns a badge for the user's reviews of the talk"""
def reviewed_badge(user, talk):
    """Build template context describing this user's review of ``talk``.

    Always sets ``reviewed``; adds ``review_is_current`` only when a review
    by this user exists.
    """
    context = {'reviewed': False}
    if user and not user.is_anonymous():
        review = talk.reviews.filter(reviewer=user).first()
        if review:
            context['reviewed'] = True
            context['review_is_current'] = review.is_current()
    return context
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def form_valid(self, form, forms): """ Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions and then redirects to a success page. """
def form_valid(self, form, forms):
    """Persist the main form plus its dependent forms, then redirect.

    On update (``self.object`` already set) every form is saved as-is; on
    create the main form is saved first and each dependent form gets its
    linker field pointed at the newly created object before saving.
    """
    if self.object:
        # Editing: the links to the parent object already exist.
        form.save()
        for formobj, linkerfield in forms:
            if form != formobj:
                formobj.save()
    else:
        # Creating: save the parent first so dependents can reference it.
        self.object = form.save()
        for formobj, linkerfield in forms:
            if form != formobj:
                setattr(formobj.instance, linkerfield, self.object)
                formobj.save()
    return HttpResponseRedirect(self.get_success_url())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def form_invalid(self, form, forms, open_tabs, position_form_default): """ Called if a form is invalid. Re-renders the context data with the data-filled forms and errors. """
def form_invalid(self, form, forms, open_tabs, position_form_default):
    """Re-render the page with the bound forms and their validation errors.

    :param form: the main (invalid) form
    :param forms: dependent forms to re-display alongside the main form
    :param open_tabs: UI state describing which tabs were open on submit
    :param position_form_default: default data for the position form
    """
    # Removed a commented-out legacy call that duplicated the line below.
    return self.render_to_response(self.get_context_data(
        form=form, forms=forms, open_tabs=open_tabs,
        position_form_default=position_form_default))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def univariate_envelope_plot(x, mean, std, ax=None, base_alpha=0.375, envelopes=[1, 3], lb=None, ub=None, expansion=10, **kwargs): """Make a plot of a mean curve with uncertainty envelopes. """
# Resolve the target axes: create a new figure when none is given, or reuse
# the current axes when the sentinel string 'gca' is passed.
if ax is None:
    f = plt.figure()
    ax = f.add_subplot(1, 1, 1)
elif ax == 'gca':
    ax = plt.gca()
mean = scipy.asarray(mean, dtype=float).copy()
std = scipy.asarray(std, dtype=float).copy()
# Truncate the data so matplotlib doesn't die:
# widen the [lb, ub] clipping window by `expansion` times its span first, so
# the envelopes are not visibly flattened right at the bounds.
if lb is not None and ub is not None and expansion != 1.0:
    expansion *= ub - lb
    ub = ub + expansion
    lb = lb - expansion
if ub is not None:
    mean[mean > ub] = ub
if lb is not None:
    mean[mean < lb] = lb
# Plot the mean and reuse its line colour for the shaded envelopes.
l = ax.plot(x, mean, **kwargs)
color = plt.getp(l[0], 'color')
e = []
for i in envelopes:
    lower = mean - i * std
    upper = mean + i * std
    if ub is not None:
        lower[lower > ub] = ub
        upper[upper > ub] = ub
    if lb is not None:
        lower[lower < lb] = lb
        upper[upper < lb] = lb
    # Wider envelopes are drawn more transparent (alpha scales as 1/i).
    e.append(ax.fill_between(x, lower, upper, facecolor=color,
                             alpha=base_alpha / i))
return (l, e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_token(self): """Gains token from secure backend service. :return: Token formatted for Cocaine protocol header. """
# OAuth-style grant type sent to the secure backend (TVM) service.
grant_type = 'client_credentials'
# ticket_full returns a channel; its rx side yields the actual ticket payload.
channel = yield self._tvm.ticket_full(
    self._client_id, self._client_secret, grant_type, {})
ticket = yield channel.rx.get()
# Tornado coroutine-style return: deliver the protocol-formatted token.
raise gen.Return(self._make_token(ticket))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_summary(summaryfile, **kwargs): """Extracting information from an albacore summary file. Only reads which have a >0 length are returned. The fields below may or may not exist, depending on the type of sequencing performed. Fields 1-14 are for 1D sequencing. Fields 1-23 for 2D sequencing. Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing Fields 28-38 for barcoded workflows 1 filename 2 read_id 3 run_id 4 channel 5 start_time 6 duration 7 num_events 8 template_start 9 num_events_template 10 template_duration 11 num_called_template 12 sequence_length_template 13 mean_qscore_template 14 strand_score_template 15 complement_start 16 num_events_complement 17 complement_duration 18 num_called_complement 19 sequence_length_complement 20 mean_qscore_complement 21 strand_score_complement 22 sequence_length_2d 23 mean_qscore_2d 24 filename1 25 filename2 26 read_id1 27 read_id2 28 barcode_arrangement 29 barcode_score 30 barcode_full_arrangement 31 front_score 32 rear_score 33 front_begin_index 34 front_foundseq_length 35 rear_end_index 36 rear_foundseq_length 37 kit 38 variant """
def process_summary(summaryfile, **kwargs):
    """Extract read metrics from an albacore/guppy sequencing summary file.

    Selects the columns appropriate for ``kwargs["readtype"]`` ("1D", "2D"
    or "1D2"), optionally including the barcode column when
    ``kwargs["barcoded"]`` is true, and returns a DataFrame restricted to
    reads of non-zero length.
    """
    logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
        summaryfile, kwargs["readtype"]))
    ut.check_existance(summaryfile)
    if kwargs["readtype"] == "1D":
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_template", "mean_qscore_template"]
    elif kwargs["readtype"] in ["2D", "1D2"]:
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_2d", "mean_qscore_2d"]
    else:
        # Previously an unknown readtype fell through to an opaque NameError
        # on `cols`; fail with an explicit message instead.
        logging.error("Nanoget: unsupported readtype {}".format(kwargs["readtype"]))
        sys.exit("ERROR: unsupported readtype {}".format(kwargs["readtype"]))
    if kwargs["barcoded"]:
        cols.append("barcode_arrangement")
        logging.info("Nanoget: Extracting metrics per barcode.")
    try:
        datadf = pd.read_csv(
            filepath_or_buffer=summaryfile,
            sep="\t",
            usecols=cols,
        )
    except ValueError:
        logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
            summaryfile, ', '.join(cols)))
        sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
            summaryfile, ', '.join(cols)))
    # NOTE(review): renaming assumes read_csv returns the usecols in the
    # requested order — confirm against the summary file layout.
    if kwargs["barcoded"]:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals", "barcode"]
    else:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals"]
    logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
    return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_bam(bam, samtype="bam"): """Check if bam file is valid. Bam file should: - exists - has an index (create if necessary) - is sorted by coordinate - has at least one mapped read """
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
    pysam.index(bam)
    samfile = pysam.AlignmentFile(bam, "rb")  # Need to reload the samfile after creating index
    logging.info("Nanoget: No index for bam file could be found, created index.")
# Downstream extraction iterates per chromosome and needs coordinate order.
if not samfile.header['HD']['SO'] == 'coordinate':
    logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
    sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
    # .mapped/.unmapped require the index created above.
    logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
        bam, samfile.mapped, samfile.unmapped))
    if samfile.mapped == 0:
        logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
        sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_ubam(bam, **kwargs): """Extracting metrics from unaligned bam format Extracting lengths """
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
# check_sq=False: an unaligned bam has no @SQ header lines.
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
    pysam.index(bam)
    # Need to reload the samfile after creating index
    samfile = pysam.AlignmentFile(bam, "rb")
    logging.info("Nanoget: No index for bam file could be found, created index.")
# until_eof=True iterates every record, including unmapped reads.
datadf = pd.DataFrame(
    data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
          for read in samfile.fetch(until_eof=True)],
    columns=["readIDs", "quals", "lengths"]) \
    .dropna(axis='columns', how='all') \
    .dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
    bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_bam(bam, **kwargs): """Combines metrics from bam after extraction. Processing function: calls pool of worker functions to extract from a bam file the following metrics: -lengths -aligned lengths -qualities -aligned qualities -mapping qualities -edit distances to the reference genome scaled by read length Returned in a pandas DataFrame """
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
# One worker task per chromosome; each worker gets a (bam, chromosome) pair.
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
    # Flatten the per-chromosome result lists into one frame of per-read rows.
    datadf = pd.DataFrame(
        data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
        columns=["readIDs", "quals", "aligned_quals", "lengths",
                 "aligned_lengths", "mapQ", "percentIdentity"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
    bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_from_bam(params): """Extracts metrics from bam. Worker function per chromosome loop over a bam file and create list with tuples containing metrics: -qualities -aligned qualities -lengths -aligned lengths -mapping qualities -edit distances to the reference genome scaled by read length """
def extract_from_bam(params):
    """Worker: collect per-read metrics for one chromosome of a bam file.

    Returns a list of tuples (read name, mean quality, mean aligned quality,
    read length, aligned length, mapping quality, percent identity) for
    every primary alignment on the chromosome.
    """
    bam, chromosome = params
    samfile = pysam.AlignmentFile(bam, "rb")
    metrics = []
    for read in samfile.fetch(reference=chromosome, multiple_iterators=True):
        if read.is_secondary:
            continue
        metrics.append((
            read.query_name,
            nanomath.ave_qual(read.query_qualities),
            nanomath.ave_qual(read.query_alignment_qualities),
            read.query_length,
            read.query_alignment_length,
            read.mapping_quality,
            get_pID(read)))
    return metrics
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pID(read): """Return the percent identity of a read. based on the NM tag if present, if not calculate from MD tag and CIGAR string read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L """
def get_pID(read):
    """Return the percent identity of an aligned read, or None.

    Uses the NM tag when present; otherwise reconstructs the edit distance
    from the MD tag and CIGAR string.  Returns None when the required tags
    are missing or when the aligned length is zero (possible for ultra long
    reads aligned with minimap2 -L).
    """
    try:
        return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
    except KeyError:
        try:
            return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
                          / read.query_alignment_length)
        except (KeyError, ZeroDivisionError):
            # Fix: the MD/CIGAR fallback previously caught only KeyError, so
            # a missing NM tag combined with a zero aligned length raised
            # ZeroDivisionError instead of returning None.
            return None
    except ZeroDivisionError:
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_compressed_input(inputfq, file_type="fastq"): """Return handles from compressed files according to extension. Check for which fastq input is presented and open a handle accordingly Can read from compressed files (gz, bz2, bgz) or uncompressed Relies on file extensions to recognize compression """
def handle_compressed_input(inputfq, file_type="fastq"):
    """Open a (possibly compressed) sequence file and return a text handle.

    Compression is recognised purely by file extension: gz/bgz, bz2, or one
    of the plain fastq/fasta extensions.  Unrecognised extensions abort.
    """
    ut.check_existance(inputfq)
    if inputfq.endswith(('.gz', 'bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        return gzip.open(inputfq, 'rt')
    if inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        return bz2.open(inputfq, 'rt')
    if inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
        return open(inputfq, 'r')
    logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
    sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
             'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_fasta(fasta, **kwargs): """Combine metrics extracted from a fasta file."""
def process_fasta(fasta, **kwargs):
    """Collect read lengths from a fasta file into a DataFrame."""
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    handle = handle_compressed_input(fasta, file_type="fasta")
    lengths = pd.DataFrame(
        data=[len(rec) for rec in SeqIO.parse(handle, "fasta")],
        columns=["lengths"]).dropna()
    return ut.reduce_memory_usage(lengths)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_fastq_plain(fastq, **kwargs): """Combine metrics extracted from a fastq file."""
def process_fastq_plain(fastq, **kwargs):
    """Collect per-read quality and length metrics from a plain fastq file."""
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    handle = handle_compressed_input(fastq)
    records = [metrics for metrics in extract_from_fastq(handle) if metrics]
    frame = pd.DataFrame(data=records, columns=["quals", "lengths"]).dropna()
    return ut.reduce_memory_usage(frame)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stream_fastq_full(fastq, threads): """Generator for returning metrics extracted from fastq. Extract from a fastq file: -readname -average and median quality -read_lenght """
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
# Fan the per-record extraction out over a process pool; executor.map
# preserves input order, so results stream back in file order.
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
    for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
        yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_fastq_rich(fastq, **kwargs): """Extract metrics from a richer fastq file. Extract information from fastq files generated by albacore or MinKNOW, containing richer information in the header (key-value pairs) read=<int> [72] ch=<int> [159] start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp Z indicates UTC time, T is the delimiter between date expression and time expression dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc()) """
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a rich (albacore/MinKNOW) fastq file.

    The fastq headers carry key=value pairs; per read this collects mean
    quality, length, channel id ("ch"), "start_time" and "runid".
    Exits with an error when an expected header field is missing.
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    inputfastq = handle_compressed_input(fastq)
    res = []
    for record in SeqIO.parse(inputfastq, "fastq"):
        try:
            read_info = info_to_dict(record.description)
            res.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 read_info["ch"],
                 read_info["start_time"],
                 read_info["runid"]))
        except KeyError:
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            # The previous message was built with a source-level line
            # continuation inside the string literal, leaking indentation
            # whitespace into the user-facing text; use implicit
            # concatenation instead.
            sys.exit("Unexpected fastq identifier:\n{}\n\n"
                     "missing one or more of expected fields "
                     "'ch', 'start_time' or 'runid'".format(record.description))
    df = pd.DataFrame(
        data=res,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fq_minimal(fq): """Minimal fastq metrics extractor. Quickly parse a fasta/fastq file - but makes expectations on the file format There will be dragons if unexpected format is used Expects a fastq_rich format, but extracts only timestamp and length """
def fq_minimal(fq):
    """Generator yielding (start_time, line length) per fastq record.

    Assumes the rich fastq layout: four lines per record with the timestamp
    as the fifth whitespace-separated header token ("start_time=...").
    There will be dragons if another format is used.  Yields a final None
    when the input is exhausted.
    """
    try:
        while True:
            header = next(fq)
            # Strip '@', take token 5, then drop the "start_time=" prefix
            # (11 chars) and the trailing character.
            timestamp = header[1:].split(" ")[4][11:-1]
            read_len = len(next(fq))
            next(fq)  # separator line
            next(fq)  # quality line
            yield timestamp, read_len
    except StopIteration:
        yield None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_piece(string, index): """ Returns Piece subclass given index of piece. :type: index: int :type: loc Location :raise: KeyError """
def _get_piece(string, index):
    """Map the piece letter at ``string[index]`` to its Piece subclass.

    :type: string: str
    :type: index: int
    :raise: ValueError if the letter does not name a piece
    """
    letter = string[index].strip().upper()
    lookup = {'R': Rook, 'P': Pawn, 'B': Bishop,
              'N': Knight, 'Q': Queen, 'K': King}
    try:
        return lookup[letter]
    except KeyError:
        raise ValueError("Piece {} is invalid".format(letter))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def short_alg(algebraic_string, input_color, position): """ Converts a string written in short algebraic form, the color of the side whose turn it is, and the corresponding position into a complete move that can be played. If no moves match, None is returned. Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q :type: algebraic_string: str :type: input_color: Color :type: position: Board """
def short_alg(algebraic_string, input_color, position):
    """Convert short algebraic notation into a playable move.

    Builds an incomplete move from the notation and side to move, then
    resolves it against the position; returns None when nothing matches.
    Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q
    """
    candidate = incomplete_alg(algebraic_string, input_color, position)
    return make_legal(candidate, position)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def implicify_hydrogens(self): """ remove explicit hydrogen if possible :return: number of removed hydrogens """
# Map each heavy atom to the list of its explicit hydrogen neighbours.
explicit = defaultdict(list)
c = 0
for n, atom in self.atoms():
    if atom.element == 'H':
        for m in self.neighbors(n):
            if self._node[m].element != 'H':
                explicit[m].append(n)
for n, h in explicit.items():
    atom = self._node[n]
    len_h = len(h)
    # Try to drop the largest prefix of explicit hydrogens first: removing
    # i of them is only valid when the atom's implicit-H count, computed
    # without those bonds, equals i (so total valence is preserved).
    for i in range(len_h, 0, -1):
        hi = h[:i]
        if atom.get_implicit_h([y.order for x, y in self._adj[n].items() if x not in hi]) == i:
            for x in hi:
                self.remove_node(x)
                c += 1
            break
self.flush_cache()
return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def explicify_hydrogens(self): """ add explicit hydrogens to atoms :return: number of added atoms """
def explicify_hydrogens(self):
    """Attach an explicit H atom for every implicit hydrogen.

    :return: number of atoms added
    """
    targets = []
    for n, atom in self.atoms():
        if atom.element == 'H':
            continue
        orders = [bond.order for bond in self._adj[n].values()]
        targets.extend(n for _ in range(atom.get_implicit_h(orders)))
    for n in targets:
        self.add_bond(n, self.add_atom(H), Bond())
    self.flush_cache()
    return len(targets)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_valence(self): """ check valences of all atoms :return: list of invalid atoms """
def check_valence(self):
    """Validate the valence of every atom.

    :return: list of atom ids whose valence is inconsistent with their
        bonding environment
    """
    invalid = []
    for n, atom in self.atoms():
        if not atom.check_valence(self.environment(n)):
            invalid.append(n)
    return invalid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _matcher(self, other): """ return VF2 GraphMatcher MoleculeContainer < MoleculeContainer MoleculeContainer < CGRContainer """
def _matcher(self, other):
    """Build a VF2 GraphMatcher for substructure search.

    Supports MoleculeContainer < MoleculeContainer and
    MoleculeContainer < CGRContainer.
    """
    cgr_cls = self._get_subclass('CGRContainer')
    if not isinstance(other, (cgr_cls, MoleculeContainer)):
        raise TypeError('only cgr-cgr possible')
    return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_datetime(date): """Turn a date into a datetime at midnight. """
def to_datetime(date):
    """Return the datetime for midnight at the start of ``date``."""
    midnight = datetime.datetime.min.time()
    return datetime.datetime.combine(date, midnight)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_size_changes(self, issue): """Yield an IssueSnapshot for each time the issue size changed """
def iter_size_changes(self, issue):
    """Yield an IssueSizeSnapshot for each time the issue size changed.

    The first snapshot reflects the size at creation time: inferred from the
    first recorded 'Story Points' change, or from the current value when the
    size was set on creation and never changed.
    """
    # Collect all 'Story Points' changelog entries, if a changelog exists.
    try:
        size_changes = [item
                        for history in issue.changelog.histories
                        for item in history.items
                        if item.field == 'Story Points']
    except AttributeError:
        return
    # If we have no size changes and the issue has a current size then a size
    # must have been specified at issue creation time.
    try:
        current_size = issue.fields.__dict__[self.fields['StoryPoints']]
    except (KeyError, AttributeError):
        # Was a bare `except:`; narrowed to the lookup failures actually
        # expected here.
        current_size = None
    size = size_changes[0].fromString if size_changes else current_size
    # Issue was created
    yield IssueSizeSnapshot(
        change=None,
        key=issue.key,
        date=dateutil.parser.parse(issue.fields.created),
        size=size
    )
    # Removed dead commented-out resolution-tracking lines.
    for change in issue.changelog.histories:
        change_date = dateutil.parser.parse(change.created)
        for item in change.items:
            if item.field == 'Story Points':
                # StoryPoints value was changed
                size = item.toString
                yield IssueSizeSnapshot(
                    change=item.field,
                    key=issue.key,
                    date=change_date,
                    size=size
                )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_changes(self, issue, include_resolution_changes=True): """Yield an IssueSnapshot for each time the issue changed status or resolution """
is_resolved = False # Find the first status change, if any try: status_changes = list(filter( lambda h: h.field == 'status', itertools.chain.from_iterable([c.items for c in issue.changelog.histories]))) except AttributeError: return last_status = status_changes[0].fromString if len(status_changes) > 0 else issue.fields.status.name last_resolution = None # Issue was created yield IssueSnapshot( change=None, key=issue.key, date=dateutil.parser.parse(issue.fields.created), status=last_status, resolution=None, is_resolved=is_resolved ) for change in issue.changelog.histories: change_date = dateutil.parser.parse(change.created) resolutions = list(filter(lambda i: i.field == 'resolution', change.items)) is_resolved = (resolutions[-1].to is not None) if len(resolutions) > 0 else is_resolved for item in change.items: if item.field == 'status': # Status was changed last_status = item.toString yield IssueSnapshot( change=item.field, key=issue.key, date=change_date, status=last_status, resolution=last_resolution, is_resolved=is_resolved ) elif item.field == 'resolution': last_resolution = item.toString if include_resolution_changes: yield IssueSnapshot( change=item.field, key=issue.key, date=change_date, status=last_status, resolution=last_resolution, is_resolved=is_resolved )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_issues(self, criteria={}, jql=None, order='KEY ASC', verbose=False, changelog=True): """Return a list of issues with changelog metadata. Searches for the `issue_types`, `project`, `valid_resolutions` and 'jql_filter' set in the passed-in `criteria` object. Pass a JQL string to further qualify the query results. """
def find_issues(self, criteria=None, jql=None, order='KEY ASC', verbose=False, changelog=True):
    """Return a list of issues, optionally with changelog metadata.

    Builds a JQL query from the `project`, `issue_types`, `valid_resolutions`
    and `jql_filter` entries of ``criteria``, optionally AND-ed with an extra
    ``jql`` string, and pages through the results.

    :param criteria: dict of search criteria (None means no criteria)
    :param jql: extra JQL fragment AND-ed onto the generated query
    :param order: ORDER BY clause for the query
    :param verbose: print progress information
    :param changelog: expand the changelog on returned issues
    """
    if criteria is None:
        # A mutable default argument would be shared between calls; use a
        # None sentinel instead.
        criteria = {}
    query = []
    if criteria.get('project'):
        query.append('project IN (%s)' % ', '.join(['"%s"' % p for p in criteria['project']]))
    if criteria.get('issue_types'):
        query.append('issueType IN (%s)' % ', '.join(['"%s"' % t for t in criteria['issue_types']]))
    if criteria.get('valid_resolutions'):
        query.append('(resolution IS EMPTY OR resolution IN (%s))' %
                     ', '.join(['"%s"' % r for r in criteria['valid_resolutions']]))
    if criteria.get('jql_filter') is not None:
        query.append('(%s)' % criteria['jql_filter'])
    if jql is not None:
        query.append('(%s)' % jql)
    queryString = "%s ORDER BY %s" % (' AND '.join(query), order,)
    if verbose:
        print("Fetching issues with query:", queryString)
    fromRow = 0
    issues = []
    # Page through the results until an empty page is returned.
    while True:
        try:
            if changelog:
                pageofissues = self.jira.search_issues(
                    queryString, expand='changelog',
                    maxResults=self.settings['max_results'], startAt=fromRow)
            else:
                pageofissues = self.jira.search_issues(
                    queryString, maxResults=self.settings['max_results'], startAt=fromRow)
            fromRow = fromRow + int(self.settings['max_results'])
            issues += pageofissues
            if verbose:
                print("Got %s lines per jira query from result starting at line number %s "
                      % (self.settings['max_results'], fromRow))
            if len(pageofissues) == 0:
                break
        except JIRAError as e:
            # NOTE(review): any query error discards pages fetched so far.
            print("Jira query error with: {}\n{}".format(queryString, e))
            return []
    if verbose:
        print("Fetched", len(issues), "issues")
    return issues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_catalogs(self): """ Lists existing catalogs respect to ui view template format """
def list_catalogs(self):
    """Render the catalog selection form listing all stored catalogs."""
    form = CatalogSelectForm(current=self.current)
    choices = [(key, key) for key in fixture_bucket.get_keys()]
    form.set_choices_of('catalog', choices)
    self.form_out(form)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_catalog(self): """ Get existing catalog and fill the form with the model data. If given key not found as catalog, it generates an empty catalog data form. """
catalog_data = fixture_bucket.get(self.input['form']['catalog']) # define add or edit based on catalog data exists add_or_edit = "Edit" if catalog_data.exists else "Add" # generate form catalog_edit_form = CatalogEditForm( current=self.current, title='%s: %s' % (add_or_edit, self.input['form']['catalog'])) # add model data to form if catalog_data.exists: if type(catalog_data.data) == list: # if catalog data is an array it means no other language of value defined, therefor the value is turkish for key, data in enumerate(catalog_data.data): catalog_edit_form.CatalogDatas(catalog_key=key or "0", en='', tr=data) if type(catalog_data.data) == dict: for key, data in catalog_data.data.items(): catalog_edit_form.CatalogDatas(catalog_key=key, en=data['en'], tr=data['tr']) else: catalog_edit_form.CatalogDatas(catalog_key="0", en='', tr='') self.form_out(catalog_edit_form) # schema key for get back what key will be saved, used in save_catalog form self.output["object_key"] = self.input['form']['catalog']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_catalog(self): """ Saves the catalog data to given key Cancels if the cmd is cancel Notifies user with the process. """
def save_catalog(self):
    """Persist the edited catalog data, or cancel the edit.

    On 'save_catalog' the form rows are folded into a {key: {en, tr}}
    mapping and stored under the previously selected object key; on
    'cancel' only a notification is emitted.
    """
    if self.input["cmd"] == 'save_catalog':
        try:
            edited_object = dict()
            for row in self.input["form"]["CatalogDatas"]:
                edited_object[row["catalog_key"]] = {"en": row["en"], "tr": row["tr"]}
            catalog = fixture_bucket.get(self.input["object_key"])
            catalog.data = edited_object
            catalog.store()
            # notify user by passing notify in output object
            self.output["notify"] = "catalog: %s successfully updated." % self.input[
                "object_key"]
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            raise HTTPError(500, "Form object could not be saved")
    elif self.input["cmd"] == 'cancel':
        self.output["notify"] = "catalog: %s canceled." % self.input["object_key"]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_truthy(*dicts): """Merge multiple dictionaries, keeping the truthy values in case of key collisions. Accepts any number of dictionaries, or any other object that returns a 2-tuple of key and value pairs when its `.items()` method is called. If a key exists in multiple dictionaries passed to this function, the values from the latter dictionary is kept. If the value of the latter dictionary does not evaluate to True, then the value of the previous dictionary is kept. {'a': 1, 'b': 3, 'c': 4} """
def merge_truthy(*dicts):
    """Merge mappings, preferring truthy values on key collisions.

    Later mappings win, except that a falsy later value never replaces an
    existing value for the same key.

    {'a': 1, 'b': 3, 'c': 4}
    """
    merged = {}
    for mapping in dicts:
        for key, value in mapping.items():
            if value:
                merged[key] = value
            else:
                merged.setdefault(key, value)
    return merged
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perform(self): """Perform the version upgrade on the database. """
db_versions = self.table.versions()
version = self.version
# Skip already-applied versions unless this exact version is force-applied.
if (version.is_processed(db_versions)
        and not self.config.force_version == self.version.number):
    self.log(
        u'version {} is already installed'.format(version.number)
    )
    return
self.start()
try:
    self._perform_version(version)
except Exception:
    # Record the full traceback in the migration log table before
    # re-raising.  On Python < 3.4 the traceback is bytes and must be
    # decoded first.
    if sys.version_info < (3, 4):
        msg = traceback.format_exc().decode('utf8', errors='ignore')
    else:
        msg = traceback.format_exc()
    error = u'\n'.join(self.logs + [u'\n', msg])
    self.table.record_log(version.number, error)
    raise
self.finish()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _perform_version(self, version): """Inner method for version upgrade. Not intended for standalone use. This method performs the actual version upgrade with all the pre, post operations and addons upgrades. :param version: The migration version to upgrade to :type version: Instance of Version class """
if version.is_noop():
    self.log(u'version {} is a noop'.format(version.number))
else:
    # Order: base pre-ops, mode pre-ops, addon upgrades, base post-ops,
    # mode post-ops.
    self.log(u'execute base pre-operations')
    for operation in version.pre_operations():
        operation.execute(self.log)
    if self.config.mode:
        self.log(u'execute %s pre-operations' % self.config.mode)
        for operation in version.pre_operations(mode=self.config.mode):
            operation.execute(self.log)
    self.perform_addons()
    self.log(u'execute base post-operations')
    for operation in version.post_operations():
        operation.execute(self.log)
    if self.config.mode:
        self.log(u'execute %s post-operations' % self.config.mode)
        for operation in version.post_operations(self.config.mode):
            operation.execute(self.log)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _do_upgrade(self): """ open websocket connection """
self.current.output['cmd'] = 'upgrade'
self.current.output['user_id'] = self.current.user_id
# Only one active session per user: drop any previous login first.
self.terminate_existing_login()
self.current.user.bind_private_channel(self.current.session.sess_id)
user_sess = UserSessionID(self.current.user_id)
user_sess.set(self.current.session.sess_id)
self.current.user.is_online(True)
# Clean up the locale from session to allow it to be re-read from the user
# preferences after login
for k in translation.DEFAULT_PREFS.keys():
    self.current.session[k] = ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_view(self): """ Authenticate user with given credentials. Connects user's queue and exchange """
def do_view(self):
    """Authenticate the user and upgrade the connection on success.

    Marks the login attempt in ``task_data``; on success (or when already
    authenticated) upgrades the connection, otherwise responds 403.
    """
    self.current.output['login_process'] = True
    self.current.task_data['login_successful'] = False
    if self.current.is_auth:
        self._do_upgrade()
    else:
        try:
            auth_result = self.current.auth.authenticate(
                self.current.input['username'],
                self.current.input['password'])
            self.current.task_data['login_successful'] = auth_result
            if auth_result:
                self._do_upgrade()
        except ObjectDoesNotExist:
            # Unknown user (or comparable lookup failure): log it and fall
            # through to the 403 below.  Removed a no-op `except: raise`
            # clause and a redundant `pass`.
            self.current.log.exception("Wrong username or another error occurred")
    if self.current.output.get('cmd') != 'upgrade':
        self.current.output['status_code'] = 403
    else:
        KeepAlive(self.current.user_id).reset()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __get_mapping(self, structures): """ match each pattern to each molecule. if all patterns matches with all molecules return generator of all possible mapping. :param structures: disjoint molecules :return: mapping generator """
# Try every assignment of the disjoint molecules to the patterns (order
# matters) and every combination of per-pair substructure mappings.
for c in permutations(structures, len(self.__patterns)):
    for m in product(*(x.get_substructure_mapping(y, limit=0) for x, y in zip(self.__patterns, c))):
        # Merge the per-molecule mappings into one atom mapping; skip the
        # degenerate all-empty combination.
        mapping = {}
        for i in m:
            mapping.update(i)
        if mapping:
            yield mapping
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, default=None): """ return the cached value or default if it can't be found :param default: default value :return: cached value """
d = cache.get(self.key) return ((json.loads(d.decode('utf-8')) if self.serialize else d) if d is not None else default)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, val, lifetime=None): """ set cache value :param val: any picklable object :param lifetime: expiration time in sec :return: val """
cache.set(self.key, (json.dumps(val) if self.serialize else val), lifetime or settings.DEFAULT_CACHE_EXPIRE_TIME) return val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all(self): """ Get all list items. Returns: Cache backend response. """
result = cache.lrange(self.key, 0, -1) return (json.loads(item.decode('utf-8')) for item in result if item) if self.serialize else result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_item(self, val): """ Removes given item from the list. Args: val: Item Returns: Cache backend response. """
return cache.lrem(self.key, json.dumps(val))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flush(cls, *args): """ Removes all keys of this namespace Without args, clears all keys starting with cls.PREFIX if called with args, clears keys starting with given cls.PREFIX + args Args: *args: Arbitrary number of arguments. Returns: List of removed keys. """
return _remove_keys([], [(cls._make_key(args) if args else cls.PREFIX) + '*'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_or_expire_session(self): """ Deletes session if keepalive request expired otherwise updates the keepalive timestamp value """
if not hasattr(self, 'key'): return now = time.time() timestamp = float(self.get() or 0) or now sess_id = self.sess_id or UserSessionID(self.user_id).get() if sess_id and now - timestamp > self.SESSION_EXPIRE_TIME: Session(sess_id).delete() return False else: self.set(now) return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_message_for_lane_change(sender, **kwargs): """ Sends a message to possible owners of the current workflows next lane. Args: **kwargs: ``current`` and ``possible_owners`` are required. sender (User): User object """
# Invite the possible owners of the workflow's next lane.
# Fix: the best-effort notification used a bare `except:`, which also
# swallows SystemExit/KeyboardInterrupt — narrowed to `except Exception:`.
current = kwargs['current']
owners = kwargs['possible_owners']
if 'lane_change_invite' in current.task_data:
    msg_context = current.task_data.pop('lane_change_invite')
else:
    msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG
wfi = WFCache(current).get_instance()
# Deletion of used passive task invitations which belong to the previous lane.
TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete()
today = datetime.today()
for recipient in owners:
    inv = TaskInvitation(
        instance=wfi,
        role=recipient,
        wf_name=wfi.wf.name,
        progress=30,
        start_date=today,
        finish_date=today + timedelta(15)
    )
    inv.title = current.task_data.get('INVITATION_TITLE') or wfi.wf.title
    inv.save()
    # Notification delivery is best-effort; one failure must not abort the
    # remaining invitations.
    # TODO: narrow further to the exception(s) the notification backend raises.
    try:
        recipient.send_notification(title=msg_context['title'],
                                    message="%s %s" % (wfi.wf.title, msg_context['body']),
                                    typ=1,  # info
                                    url='',
                                    sender=sender
                                    )
    except Exception:
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_password(sender, **kwargs): """ Encrypts password of the user. """
if sender.model_class.__name__ == 'User': usr = kwargs['object'] if not usr.password.startswith('$pbkdf2'): usr.set_password(usr.password) usr.save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def channel_list(self): """ Main screen for channel management. Channels listed and operations can be chosen on the screen. If there is an error message like non-choice, it is shown here. """
# Report the outcome of a previous merge/split, or show validation warnings.
if self.current.task_data.get('msg', False):
    if self.current.task_data.get('target_channel_key', False):
        self.current.output['msgbox'] = {'type': 'info',
                                         "title": _(u"Successful Operation"),
                                         "msg": self.current.task_data['msg']}
        del self.current.task_data['msg']
    else:
        self.show_warning_messages()
self.current.task_data['new_channel'] = False
list_form = ChannelListForm(title=_(u'Public Channel List'),
                            help_text=CHANNEL_CHOICE_TEXT)
# typ=15 selects the channels listed here — presumably public channels;
# verify against the Channel model.
for channel in Channel.objects.filter(typ=15):
    list_form.ChannelList(choice=False, name=channel.name,
                          owner=channel.owner.username, key=channel.key)
list_form.new_channel = fields.Button(_(u"Merge At New Channel"),
                                      cmd="create_new_channel")
list_form.existing_channel = fields.Button(_(u"Merge With An Existing Channel"),
                                           cmd="choose_existing_channel")
list_form.find_chosen_channel = fields.Button(_(u"Split Channel"),
                                              cmd="find_chosen_channel")
self.form_out(list_form)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def channel_choice_control(self): """ It controls errors. If there is an error, returns channel list screen with error message. """
# Validate the selection; stash the verdict and any error message.
control, message = self.selection_error_control(self.input['form'])
self.current.task_data['control'] = control
self.current.task_data['msg'] = message
if control:
    # Valid selection: remember the chosen operation and channels.
    self.current.task_data['option'] = self.input['cmd']
    self.current.task_data['split_operation'] = False
    keys, names = self.return_selected_form_items(self.input['form']['ChannelList'])
    self.current.task_data['chosen_channels'] = keys
    self.current.task_data['chosen_channels_names'] = names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_new_channel(self): """ Features of new channel are specified like channel's name, owner etc. """
# Mark that the target of the operation is a brand-new channel.
self.current.task_data['new_channel'] = True
form = NewChannelForm(Channel(), current=self.current)
form.title = _(u"Specify Features of New Channel to Create")
form.forward = fields.Button(_(u"Create"), flow="find_target_channel")
self.form_out(form)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_new_channel(self): """ It saves new channel according to specified channel features. """
# Persist the new channel from the submitted form and remember its key as
# the target of the merge/move operation.
form_data = self.input['form']
new_channel = Channel(typ=15,
                      name=form_data['name'],
                      description=form_data['description'],
                      owner_id=form_data['owner_id'])
new_channel.blocking_save()
self.current.task_data['target_channel_key'] = new_channel.key
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def choose_existing_channel(self): """ It is a channel choice list and chosen channels at previous step shouldn't be on the screen. """
if self.current.task_data.get('msg', False):
    self.show_warning_messages()
form = ChannelListForm()
form.title = _(u"Choose a Channel Which Will Be Merged With Chosen Channels")
# Offer only channels that were not already picked in the previous step.
for channel in Channel.objects.filter(typ=15).exclude(
        key__in=self.current.task_data['chosen_channels']):
    form.ChannelList(choice=False, name=channel.name,
                     owner=channel.owner.username, key=channel.key)
form.choose = fields.Button(_(u"Choose"))
self.form_out(form)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def existing_choice_control(self): """ It controls errors. It generates an error message if zero or more than one channels are selected. """
# Assume failure; the message is shown unless exactly one channel is chosen.
self.current.task_data['existing'] = False
self.current.task_data['msg'] = _(u"You should choose just one channel to do operation.")
keys, _names = self.return_selected_form_items(self.input['form']['ChannelList'])
if len(keys) == 1:
    self.current.task_data['existing'] = True
    self.current.task_data['target_channel_key'] = keys[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_channel(self): """ A channel can be splitted to new channel or other existing channel. It creates subscribers list as selectable to moved. """
if self.current.task_data.get('msg', False):
    self.show_warning_messages()
self.current.task_data['split_operation'] = True
# The single channel picked on the list screen is the one being split.
source_channel = Channel.objects.get(self.current.task_data['chosen_channels'][0])
form = SubscriberListForm(title=_(u'Choose Subscribers to Migrate'))
for subscriber in Subscriber.objects.filter(channel=source_channel):
    form.SubscriberList(choice=False, name=subscriber.user.username,
                        key=subscriber.key)
form.new_channel = fields.Button(_(u"Move to a New Channel"),
                                 cmd="create_new_channel")
form.existing_channel = fields.Button(_(u"Move to an Existing Channel"),
                                      cmd="choose_existing_channel")
self.form_out(form)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split_channel(self): """ A channel can be split into a new channel or another existing channel. It creates a selectable list of subscribers to be moved. """
# Validate the subscriber selection for a split/move operation.
# Fix: the error message was the only user-facing string in this workflow
# not wrapped in _() — wrapped for i18n consistency with the sibling steps.
self.current.task_data['option'] = None
chosen, _names = self.return_selected_form_items(self.input['form']['SubscriberList'])
self.current.task_data['chosen_subscribers'] = chosen
self.current.task_data['msg'] = _(
    u"You should choose at least one subscriber for migration operation.")
if chosen:
    # Valid selection: keep the requested command and drop the error message.
    self.current.task_data['option'] = self.input['cmd']
    del self.current.task_data['msg']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move_complete_channel(self): """ Channels and theirs subscribers are moved completely to new channel or existing channel. """
target = Channel.objects.get(self.current.task_data['target_channel_key'])
source_keys = self.current.task_data['chosen_channels']
source_names = self.current.task_data['chosen_channels_names']
# Re-point every subscriber of the source channels at the target channel.
with BlockSave(Subscriber, query_dict={'channel_id': target.key}):
    for subscriber in Subscriber.objects.filter(channel_id__in=source_keys, typ=15):
        subscriber.channel = target
        subscriber.save()
# Old messages and the emptied source channels are removed outright.
with BlockDelete(Message):
    Message.objects.filter(channel_id__in=source_keys, typ=15).delete()
with BlockDelete(Channel):
    Channel.objects.filter(key__in=source_keys).delete()
self.current.task_data['msg'] = _(
    u"Chosen channels(%s) have been merged to '%s' channel successfully.") % (
    ', '.join(source_names), target.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move_chosen_subscribers(self): """ After splitting operation, only chosen subscribers are moved to new channel or existing channel. """
from_channel = Channel.objects.get(self.current.task_data['chosen_channels'][0])
to_channel = Channel.objects.get(self.current.task_data['target_channel_key'])
# Move only the subscribers selected on the split screen.
with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}):
    for subscriber in Subscriber.objects.filter(
            key__in=self.current.task_data['chosen_subscribers']):
        subscriber.channel = to_channel
        subscriber.save()
# A freshly created target channel starts empty, so carry the history over.
if self.current.task_data['new_channel']:
    self.copy_and_move_messages(from_channel, to_channel)
self.current.task_data['msg'] = _(
    u"Chosen subscribers and messages of them migrated from '%s' channel to "
    u"'%s' channel successfully.") % (from_channel.name, to_channel.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_and_move_messages(from_channel, to_channel): """ While splitting channel and moving chosen subscribers to new channel, old channel's messages are copied and moved to new channel. Args: from_channel (Channel object): move messages from channel to_channel (Channel object): move messages to channel """
with BlockSave(Message, query_dict={'channel_id': to_channel.key}):
    for message in Message.objects.filter(channel=from_channel, typ=15):
        # Clearing the key before save() — presumably makes the ORM persist
        # a new record (a copy) rather than updating the original; confirm
        # against the model layer's save semantics.
        message.key = ''
        message.channel = to_channel
        message.save()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def show_warning_messages(self, title=_(u"Incorrect Operation"), box_type='warning'): """ It shows incorrect operations or successful operation messages. Args: title (string): title of message box box_type (string): type of message box (warning, info) """
# Surface the pending message as a message box, then consume it so it is
# not shown again on the next screen.
self.current.output['msgbox'] = {'type': box_type,
                                 "title": title,
                                 "msg": self.current.task_data['msg']}
del self.current.task_data['msg']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def return_selected_form_items(form_info): """ It returns chosen keys list from a given form. Args: form_info: serialized list of dict form data Returns: selected_keys(list): Chosen keys list selected_names(list): Chosen channels' or subscribers' names. """
selected_keys = [] selected_names = [] for chosen in form_info: if chosen['choice']: selected_keys.append(chosen['key']) selected_names.append(chosen['name']) return selected_keys, selected_names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def selection_error_control(self, form_info): """ It controls the selection from the form according to the operations, and returns an error message if it does not comply with the rules. Args: form_info: Channel or subscriber form from the user Returns: True or False error message """
keys, names = self.return_selected_form_items(form_info['ChannelList']) chosen_channels_number = len(keys) if form_info['new_channel'] and chosen_channels_number < 2: return False, _( u"You should choose at least two channel to merge operation at a new channel.") elif form_info['existing_channel'] and chosen_channels_number == 0: return False, _( u"You should choose at least one channel to merge operation with existing channel.") elif form_info['find_chosen_channel'] and chosen_channels_number != 1: return False, _(u"You should choose one channel for split operation.") return True, None