_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q32400
from_dict
train
def from_dict(data, require=None):
    """Validates a dictionary containing Google service account data.

    Creates and returns a :class:`google.auth.crypt.Signer` instance from the
    private key specified in the data.

    Args:
        data (Mapping[str, str]): The service account data
        require (Sequence[str]): List of keys required to be present in the
            info.

    Returns:
        google.auth.crypt.Signer: A signer created from the private key in the
            service account file.

    Raises:
        ValueError: if the data was in the wrong format, or if one of the
            required keys is missing.
    """
    required_keys = set(require) if require is not None else set()
    missing = required_keys - set(six.iterkeys(data))

    if missing:
        raise ValueError(
            'Service account info was not in the expected format, missing '
            'fields {}.'.format(', '.join(missing)))

    # Build the signer from the embedded private key.
    return crypt.RSASigner.from_service_account_info(data)
python
{ "resource": "" }
q32401
from_filename
train
def from_filename(filename, require=None):
    """Reads a Google service account JSON file and returns its parsed info.

    Args:
        filename (str): The path to the service account .json file.
        require (Sequence[str]): List of keys required to be present in the
            info.

    Returns:
        Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
            info and a signer instance.
    """
    with io.open(filename, 'r', encoding='utf-8') as fh:
        info = json.load(fh)
    # Validation (and signer construction) is delegated to from_dict.
    return info, from_dict(info, require=require)
python
{ "resource": "" }
q32402
copy_docstring
train
def copy_docstring(source_class):
    """Decorator that copies a method's docstring from another class.

    Args:
        source_class (type): The class that has the documented method.

    Returns:
        Callable: A decorator that will copy the docstring of the same named
            method in the source class to the decorated method.
    """
    def decorator(method):
        """Copy the docstring onto ``method`` and return it.

        Args:
            method (Callable): The method to copy the docstring to.

        Returns:
            Callable: the same method passed in with an updated docstring.

        Raises:
            ValueError: if the method already has a docstring.
        """
        if method.__doc__:
            raise ValueError('Method already has a docstring.')

        # Borrow the documentation from the identically-named method on the
        # source class.
        method.__doc__ = getattr(source_class, method.__name__).__doc__
        return method

    return decorator
python
{ "resource": "" }
q32403
from_bytes
train
def from_bytes(value):
    """Converts bytes to a string value, if necessary.

    Args:
        value (Union[str, bytes]): The value to be converted.

    Returns:
        str: The original value converted to unicode (if bytes) or as passed in
            if it started out as unicode.

    Raises:
        ValueError: If the value could not be converted to unicode.
    """
    if isinstance(value, six.binary_type):
        decoded = value.decode('utf-8')
    else:
        decoded = value

    if not isinstance(decoded, six.text_type):
        raise ValueError(
            '{0!r} could not be converted to unicode'.format(value))
    return decoded
python
{ "resource": "" }
q32404
update_query
train
def update_query(url, params, remove=None):
    """Updates a URL's query parameters.

    Replaces any current values if they are already present in the URL.

    Args:
        url (str): The URL to update.
        params (Mapping[str, str]): A mapping of query parameter keys to
            values.
        remove (Sequence[str]): Parameters to remove from the query string.

    Returns:
        str: The URL with updated query parameters.

    Examples:
        >>> url = 'http://example.com?a=1'
        >>> update_query(url, {'a': '2'})
        'http://example.com?a=2'
        >>> update_query(url, {'b': '3'})
        'http://example.com?a=1&b=3'
        >>> update_query(url, {'b': '3'}, remove=['a'])
        'http://example.com?b=3'
    """
    if remove is None:
        remove = []

    # Split the URL into parts.
    parts = urllib.parse.urlparse(url)
    # Parse the query string.
    query_params = urllib.parse.parse_qs(parts.query)
    # Update the query parameters with the new parameters.
    query_params.update(params)
    # Remove any values specified in remove.
    query_params = {
        key: value for key, value in query_params.items()
        if key not in remove}
    # Re-encode the query string. doseq=True expands the list values that
    # parse_qs produces into repeated key=value pairs.
    new_query = urllib.parse.urlencode(query_params, doseq=True)
    # Unsplit the url.
    new_parts = parts._replace(query=new_query)
    return urllib.parse.urlunparse(new_parts)
python
{ "resource": "" }
q32405
padded_urlsafe_b64decode
train
def padded_urlsafe_b64decode(value):
    """Decodes base64 strings lacking padding characters.

    Google infrastructure tends to omit the base64 padding characters.

    Args:
        value (Union[str, bytes]): The encoded value.

    Returns:
        bytes: The decoded value
    """
    raw = to_bytes(value)
    # Base64 input length must be a multiple of 4; restore the stripped '='.
    pad_length = -len(raw) % 4
    return base64.urlsafe_b64decode(raw + b'=' * pad_length)
python
{ "resource": "" }
q32406
configure_processes
train
def configure_processes(agent_metadata_map, logger):
    """
    This will update the priority and CPU affinity of the processes owned by bots to try to achieve fairness and
    good performance.

    :param agent_metadata_map: A mapping of player index to agent metadata, including a list of owned process ids.
    :param logger: Logger used to emit the missing-optional-packages warning.
    """
    if not optional_packages_installed:
        logger.warning("\n#### WARNING ####\n"
                       "You are missing some optional packages which will become mandatory in the future!\n"
                       "Please run `pip install -r requirements.txt` to enjoy optimal functionality "
                       "and future-proof yourself!\n")
    # Without the optional packages (psutil) there is nothing more we can do here.
    if not optional_packages_installed:
        return

    # Group every owned pid by the team of its agent.
    team_pids_map = {}
    for player_index, data in agent_metadata_map.items():
        team = data.team
        if team not in team_pids_map:
            team_pids_map[team] = set()
        team_pids_map[team].update(data.pids)

    shared_pids = set()
    cpu_count = psutil.cpu_count()
    cpus_per_team = cpu_count // 3

    if len(team_pids_map) >= 2 and cpus_per_team > 0:
        # Sort into three sets of pids: team 0 exclusives, team 1 exclusives, and shared pids
        # All pids will be assigned high priority
        # Team exclusive pids will be bound to a subset of cpus so they can't adversely affect the opposite team.
        for team, team_set in team_pids_map.items():
            if not shared_pids:
                shared_pids.update(team_set)
            else:
                shared_pids.intersection_update(team_set)
        for team, team_set in team_pids_map.items():
            team_set -= shared_pids
        for team, team_pids in team_pids_map.items():
            team_cpu_offset = cpus_per_team * team
            # Each team gets its own slice of cpus, counted down from the highest cpu index.
            team_cpus = list(range(cpu_count - cpus_per_team - team_cpu_offset, cpu_count - team_cpu_offset))
            for pid in team_pids:
                p = psutil.Process(pid)
                p.cpu_affinity(team_cpus)  # Restrict the process to run on the cpus assigned to the team
                p.nice(psutil.HIGH_PRIORITY_CLASS)  # Allow the process to run at high priority
    else:
        # Consider everything a shared pid, because we are not in a position to split up cpus.
        for team, team_set in team_pids_map.items():
            shared_pids.update(team_set)

    for pid in shared_pids:
        p = psutil.Process(pid)
        # Allow the process to run at high priority
        p.nice(psutil.HIGH_PRIORITY_CLASS)
python
{ "resource": "" }
q32407
ConfigObject.get_header
train
def get_header(self, header_name):
    """
    Returns a header with that name, creating it first if it does not exist.
    """
    try:
        return self.headers[header_name]
    except KeyError:
        return self.add_header_name(header_name)
python
{ "resource": "" }
q32408
log_warn
train
def log_warn(message, args):
    """Logs a warning message using the default logger."""
    default_logger = get_logger(DEFAULT_LOGGER, log_creation=False)
    default_logger.log(logging.WARNING, message, *args)
python
{ "resource": "" }
q32409
CarCustomisationDialog.create_config_headers_dicts
train
def create_config_headers_dicts(self):
    """
    Creates the config_headers_to_widgets and config_widgets_to_headers and
    config_headers_to_categories dicts
    """
    # Maps (config header, field key) -> the widget(s) that edit that field.
    # Fields without a combobox (the color ids) are stored as 1-tuples.
    self.config_headers_to_widgets = {
        # blue stuff
        'Bot Loadout': {
            'team_color_id': (self.blue_primary_spinbox,),
            'custom_color_id': (self.blue_secondary_spinbox,),
            'car_id': (self.blue_car_spinbox, self.blue_car_combobox),
            'decal_id': (self.blue_decal_spinbox, self.blue_decal_combobox),
            'wheels_id': (self.blue_wheels_spinbox, self.blue_wheels_combobox),
            'boost_id': (self.blue_boost_spinbox, self.blue_boost_combobox),
            'antenna_id': (self.blue_antenna_spinbox, self.blue_antenna_combobox),
            'hat_id': (self.blue_hat_spinbox, self.blue_hat_combobox),
            'paint_finish_id': (self.blue_paint_finish_spinbox, self.blue_paint_finish_combobox),
            'custom_finish_id': (self.blue_custom_finish_spinbox, self.blue_custom_finish_combobox),
            'engine_audio_id': (self.blue_engine_spinbox, self.blue_engine_combobox),
            'trails_id': (self.blue_trails_spinbox, self.blue_trails_combobox),
            'goal_explosion_id': (self.blue_goal_explosion_spinbox, self.blue_goal_explosion_combobox)
        },
        'Bot Loadout Orange': {
            'team_color_id': (self.orange_primary_spinbox,),
            'custom_color_id': (self.orange_secondary_spinbox,),
            'car_id': (self.orange_car_spinbox, self.orange_car_combobox),
            'decal_id': (self.orange_decal_spinbox, self.orange_decal_combobox),
            'wheels_id': (self.orange_wheels_spinbox, self.orange_wheels_combobox),
            'boost_id': (self.orange_boost_spinbox, self.orange_boost_combobox),
            'antenna_id': (self.orange_antenna_spinbox, self.orange_antenna_combobox),
            'hat_id': (self.orange_hat_spinbox, self.orange_hat_combobox),
            'paint_finish_id': (self.orange_paint_finish_spinbox, self.orange_paint_finish_combobox),
            'custom_finish_id': (self.orange_custom_finish_spinbox, self.orange_custom_finish_combobox),
            'engine_audio_id': (self.orange_engine_spinbox, self.orange_engine_combobox),
            'trails_id': (self.orange_trails_spinbox, self.orange_trails_combobox),
            'goal_explosion_id': (self.orange_goal_explosion_spinbox, self.orange_goal_explosion_combobox)
        },
    }
    # Reverse lookup: widget -> (header_1, header_2) so change handlers can find
    # which config entry a given widget belongs to.
    self.config_widgets_to_headers = {}
    for header_1, _field_dict in self.config_headers_to_widgets.items():
        for header_2, _widgets in _field_dict.items():
            for _widget in _widgets:
                self.config_widgets_to_headers[_widget] = (header_1, header_2)
    # Maps each item-id key to its in-game item category name (note that
    # paint_finish_id and custom_finish_id share the 'Paint Finish' category).
    self.config_headers_to_categories = {
        'car_id': 'Body',
        'decal_id': 'Decal',
        'wheels_id': 'Wheels',
        'boost_id': 'Rocket Boost',
        'antenna_id': 'Antenna',
        'hat_id': 'Topper',
        'paint_finish_id': 'Paint Finish',
        'custom_finish_id': 'Paint Finish',
        'engine_audio_id': 'Engine Audio',
        'trails_id': 'Trail',
        'goal_explosion_id': 'Goal Explosion'
    }
python
{ "resource": "" }
q32410
get_rlbot_directory
train
def get_rlbot_directory() -> str:
    """Gets the path of the rlbot package directory"""
    # This module lives one level below the package root, so go up twice.
    this_file = os.path.realpath(__file__)
    return os.path.dirname(os.path.dirname(this_file))
python
{ "resource": "" }
q32411
read_match_config_from_file
train
def read_match_config_from_file(match_config_path: Path) -> MatchConfig:
    """
    Parse the rlbot.cfg file on disk into the python datastructure.
    """
    raw_config = create_bot_config_layout()
    raw_config.parse_file(match_config_path, max_index=MAX_PLAYERS)
    # No bot/looks overrides: pass empty mappings.
    return parse_match_config(raw_config, match_config_path, {}, {})
python
{ "resource": "" }
q32412
validate_bot_config
train
def validate_bot_config(config_bundle) -> None:
    """
    Checks the config bundle to see whether it has all required attributes.
    """
    if not config_bundle.name:
        config_file = config_bundle.config_file_name or ''
        bot_config = os.path.join(config_bundle.config_directory, config_file)
        raise AttributeError(f"Bot config {bot_config} has no name configured!")

    # This will raise an exception if we can't find the looks config, or if it's malformed
    config_bundle.get_looks_config()
python
{ "resource": "" }
q32413
HelperProcessManager.start_or_update_helper_process
train
def start_or_update_helper_process(self, agent_metadata: AgentMetadata):
    """
    Examines the agent metadata to see if the agent needs a helper process. If the process is not
    running yet, create the process. Once the process is running, feed the agent metadata to it.

    If a process is created here, the pid will be added to the agent metadata.
    """
    helper_req = agent_metadata.helper_process_request
    if helper_req is not None:
        if helper_req.key not in self.helper_process_map:
            # First request with this key: spin up the helper and remember its metadata queue.
            metadata_queue = mp.Queue()
            if helper_req.python_file_path is not None:
                process = mp.Process(target=run_helper_process,
                                     args=(helper_req.python_file_path, metadata_queue, self.quit_event,
                                           helper_req.options))
                process.daemon = True  # Don't let the helper outlive the main process.
                process.start()
                agent_metadata.pids.add(process.pid)
                self.helper_process_map[helper_req.key] = metadata_queue
            if helper_req.executable is not None:
                # TODO: find a nice way to pass the options dict as arguments
                process = subprocess.Popen([helper_req.executable])
                agent_metadata.pids.add(process.pid)
                # NOTE(review): for the executable branch the queue is registered but no reader is
                # wired up here — confirm executable helpers are expected to ignore metadata.
                self.helper_process_map[helper_req.key] = metadata_queue
        # Feed the metadata to whichever helper owns this key.
        # NOTE(review): if neither python_file_path nor executable was set, the key was never
        # registered and this lookup raises KeyError — confirm that combination cannot occur.
        metadata_queue = self.helper_process_map[helper_req.key]
        metadata_queue.put(agent_metadata)
python
{ "resource": "" }
q32414
PlayerConfig.bot_config
train
def bot_config(player_config_path: Path, team: Team) -> 'PlayerConfig':
    """
    A function to cover the common case of creating a config for a bot.
    """
    config = PlayerConfig()
    config.bot = True
    config.rlbot_controlled = True
    config.team = team.value
    config.config_path = str(player_config_path.absolute())  # TODO: Refactor to use Path's

    # Pull the name and appearance out of the bot's own config bundle.
    bundle = get_bot_config_bundle(config.config_path)
    config.name = bundle.name
    config.loadout_config = load_bot_appearance(bundle.get_looks_config(), config.team)
    return config
python
{ "resource": "" }
q32415
setup_manager_context
train
def setup_manager_context():
    """
    Creates an initialized context manager which shuts down at the end of the
    `with` block.

    usage:
    >>> with setup_manager_context() as setup_manager:
    ...     setup_manager.load_config(...)
    ...     # ... Run match
    """
    manager = SetupManager()
    manager.connect_to_game()
    try:
        yield manager
    finally:
        # Guarantee shutdown even if the with-block raises.
        manager.shut_down(kill_all_pids=True)
python
{ "resource": "" }
q32416
SetupManager.load_match_config
train
def load_match_config(self, match_config: MatchConfig, bot_config_overrides=None):
    """
    Loads the match config into internal data structures, which prepares us to later
    launch bot processes and start the match.

    This is an alternative to the load_config method; they accomplish the same thing.

    :param match_config: The match configuration describing all players and match settings.
    :param bot_config_overrides: Optional mapping of player index to a config bundle to use
        instead of loading the bundle from that player's config_path.
    """
    # A None default replaces the previous mutable `={}` default (a classic Python
    # pitfall); behavior for all existing callers is unchanged.
    if bot_config_overrides is None:
        bot_config_overrides = {}

    self.num_participants = match_config.num_players
    self.names = [bot.name for bot in match_config.player_configs]
    self.teams = [bot.team for bot in match_config.player_configs]

    # Resolve a config bundle per player: explicit override first, then the bot's own
    # config_path, otherwise None (e.g. players not controlled by the framework).
    bundles = [bot_config_overrides[index] if index in bot_config_overrides else
               get_bot_config_bundle(bot.config_path) if bot.config_path else
               None
               for index, bot in enumerate(match_config.player_configs)]

    self.python_files = [bundle.python_file if bundle else None
                         for bundle in bundles]

    self.parameters = []

    for index, bot in enumerate(match_config.player_configs):
        python_config = None
        if bot.rlbot_controlled:
            python_config = load_bot_parameters(bundles[index])
        self.parameters.append(python_config)

        # Fill in an appearance for bots that didn't specify one explicitly.
        if bot.loadout_config is None and bundles[index]:
            looks_config = bundles[index].get_looks_config()
            bot.loadout_config = load_bot_appearance(looks_config, bot.team)

    if match_config.extension_config is not None and match_config.extension_config.python_file_path is not None:
        self.load_extension(match_config.extension_config.python_file_path)

    self.match_config = match_config
    self.start_match_configuration = match_config.create_match_settings()
    self.game_interface.start_match_configuration = self.start_match_configuration
python
{ "resource": "" }
q32417
SetupManager.load_config
train
def load_config(self, framework_config: ConfigObject = None, config_location=DEFAULT_RLBOT_CONFIG_LOCATION,
                bot_configs=None,
                looks_configs=None):
    """
    Loads the configuration into internal data structures, which prepares us to later
    launch bot processes and start the match.

    :param framework_config: A config object that indicates what bots to run. May come from parsing a rlbot.cfg.
    :param config_location: The location of the rlbot.cfg file, which will be used to resolve relative paths.
    :param bot_configs: Overrides for bot configurations.
    :param looks_configs: Overrides for looks configurations.
    """
    self.logger.debug('reading the configs')

    # Normalize the optional override mappings first.
    bot_configs = {} if bot_configs is None else bot_configs
    looks_configs = {} if looks_configs is None else looks_configs

    # Fall back to parsing the rlbot.cfg on disk when no config object was supplied.
    if framework_config is None:
        framework_config = create_bot_config_layout()
        framework_config.parse_file(config_location, max_index=MAX_PLAYERS)

    match_config = parse_match_config(framework_config, config_location, bot_configs, looks_configs)
    self.load_match_config(match_config, bot_configs)
python
{ "resource": "" }
q32418
load_external_module
train
def load_external_module(python_file):
    """
    Returns the loaded module. All of its newly added dependencies
    are removed from sys.path after load.

    :param python_file: Path to the python file whose module should be loaded.
    :raises FileNotFoundError: if python_file does not exist on disk.
    """
    # There's a special case where python_file may be pointing at the base agent definition here in the framework.
    # This is sometimes done as a default and we want to allow it. Short-circuit the logic because
    # loading it as if it's an external class is a real mess.
    # NOTE(review): this branch returns a (class, module_name) tuple while the normal path
    # returns a module object — confirm that callers handle both shapes.
    if os.path.abspath(python_file) == os.path.abspath(inspect.getfile(BaseAgent)):
        return BaseAgent, BaseAgent.__module__

    if not os.path.isfile(python_file):
        raise FileNotFoundError(f"Could not find file {python_file}!")

    dir_name = os.path.dirname(python_file)
    module_name = os.path.splitext(os.path.basename(python_file))[0]

    keys_before = set(sys.modules.keys())

    # Temporarily modify the sys.path while we load the module so that the module can use import statements naturally
    sys.path.insert(0, dir_name)
    try:
        loaded_module = importlib.import_module(module_name)
    finally:
        # Clean up the changes to sys.path and sys.modules to avoid collisions with other external
        # classes and to prepare for the next reload. The finally block guarantees the cleanup
        # happens even when the import itself raises (previously a failed import left both dirty).
        del sys.path[0]
        added = set(sys.modules.keys()).difference(keys_before)
        for key in added:
            del sys.modules[key]

    return loaded_module
python
{ "resource": "" }
q32419
BotManager.send_quick_chat_from_agent
train
def send_quick_chat_from_agent(self, team_only, quick_chat): """ Passes the agents quick chats to the game, and also to other python bots. This does perform limiting. You are limited to 5 quick chats in a 2 second period starting from the first chat. This means you can spread your chats out to be even within that 2 second period. You could spam them in the first little bit but then will be throttled. """ # Send the quick chat to the game rlbot_status = send_quick_chat_flat(self.game_interface, self.index, self.team, team_only, quick_chat) if rlbot_status == RLBotCoreStatus.QuickChatRateExceeded: self.logger.debug('quick chat disabled') else: # Make the quick chat visible to other python bots. Unfortunately other languages can't see it. send_quick_chat(self.quick_chat_queue_holder, self.index, self.team, team_only, quick_chat)
python
{ "resource": "" }
q32420
BotManager.run
train
def run(self):
    """
    Loads interface for RLBot, prepares environment and agent, and calls the update for the agent.

    Runs until terminate_request_event is set (or Ctrl+C), then retires the agent,
    zeroes the player input, and signals termination_complete_event.
    """
    self.logger.debug('initializing agent')
    self.game_interface.load_interface()

    self.prepare_for_run()

    # Create Ratelimiter
    rate_limit = rate_limiter.RateLimiter(GAME_TICK_PACKET_POLLS_PER_SECOND)
    last_tick_game_time = None  # What the tick time of the last observed tick was
    last_call_real_time = datetime.now()  # When we last called the Agent

    # Get bot module
    agent, agent_class_file = self.load_agent()

    # Track the file's mtime so the agent can be hot-reloaded when it changes on disk.
    last_module_modification_time = os.stat(agent_class_file).st_mtime

    # Run until main process tells to stop, or we detect Ctrl+C
    try:
        while not self.terminate_request_event.is_set():
            self.pull_data_from_game()
            # game_tick_packet = self.game_interface.get
            # Read from game data shared memory

            # Run the Agent only if the game_info has updated.
            tick_game_time = self.get_game_time()
            # Keep calling the agent periodically even while the game is paused.
            should_call_while_paused = datetime.now() - last_call_real_time >= MAX_AGENT_CALL_PERIOD
            if tick_game_time != last_tick_game_time or should_call_while_paused:
                last_tick_game_time = tick_game_time
                last_call_real_time = datetime.now()

                # Reload the Agent if it has been modified or if reload is requested from outside.
                try:
                    new_module_modification_time = os.stat(agent_class_file).st_mtime
                    if new_module_modification_time != last_module_modification_time or self.reload_request_event.is_set():
                        self.reload_request_event.clear()
                        last_module_modification_time = new_module_modification_time
                        # Clear the render queue on reload.
                        if hasattr(agent, 'renderer') and isinstance(agent.renderer, RenderingManager):
                            agent.renderer.clear_all_touched_render_groups()
                        agent, agent_class_file = self.reload_agent(agent, agent_class_file)
                except FileNotFoundError:
                    self.logger.error(f"Agent file {agent_class_file} was not found. Will try again.")
                    time.sleep(0.5)
                except Exception:
                    self.logger.error("Reloading the agent failed:\n" + traceback.format_exc())
                    time.sleep(0.5)  # Avoid burning CPU / logs if this starts happening constantly

                # Call agent
                try:
                    self.call_agent(agent, self.agent_class_wrapper.get_loaded_class())
                except Exception as e:
                    self.logger.error("Call to agent failed:\n" + traceback.format_exc())

            # Ratelimit here
            rate_limit.acquire()
    except KeyboardInterrupt:
        self.terminate_request_event.set()

    # Shut down the bot by calling cleanup functions.
    if hasattr(agent, 'retire'):
        try:
            agent.retire()
        except Exception as e:
            self.logger.error("Retiring the agent failed:\n" + traceback.format_exc())
    if hasattr(agent, 'renderer') and isinstance(agent.renderer, RenderingManager):
        agent.renderer.clear_all_touched_render_groups()

    # Zero out the inputs, so it's more obvious that the bot has stopped.
    self.game_interface.update_player_input(PlayerInput(), self.index)

    self.quick_chat_quit_event.set()  # Shut down quick chat.

    # If terminated, send callback
    self.termination_complete_event.set()
python
{ "resource": "" }
q32421
RLBotQTGui.clean_overall_config_loadouts
train
def clean_overall_config_loadouts(self):
    """
    Set all unused loadout paths to None.
    This makes sure agents don't have a custom loadout when new agents are added in the gui.
    """
    for i in range(MAX_PLAYERS):
        # Only reset indices that are not currently claimed by an agent.
        if i not in self.index_manager.numbers:
            self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY, "None", i)
python
{ "resource": "" }
q32422
BaseAgent.convert_output_to_v4
train
def convert_output_to_v4(self, controller_input):
    """Converts a v3 output to a v4 controller state"""
    # The v3 output is a flat sequence whose positions map onto these fields in order.
    field_names = ('throttle', 'steer', 'pitch', 'yaw', 'roll',
                   'jump', 'boost', 'handbrake')
    player_input = SimpleControllerState()
    for position, field_name in enumerate(field_names):
        setattr(player_input, field_name, controller_input[position])
    return player_input
python
{ "resource": "" }
q32423
_wait_until_good_ticks
train
def _wait_until_good_ticks(game_interface: GameInterface, required_new_ticks: int=3):
    """Blocks until we're getting new packets, indicating that the match is ready.

    :param game_interface: Interface used to poll live game data.
    :param required_new_ticks: How many distinct "good" ticks must be observed before returning.
    """
    rate_limit = rate_limiter.RateLimiter(120)
    last_tick_game_time = None  # What the tick time of the last observed tick was
    packet = GameTickPacket()  # We want to do a deep copy for game inputs so people don't mess with em

    seen_times = 0
    while seen_times < required_new_ticks:
        game_interface.update_live_data_packet(packet)

        def is_good_tick():
            # A tick only counts when game time advanced, the round is live,
            # and no car is currently demolished.
            if packet.game_info.seconds_elapsed == last_tick_game_time:
                return False
            if not packet.game_info.is_round_active:
                return False
            if any(car.is_demolished for car in packet.game_cars):
                return False
            return True

        if is_good_tick():
            seen_times += 1
        last_tick_game_time = packet.game_info.seconds_elapsed

        # Cap the polling frequency at 120 Hz.
        rate_limit.acquire()
python
{ "resource": "" }
q32424
training_status_renderer_context
train
def training_status_renderer_context(exercise_names: List[str], renderman: RenderingManager):
    """
    Ensures that the screen is always cleared, even on fatal errors in code that uses this renderer.
    """
    status_renderer = TrainingStatusRenderer(exercise_names, renderman)
    try:
        yield status_renderer
    finally:
        # Always wipe the on-screen status, even when the body raised.
        status_renderer.clear_screen()
python
{ "resource": "" }
q32425
GameInterface.inject_dll
train
def inject_dll(self):
    """
    Calling this function will inject the DLL without GUI
    DLL will return status codes from 0 to 5 which correspond to injector_codes
    DLL injection is only valid if codes are 0->'INJECTION_SUCCESSFUL' or 3->'RLBOT_DLL_ALREADY_INJECTED'
    It will print the output code and if it's not valid it will kill runner.py
    If RL isn't running the Injector will stay hidden waiting for RL to open and inject as soon as it does
    """
    self.logger.info('Injecting DLL')
    # Inject DLL
    injector_dir = os.path.join(get_dll_directory(), 'RLBot_Injector.exe')
    # Fail fast with a clear message if any of the required binaries are missing
    # (antivirus software is known to quarantine them).
    for file in ['RLBot_Injector.exe', 'RLBot_Core.dll', 'RLBot_Core_Interface.dll', 'RLBot_Core_Interface_32.dll']:
        file_path = os.path.join(get_dll_directory(), file)
        if not os.path.isfile(file_path):
            raise FileNotFoundError(f'{file} was not found in {get_dll_directory()}. '
                                    'Please check that the file exists and your antivirus '
                                    'is not removing it. See https://github.com/RLBot/RLBot/wiki/Antivirus-Notes')
    # The injector's process exit code indexes into injector_codes below.
    incode = subprocess.call([injector_dir, 'hidden'])
    injector_codes = ['INJECTION_SUCCESSFUL',
                      'INJECTION_FAILED',
                      'MULTIPLE_ROCKET_LEAGUE_PROCESSES_FOUND',
                      'RLBOT_DLL_ALREADY_INJECTED',
                      'RLBOT_DLL_NOT_FOUND',
                      'MULTIPLE_RLBOT_DLL_FILES_FOUND']
    injector_valid_codes = ['INJECTION_SUCCESSFUL',
                            'RLBOT_DLL_ALREADY_INJECTED']
    injection_status = injector_codes[incode]
    if injection_status in injector_valid_codes:
        self.logger.info('Finished Injecting DLL')
        return injection_status
    else:
        # Any other status is unrecoverable: log it and terminate the process.
        self.logger.error('Failed to inject DLL: ' + injection_status)
        sys.exit()
python
{ "resource": "" }
q32426
GameInterface.update_rigid_body_tick
train
def update_rigid_body_tick(self, rigid_body_tick: RigidBodyTick):
    """Get the most recent state of the physics engine."""
    status = self.game.UpdateRigidBodyTick(rigid_body_tick)
    # Report the core status (no specific status-check callback here).
    self.game_status(None, status)
    return rigid_body_tick
python
{ "resource": "" }
q32427
GameInterface.get_ball_prediction
train
def get_ball_prediction(self) -> BallPredictionPacket:
    """
    Gets the latest ball prediction available in shared memory. Only works if BallPrediction.exe is running.
    """
    byte_buffer = self.game.GetBallPrediction()

    if byte_buffer.size >= 4:  # GetRootAsGameTickPacket gets angry if the size is less than 4
        # We're counting on this copying the data over to a new memory location so that the original
        # pointer can be freed safely.
        proto_string = ctypes.string_at(byte_buffer.ptr, byte_buffer.size)
        self.game.Free(byte_buffer.ptr)  # Avoid a memory leak
        self.game_status(None, RLBotCoreStatus.Success)
        return BallPredictionPacket.GetRootAsBallPrediction(proto_string, 0)
    # NOTE(review): when size < 4 this falls through and implicitly returns None, and the buffer
    # pointer is never freed — confirm whether the core allocates memory for empty buffers and
    # whether callers handle a None return.
python
{ "resource": "" }
q32428
CarState.convert_to_flat
train
def convert_to_flat(self, builder):
    """
    In this conversion, we always want to return a valid flatbuffer pointer even if all the
    contents are blank because sometimes we need to put empty car states into the car list
    to make the indices line up.

    :param builder: The flatbuffers builder currently assembling the message.
    :return: The offset of the finished DesiredCarState table.
    """
    # Child tables must be serialized before DesiredCarStateStart begins this table.
    physics_offset = None if self.physics is None else self.physics.convert_to_flat(builder)
    DesiredCarState.DesiredCarStateStart(builder)
    # Each field is optional: only write the ones that were explicitly set.
    if physics_offset is not None:
        DesiredCarState.DesiredCarStateAddPhysics(builder, physics_offset)
    if self.boost_amount is not None:
        DesiredCarState.DesiredCarStateAddBoostAmount(builder, Float.CreateFloat(builder, self.boost_amount))
    if self.jumped is not None:
        DesiredCarState.DesiredCarStateAddJumped(builder, Bool.CreateBool(builder, self.jumped))
    if self.double_jumped is not None:
        DesiredCarState.DesiredCarStateAddDoubleJumped(builder, Bool.CreateBool(builder, self.double_jumped))
    return DesiredCarState.DesiredCarStateEnd(builder)
python
{ "resource": "" }
q32429
BoostState.convert_to_flat
train
def convert_to_flat(self, builder):
    """
    In this conversion, we always want to return a valid flatbuffer pointer even if all the
    contents are blank because sometimes we need to put empty boost states into the boost
    list to make the indices line up.
    """
    DesiredBoostState.DesiredBoostStateStart(builder)
    respawn_time = self.respawn_time
    # The respawn time is the only (optional) field on this table.
    if respawn_time is not None:
        DesiredBoostState.DesiredBoostStateAddRespawnTime(
            builder, Float.CreateFloat(builder, respawn_time))
    return DesiredBoostState.DesiredBoostStateEnd(builder)
python
{ "resource": "" }
q32430
BloggingEngine.init_app
train
def init_app(self, app, storage=None, cache=None, file_upload=None):
    """
    Initialize the engine.

    :param app: The app to use
    :type app: Object
    :param storage: The blog storage instance that implements the
        ``Storage`` class interface.
    :type storage: Object
    :param cache: (Optional) A Flask-Cache object to enable caching
    :type cache: Object
    :param file_upload: (Optional) A file-upload handler; when uploads are
        allowed and none is given, a new Flask-FileUpload instance is created.
    """
    self.app = app
    self.config = self.app.config
    # Explicit arguments win over anything assigned earlier (e.g. in __init__).
    self.storage = storage or self.storage
    self.file_upload = file_upload or self.file_upload
    self.cache = cache or self.cache
    self._register_plugins(self.app, self.config)

    from .views import create_blueprint
    blog_app = create_blueprint(__name__, self)
    # external urls
    blueprint_created.send(self.app, engine=self, blueprint=blog_app)

    self.app.register_blueprint(
        blog_app, url_prefix=self.config.get("BLOGGING_URL_PREFIX"))

    # The engine is registered under both keys for backward compatibility.
    self.app.extensions["FLASK_BLOGGING_ENGINE"] = self  # duplicate
    self.app.extensions["blogging"] = self

    self.principal = Principal(self.app)
    engine_initialised.send(self.app, engine=self)

    if self.config.get("BLOGGING_ALLOW_FILEUPLOAD", True):
        self.ffu = self.file_upload or FlaskFileUpload(app)
python
{ "resource": "" }
q32431
SQLAStorage.get_post_by_id
train
def get_post_by_id(self, post_id):
    """
    Fetch the blog post given by ``post_id``

    :param post_id: The post identifier for the blog post
    :type post_id: str

    :return: If the ``post_id`` is valid, the post data is retrieved, else
        returns ``None``.
    """
    r = None
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        try:
            post_statement = sqla.select([self._post_table]) \
                .where(self._post_table.c.id == post_id) \
                .alias('post')
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Note this will retrieve one row per tag
            all_rows = conn.execute(
                sqla.select([joined_statement])
            ).fetchall()
            # Serialisation returns a list of posts; [0] raises IndexError for
            # an unknown id, which the except below converts into a None result.
            r = self._serialise_posts_and_tags_from_joined_rows(
                all_rows
            )[0]
        except Exception as e:
            # Any failure (unknown id, SQL error) is logged and reported as "not found".
            self._logger.exception(str(e))
            r = None
    return r
python
{ "resource": "" }
q32432
SQLAStorage.count_posts
train
def count_posts(self, tag=None, user_id=None, include_draft=False):
    """
    Returns the total number of posts for the give filter

    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool

    :return: The number of posts for the given filter.
    """
    result = 0
    with self._engine.begin() as conn:
        try:
            count_statement = sqla.select([sqla.func.count()]). \
                select_from(self._post_table)
            # Reuse the same tag/user/draft filter as the listing queries.
            sql_filter = self._get_filter(tag, user_id, include_draft, conn)
            count_statement = count_statement.where(sql_filter)
            result = conn.execute(count_statement).scalar()
        except Exception as e:
            # On any query failure, log it and report zero posts.
            self._logger.exception(str(e))
            result = 0
    return result
python
{ "resource": "" }
q32433
SQLAStorage.delete_post
train
def delete_post(self, post_id):
    """
    Delete the post defined by ``post_id``

    :param post_id: The identifier corresponding to a post
    :type post_id: int

    :return: Returns True if the post was successfully deleted and False
        otherwise.
    """
    status = False
    success = 0
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        # The post row and its user/tag link rows are deleted independently;
        # overall success is reported only when all three deletes worked.
        try:
            post_del_statement = self._post_table.delete().where(
                self._post_table.c.id == post_id)
            conn.execute(post_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            user_posts_del_statement = self._user_posts_table.delete(). \
                where(self._user_posts_table.c.post_id == post_id)
            conn.execute(user_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            tag_posts_del_statement = self._tag_posts_table.delete(). \
                where(self._tag_posts_table.c.post_id == post_id)
            conn.execute(tag_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
    status = success == 3
    return status
python
{ "resource": "" }
q32434
index
train
def index(count, page):
    """
    Serves the page with a list of blog posts

    :param count: number of posts per page; falls back to the
        ``BLOGGING_POSTS_PER_PAGE`` config value (default 10)
    :param page: page number used by ``_get_meta`` to compute the offset
    :return: the rendered ``blogging/index.html`` template
    """
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config

    count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)

    meta = _get_meta(storage, count, page)
    offset = meta["offset"]
    meta["is_user_blogger"] = _is_blogger(blogging_engine.blogger_permission)
    meta["count"] = count
    meta["page"] = page

    # Whether post text should be rendered (e.g. markdown -> HTML).
    render = config.get("BLOGGING_RENDER_TEXT", True)

    posts = storage.get_posts(count=count, offset=offset, include_draft=False,
                              tag=None, user_id=None, recent=True)
    # Signals let extensions observe/modify the post list before and after
    # per-post processing.
    index_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                             posts=posts, meta=meta)
    for post in posts:
        blogging_engine.process_post(post, render=render)
    index_posts_processed.send(blogging_engine.app, engine=blogging_engine,
                               posts=posts, meta=meta)
    return render_template("blogging/index.html", posts=posts, meta=meta,
                           config=config)
python
{ "resource": "" }
q32435
convert_to_dict
train
def convert_to_dict(obj):
    """Converts a StripeObject back to a regular dict.

    Nested StripeObjects (and StripeObjects inside lists) are also converted
    back to regular dicts.

    :param obj: The StripeObject to convert.
    :returns: The StripeObject as a dict; non-dict, non-list values are
        returned unchanged.
    """
    if isinstance(obj, list):
        return [convert_to_dict(item) for item in obj]
    # This works by virtue of the fact that StripeObjects _are_ dicts. The
    # dict comprehension returns a regular dict and recursively applies the
    # conversion to each value. ``dict.items()`` behaves identically on
    # Python 2 and 3 here, so the previous ``six.iteritems`` dependency was
    # unnecessary.
    elif isinstance(obj, dict):
        return {key: convert_to_dict(value) for key, value in obj.items()}
    else:
        return obj
python
{ "resource": "" }
q32436
Browser
train
def Browser(driver_name="firefox", *args, **kwargs):
    """
    Return a driver instance registered under ``driver_name``.

    When working with ``firefox``, it's possible to provide a profile name
    and a list of extensions. ``firefox`` is used when no driver_name is
    given. Raises :class:`splinter.exceptions.DriverNotFoundError` when no
    driver is registered under ``driver_name``.
    """
    if driver_name not in _DRIVERS:
        raise DriverNotFoundError("No driver for %s" % driver_name)
    return _DRIVERS[driver_name](*args, **kwargs)
python
{ "resource": "" }
q32437
Window.title
train
def title(self):
    """The title of the page loaded in this window."""
    browser = self._browser
    with switch_window(browser, self.name):
        return browser.title
python
{ "resource": "" }
q32438
Window.url
train
def url(self):
    """The url currently loaded in this window."""
    browser = self._browser
    with switch_window(browser, self.name):
        return browser.url
python
{ "resource": "" }
q32439
Window.prev
train
def prev(self):
    """Return the window just before this one in the handle list."""
    handles = self._browser.driver.window_handles
    return Window(self._browser, handles[self.index - 1])
python
{ "resource": "" }
q32440
Window.next
train
def next(self):
    """Return the window just after this one, wrapping past the end."""
    handles = self._browser.driver.window_handles
    position = (self.index + 1) % len(handles)
    return Window(self._browser, handles[position])
python
{ "resource": "" }
q32441
Window.close
train
def close(self):
    """
    Close this window.

    If this window is the active one, the previous window becomes active.
    """
    fallback = None
    if self.is_current:
        previous = self.prev
        if previous != self:
            fallback = previous
    with switch_window(self._browser, self.name):
        self._browser.driver.close()
    if fallback is not None:
        fallback.is_current = True
python
{ "resource": "" }
q32442
WebDriverElement.mouse_over
train
def mouse_over(self):
    """
    Move the pointer over this element.

    Currently works only on Chrome driver.
    """
    self.scroll_to()
    chain = ActionChains(self.parent.driver)
    chain.move_to_element(self._element)
    chain.perform()
python
{ "resource": "" }
q32443
WebDriverElement.mouse_out
train
def mouse_out(self):
    """
    Performs a mouse out the element.

    Currently works only on Chrome driver.
    """
    # NOTE(review): this moves the pointer by a zero offset (i.e. it stays
    # where it is) and then *clicks*. A click is a surprising side effect for
    # a "mouse out" action -- confirm this is intentional before relying on it.
    self.scroll_to()
    ActionChains(self.parent.driver).move_by_offset(0, 0).click().perform()
python
{ "resource": "" }
q32444
WebDriverElement.double_click
train
def double_click(self):
    """
    Double-click this element.

    Currently works only on Chrome driver.
    """
    self.scroll_to()
    chain = ActionChains(self.parent.driver)
    chain.double_click(self._element)
    chain.perform()
python
{ "resource": "" }
q32445
WebDriverElement.right_click
train
def right_click(self):
    """
    Right-click (context click) this element.

    Currently works only on Chrome driver.
    """
    self.scroll_to()
    chain = ActionChains(self.parent.driver)
    chain.context_click(self._element)
    chain.perform()
python
{ "resource": "" }
q32446
WebDriverElement.drag_and_drop
train
def drag_and_drop(self, droppable):
    """
    Drag this element onto another element.

    Currently works only on Chrome driver.
    """
    self.scroll_to()
    chain = ActionChains(self.parent.driver)
    chain.drag_and_drop(self._element, droppable._element)
    chain.perform()
python
{ "resource": "" }
q32447
force_unicode
train
def force_unicode(value):
    """
    Forces a bytestring to become a Unicode string.

    Bytes are decoded as UTF-8 (undecodable bytes replaced); any other
    non-text value is passed through ``str``/``unicode``.
    """
    if IS_PY3:
        # Python 3.X
        if isinstance(value, bytes):
            return value.decode('utf-8', errors='replace')
        if isinstance(value, str):
            return value
        return str(value)

    # Python 2.X
    if isinstance(value, str):
        return value.decode('utf-8', 'replace')
    if isinstance(value, basestring):  # NOQA: F821
        return value
    return unicode(value)  # NOQA: F821
python
{ "resource": "" }
q32448
force_bytes
train
def force_bytes(value):
    """
    Forces a Unicode string to become a bytestring (UTF-8 encoded).

    Values that are already bytes (or not text at all) pass through
    unchanged.
    """
    if IS_PY3:
        if isinstance(value, str):
            return value.encode('utf-8', 'backslashreplace')
        return value

    if isinstance(value, unicode):  # NOQA: F821
        return value.encode('utf-8')
    return value
python
{ "resource": "" }
q32449
safe_urlencode
train
def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe version of safe_urlencode

    The stdlib safe_urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fail down to ascii.
    """
    if IS_PY3:
        return urlencode(params, doseq)

    if hasattr(params, "items"):
        params = params.items()

    # Python 2: pre-encode every key and value to UTF-8 bytes so urlencode
    # never has to coerce non-ascii text itself.
    encoded = []
    for key, value in params:
        key = key.encode("utf-8")
        if isinstance(value, (list, tuple)):
            encoded.append((key, [force_bytes(item) for item in value]))
        else:
            encoded.append((key, force_bytes(value)))

    return urlencode(encoded, doseq)
python
{ "resource": "" }
q32450
Solr._extract_error
train
def _extract_error(self, resp):
    """
    Extract the actual error message from a solr response.

    :param resp: the HTTP response object of the failed request.
    :return: a summary of the form ``[Reason: ...]``; when no structured
        reason could be found, the full response body is appended.
    """
    reason = resp.headers.get('reason', None)
    full_response = None

    if reason is None:
        try:
            # if response is in json format
            reason = resp.json()['error']['msg']
        except KeyError:
            # if json response has unexpected structure
            full_response = resp.content
        except ValueError:
            # otherwise we assume it's html
            reason, full_html = self._scrape_response(resp.headers, resp.content)
            full_response = unescape_html(full_html)

    msg = "[Reason: %s]" % reason

    if reason is None:
        msg += "\n%s" % full_response

    return msg
python
{ "resource": "" }
q32451
Solr._scrape_response
train
def _scrape_response(self, headers, response):
    """
    Scrape the html response.

    :param headers: response headers; the ``server`` header is used to pick
        a server-specific scraping strategy (Jetty vs Tomcat).
    :param response: the raw response body (bytes or text).
    :return: a ``(reason, full_html)`` tuple; either element may be empty
        when nothing useful could be extracted.
    """
    # identify the responding server
    server_type = None
    server_string = headers.get('server', '')

    if server_string and 'jetty' in server_string.lower():
        server_type = 'jetty'

    if server_string and 'coyote' in server_string.lower():
        server_type = 'tomcat'

    reason = None
    full_html = ''
    dom_tree = None

    # In Python3, response can be made of bytes
    if IS_PY3 and hasattr(response, 'decode'):
        response = response.decode()
    if response.startswith('<?xml'):
        # Try a strict XML parse
        try:
            soup = ElementTree.fromstring(response)

            reason_node = soup.find('lst[@name="error"]/str[@name="msg"]')
            tb_node = soup.find('lst[@name="error"]/str[@name="trace"]')
            if reason_node is not None:
                full_html = reason = reason_node.text.strip()
            if tb_node is not None:
                full_html = tb_node.text.strip()
            if reason is None:
                reason = full_html

            # Since we had a precise match, we'll return the results now:
            if reason and full_html:
                return reason, full_html
        except ElementTree.ParseError:
            # XML parsing error, so we'll let the more liberal code handle it.
            pass

    if server_type == 'tomcat':
        # Tomcat doesn't produce a valid XML response or consistent HTML:
        m = re.search(r'<(h1)[^>]*>\s*(.+?)\s*</\1>', response, re.IGNORECASE)
        if m:
            reason = m.group(2)
        else:
            full_html = "%s" % response
    else:
        # Let's assume others do produce a valid XML response
        try:
            dom_tree = ElementTree.fromstring(response)
            reason_node = None

            # html page might be different for every server
            if server_type == 'jetty':
                reason_node = dom_tree.find('body/pre')
            else:
                reason_node = dom_tree.find('head/title')

            if reason_node is not None:
                reason = reason_node.text

            if reason is None:
                full_html = ElementTree.tostring(dom_tree)
        except SyntaxError as err:
            LOG.warning('Unable to extract error message from invalid XML: %s',
                        err,
                        extra={'data': {'response': response}})
            full_html = "%s" % response

    # Normalise the scraped HTML into a single trimmed line.
    full_html = force_unicode(full_html)
    full_html = full_html.replace('\n', '')
    full_html = full_html.replace('\r', '')
    full_html = full_html.replace('<br/>', '')
    full_html = full_html.replace('<br />', '')
    full_html = full_html.strip()
    return reason, full_html
python
{ "resource": "" }
q32452
Solr._from_python
train
def _from_python(self, value):
    """
    Converts python values to a form suitable for insertion into the xml
    we send to solr.
    """
    if hasattr(value, 'strftime'):
        # date and datetime both expose strftime; only datetime has 'hour'.
        if hasattr(value, 'hour'):
            # Normalise aware datetimes to UTC and emit ISO-8601 with the
            # trailing 'Z' Solr expects.
            offset = value.utcoffset()
            if offset:
                value = value - offset
            value = value.replace(tzinfo=None).isoformat() + 'Z'
        else:
            # Plain dates become midnight UTC.
            value = "%sT00:00:00Z" % value.isoformat()
    elif isinstance(value, bool):
        # Solr expects lowercase boolean literals.
        if value:
            value = 'true'
        else:
            value = 'false'
    else:
        if IS_PY3:
            # Python 3.X
            if isinstance(value, bytes):
                value = str(value, errors='replace')  # NOQA: F821
        else:
            # Python 2.X
            if isinstance(value, str):
                value = unicode(value, errors='replace')  # NOQA: F821

        value = "{0}".format(value)

    # Strip characters that are not legal in XML documents.
    return clean_xml_string(value)
python
{ "resource": "" }
q32453
Solr._to_python
train
def _to_python(self, value):
    """
    Converts values from Solr to native Python values.
    """
    # NOTE(review): ``long`` only exists on Python 2; under Python 3 this
    # relies on a module-level compatibility alias -- confirm one is defined.
    if isinstance(value, (int, float, long, complex)):  # NOQA: F821
        return value

    if isinstance(value, (list, tuple)):
        # Multi-valued field: only the first entry is converted.
        value = value[0]

    if value == 'true':
        return True
    elif value == 'false':
        return False

    is_string = False

    if IS_PY3:
        if isinstance(value, bytes):
            value = force_unicode(value)

        if isinstance(value, str):
            is_string = True
    else:
        if isinstance(value, str):
            value = force_unicode(value)

        if isinstance(value, basestring):  # NOQA: F821
            is_string = True

    if is_string:
        # ISO-8601-ish timestamps become datetime objects.
        possible_datetime = DATETIME_REGEX.search(value)

        if possible_datetime:
            date_values = possible_datetime.groupdict()

            for dk, dv in date_values.items():
                date_values[dk] = int(dv)

            return datetime.datetime(date_values['year'], date_values['month'],
                                     date_values['day'], date_values['hour'],
                                     date_values['minute'],
                                     date_values['second'])

    try:
        # This is slightly gross but it's hard to tell otherwise what the
        # string's original type might have been.
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        # If it fails, continue on.
        pass

    return value
python
{ "resource": "" }
q32454
Solr._is_null_value
train
def _is_null_value(self, value):
    """
    Check if a given value is ``null``.

    Criteria for this is based on values that shouldn't be included
    in the Solr ``add`` request at all.
    """
    if value is None:
        return True

    if IS_PY3:
        # Python 3.X
        is_empty_string = isinstance(value, str) and len(value) == 0
    else:
        # Python 2.X
        is_empty_string = isinstance(value, basestring) and len(value) == 0  # NOQA: F821

    # TODO: This should probably be removed when solved in core Solr level?
    return is_empty_string
python
{ "resource": "" }
q32455
Solr.search
train
def search(self, q, search_handler=None, **kwargs):
    """
    Run the query ``q`` and return the matching results.

    Extra keyword arguments are passed through as Solr URL parameters.
    Returns an instance of ``self.results_cls`` (``pysolr.Results`` by
    default).

    Usage::

        # All docs.
        results = solr.search('*:*')

        # Search with highlighting.
        results = solr.search('ponies', **{
            'hl': 'true',
            'hl.fragsize': 10,
        })
    """
    params = {'q': q}
    params.update(kwargs)
    raw_response = self._select(params, handler=search_handler)
    decoded = self.decoder.decode(raw_response)

    # cover both cases: there is no response key or value is None
    hits = (decoded.get('response', {}) or {}).get('numFound', 0)
    self.log.debug("Found '%s' search results.", hits)
    return self.results_cls(decoded)
python
{ "resource": "" }
q32456
Solr.more_like_this
train
def more_like_this(self, q, mltfl, handler='mlt', **kwargs):
    """
    Return documents similar to those matched by ``q``.

    ``mltfl`` names the field(s) used for similarity. Requires Solr 1.3+.
    Returns an instance of ``self.results_cls`` (``pysolr.Results`` by
    default).

    Usage::

        similar = solr.more_like_this('id:doc_234', 'text')
    """
    params = {
        'q': q,
        'mlt.fl': mltfl,
    }
    params.update(kwargs)
    decoded = self.decoder.decode(self._mlt(params, handler=handler))

    # cover both cases: there is no response key or value is None
    found = (decoded.get('response', {}) or {}).get('numFound', 0)
    self.log.debug("Found '%s' MLT results.", found)
    return self.results_cls(decoded)
python
{ "resource": "" }
q32457
Solr.suggest_terms
train
def suggest_terms(self, fields, prefix, handler='terms', **kwargs):
    """
    Return term suggestions for ``prefix`` over the given ``fields``.

    The result is a dictionary keyed on field name whose values are lists
    of ``(term, count)`` pairs.  Requires Solr 1.4+.
    """
    params = {
        'terms.fl': fields,
        'terms.prefix': prefix,
    }
    params.update(kwargs)
    raw = self._suggest_terms(params, handler=handler)
    terms = self.decoder.decode(raw).get("terms", {})

    # Solr 1.x returns a flat list:
    #   ["field_name", ["dance",23,"dancers",10]]
    # while Solr 3.x+ returns a dict:
    #   {"field_name": ["dance",23,"dancers",10]}
    # -- normalise the former into the latter.
    if isinstance(terms, (list, tuple)):
        terms = dict(zip(terms[0::2], terms[1::2]))

    res = {}
    for field, flat in terms.items():
        res[field] = [(flat[i], flat[i + 1]) for i in range(0, len(flat), 2)]

    self.log.debug("Found '%d' Term suggestions results.",
                   sum(len(pairs) for pairs in res.values()))
    return res
python
{ "resource": "" }
q32458
Solr.add
train
def add(self, docs, boost=None, fieldUpdates=None, commit=None,
        softCommit=False, commitWithin=None, waitFlush=None,
        waitSearcher=None, overwrite=None, handler='update'):
    """
    Adds or updates documents.

    Requires ``docs``, which is a list of dictionaries. Each key is the
    field name and each value is the value to index.

    Optionally accepts ``commit``. Default is ``None``. None signals to use default

    Optionally accepts ``softCommit``. Default is ``False``.

    Optionally accepts ``boost``. Default is ``None``.

    Optionally accepts ``fieldUpdates``. Default is ``None``.

    Optionally accepts ``commitWithin``. Default is ``None``.

    Optionally accepts ``waitFlush``. Default is ``None``.

    Optionally accepts ``waitSearcher``. Default is ``None``.

    Optionally accepts ``overwrite``. Default is ``None``.

    Usage::

        solr.add([
            {
                "id": "doc_1",
                "title": "A test document",
            },
            {
                "id": "doc_2",
                "title": "The Banana: Tasty or Dangerous?",
            },
        ])
    """
    start_time = time.time()
    self.log.debug("Starting to build add request...")
    message = ElementTree.Element('add')

    if commitWithin:
        # NOTE(review): Element.set expects a string value, so commitWithin
        # presumably arrives as a str of milliseconds -- confirm callers.
        message.set('commitWithin', commitWithin)

    for doc in docs:
        el = self._build_doc(doc, boost=boost, fieldUpdates=fieldUpdates)
        message.append(el)

    # This returns a bytestring. Ugh.
    m = ElementTree.tostring(message, encoding='utf-8')
    # Convert back to Unicode please.
    m = force_unicode(m)

    end_time = time.time()
    self.log.debug("Built add request of %s docs in %0.2f seconds.",
                   len(message), end_time - start_time)
    return self._update(m, commit=commit, softCommit=softCommit,
                        waitFlush=waitFlush, waitSearcher=waitSearcher,
                        overwrite=overwrite, handler=handler)
python
{ "resource": "" }
q32459
Solr.delete
train
def delete(self, id=None, q=None, commit=None, softCommit=False,
           waitFlush=None, waitSearcher=None, handler='update'):  # NOQA: A002
    """
    Deletes documents.

    Requires *either* ``id`` or ``q``. ``id`` is if you know the specific
    document id to remove. Note that ``id`` can also be a list of document
    ids to be deleted. ``q`` is a Lucene-style query indicating a collection
    of documents to delete.

    Optionally accepts ``commit``. Default is ``None`` (use the server's
    default behaviour).

    Optionally accepts ``softCommit``. Default is ``False``.

    Optionally accepts ``waitFlush``. Default is ``None``.

    Optionally accepts ``waitSearcher``. Default is ``None``.

    :raises ValueError: if neither or both of ``id``/``q`` are given, or if
        ``id`` is a list that contains no usable ids.

    Usage::

        solr.delete(id='doc_12')
        solr.delete(id=['doc_1', 'doc_3'])
        solr.delete(q='*:*')
    """
    if id is None and q is None:
        raise ValueError('You must specify "id" or "q".')
    elif id is not None and q is not None:
        # Fixed typo in the original message ("many" -> "may").
        raise ValueError('You may only specify "id" OR "q", not both.')
    elif id is not None:
        if not isinstance(id, (list, set, tuple)):
            doc_id = [id]
        else:
            # Drop falsy entries so we never emit an empty <id></id>.
            doc_id = list(filter(None, id))

        if doc_id:
            m = '<delete>%s</delete>' % ''.join('<id>%s</id>' % i for i in doc_id)
        else:
            raise ValueError('The list of documents to delete was empty.')
    elif q is not None:
        m = '<delete><query>%s</query></delete>' % q

    return self._update(m, commit=commit, softCommit=softCommit,
                        waitFlush=waitFlush, waitSearcher=waitSearcher,
                        handler=handler)
python
{ "resource": "" }
q32460
Solr.commit
train
def commit(self, softCommit=False, waitFlush=None, waitSearcher=None,
           expungeDeletes=None, handler='update'):
    """
    Ask Solr to write the index data to disk.

    Optionally accepts ``expungeDeletes``. Default is ``None``.

    Optionally accepts ``waitFlush``. Default is ``None``.

    Optionally accepts ``waitSearcher``. Default is ``None``.

    Optionally accepts ``softCommit``. Default is ``False``.

    Usage::

        solr.commit()
    """
    if expungeDeletes is None:
        msg = '<commit />'
    else:
        msg = '<commit expungeDeletes="%s" />' % str(bool(expungeDeletes)).lower()

    return self._update(msg, commit=not softCommit, softCommit=softCommit,
                        waitFlush=waitFlush, waitSearcher=waitSearcher,
                        handler=handler)
python
{ "resource": "" }
q32461
Solr.optimize
train
def optimize(self, commit=True, waitFlush=None, waitSearcher=None,
             maxSegments=None, handler='update'):
    """
    Tell Solr to streamline the number of segments used -- essentially a
    defragmentation operation.

    Optionally accepts ``maxSegments``. Default is ``None``.

    Optionally accepts ``waitFlush``. Default is ``None``.

    Optionally accepts ``waitSearcher``. Default is ``None``.

    Usage::

        solr.optimize()
    """
    msg = ('<optimize maxSegments="%d" />' % maxSegments) if maxSegments \
        else '<optimize />'

    return self._update(msg, commit=commit, waitFlush=waitFlush,
                        waitSearcher=waitSearcher, handler=handler)
python
{ "resource": "" }
q32462
Solr.ping
train
def ping(self, handler='admin/ping', **kwargs):
    """
    Sends a ping request.

    Usage::

        solr.ping()
    """
    encoded = safe_urlencode(kwargs, True)

    if len(encoded) < 1024:
        # Typical case: everything fits into the query string.
        return self._send_request('get', '%s/?%s' % (handler, encoded))

    # Handles very long queries by submitting as a POST.
    headers = {
        'Content-type': 'application/x-www-form-urlencoded; charset=utf-8',
    }
    return self._send_request('post', '%s/' % handler, body=encoded,
                              headers=headers)
python
{ "resource": "" }
q32463
InteractiveKeyBindings.format_response
train
def format_response(self, response):
    """Map a boolean-word ``response`` string to 'yes' or 'no'.

    The accepted words and their truth values come from the shell config's
    BOOLEAN_STATES mapping. Raises ValueError for unrecognised input.
    """
    states = self.shell_ctx.config.BOOLEAN_STATES
    if response not in states:
        raise ValueError('Invalid response: input should equate to true or false')
    return 'yes' if states[response] else 'no'
python
{ "resource": "" }
q32464
get_window_dim
train
def get_window_dim():
    """Return the terminal dimensions, dispatching on interpreter version and OS."""
    if sys.version_info >= (3, 3):
        # shutil.get_terminal_size exists from Python 3.3 onwards.
        return _size_36()
    if platform.system() == 'Windows':
        return _size_windows()
    return _size_27()
python
{ "resource": "" }
q32465
_size_36
train
def _size_36(): """ returns the rows, columns of terminal """ from shutil import get_terminal_size dim = get_terminal_size() if isinstance(dim, list): return dim[0], dim[1] return dim.lines, dim.columns
python
{ "resource": "" }
q32466
update_frequency
train
def update_frequency(shell_ctx):
    """ updates the frequency from files

    Loads the usage-frequency JSON file from the config directory,
    increments today's (UTC) counter, writes the file back and returns the
    updated mapping of day string -> use count.
    """
    frequency_path = os.path.join(shell_ctx.config.get_config_dir(),
                                  shell_ctx.config.get_frequency())
    if os.path.exists(frequency_path):
        with open(frequency_path, 'r') as freq:
            try:
                frequency = json.load(freq)
            except ValueError:
                # Corrupt or empty file: start over with an empty mapping.
                frequency = {}
    else:
        frequency = {}

    with open(frequency_path, 'w') as freq:
        now = day_format(datetime.datetime.utcnow())
        val = frequency.get(now)
        frequency[now] = val + 1 if val else 1
        json.dump(frequency, freq)

    return frequency
python
{ "resource": "" }
q32467
frequency_measurement
train
def frequency_measurement(shell_ctx):
    """Count on how many of the last DAYS_AGO days this program was used."""
    freq = update_frequency(shell_ctx)
    today = datetime.datetime.utcnow()
    used_days = 0
    for offset in range(0, DAYS_AGO):
        day = today - datetime.timedelta(days=offset)
        if freq.get(day_format(day), 0) > 0:
            used_days += 1
    return used_days
python
{ "resource": "" }
q32468
get_public_ip_validator
train
def get_public_ip_validator():
    """ Retrieves a validator for public IP address. Accepting all defaults
    will perform a check for an existing name or ID with no ARM-required
    -type parameter. """
    from msrestazure.tools import is_valid_resource_id, resource_id

    def simple_validator(cmd, namespace):
        # namespace.public_ip_address may be a single value or a list; each
        # entry is either a full ARM resource ID (kept as-is) or a bare name
        # (expanded into an ID in the current subscription/resource group).
        if namespace.public_ip_address:
            is_list = isinstance(namespace.public_ip_address, list)

            def _validate_name_or_id(public_ip):
                # determine if public_ip_address is name or ID
                is_id = is_valid_resource_id(public_ip)
                return public_ip if is_id else resource_id(
                    subscription=get_subscription_id(cmd.cli_ctx),
                    resource_group=namespace.resource_group_name,
                    namespace='Microsoft.Network',
                    type='publicIPAddresses',
                    name=public_ip)

            if is_list:
                for i, public_ip in enumerate(namespace.public_ip_address):
                    namespace.public_ip_address[i] = _validate_name_or_id(public_ip)
            else:
                namespace.public_ip_address = _validate_name_or_id(namespace.public_ip_address)

    return simple_validator
python
{ "resource": "" }
q32469
load_help_files
train
def load_help_files(data):
    """ loads all the extra information from help files

    Merges the YAML help entries in ``helps`` into ``data`` (the dumped
    command table): command short summaries, per-parameter help and
    examples. ``data`` is mutated in place.
    """
    for command_name, help_yaml in helps.items():

        help_entry = yaml.safe_load(help_yaml)
        try:
            help_type = help_entry['type']
        except KeyError:
            # Entries without a 'type' cannot be classified; skip them.
            continue

        # if there is extra help for this command but it's not reflected in the command table
        if command_name not in data and help_type == 'command':
            logger.debug('Command: %s not found in command table', command_name)
            continue

        short_summary = help_entry.get('short-summary')
        if short_summary and help_type == 'command':
            data[command_name]['help'] = short_summary
        else:
            # must be a command group or sub-group
            # NOTE(review): a 'command' entry with no short-summary also
            # lands here and replaces any existing parameter data with a
            # bare {'help': None} record -- confirm this is intended.
            data[command_name] = {'help': short_summary}
            continue

        if 'parameters' in help_entry:
            for param in help_entry['parameters']:
                # this could fail if the help file and options list are not in the same order
                param_name = param['name'].split()[0]
                if param_name not in data[command_name]['parameters']:
                    logger.debug('Command %s does not have parameter: %s',
                                 command_name, param_name)
                    continue
                if 'short-summary' in param:
                    data[command_name]['parameters'][param_name]['help'] = param["short-summary"]

        if 'examples' in help_entry:
            data[command_name]['examples'] = [
                [example['name'], example['text']] for example in help_entry['examples']]
python
{ "resource": "" }
q32470
get_cache_dir
train
def get_cache_dir(shell_ctx):
    """Return the shell's cache directory, creating it (and its parent
    config directory) on first use."""
    config_dir = shell_ctx.config.get_config_dir()
    cache_path = os.path.join(config_dir, 'cache')
    for folder in (config_dir, cache_path):
        if not os.path.exists(folder):
            os.makedirs(folder)
    return cache_path
python
{ "resource": "" }
q32471
FreshTable.dump_command_table
train
def dump_command_table(self, shell_ctx=None):
    """ dumps the command table

    Loads the full azure-cli command table (plus extra help files),
    flattens it into ``{command: {parameters, help, examples}}`` and writes
    the result as JSON into the interactive shell's cache directory so
    later startups can load it quickly.
    """
    from azure.cli.core.commands.arm import register_global_subscription_argument, register_ids_argument
    from knack import events
    import timeit

    start_time = timeit.default_timer()
    shell_ctx = shell_ctx or self.shell_ctx
    main_loader = AzInteractiveCommandsLoader(shell_ctx.cli_ctx)

    main_loader.load_command_table(None)
    main_loader.load_arguments(None)
    # Wire up arguments that are injected globally rather than per-command.
    register_global_subscription_argument(shell_ctx.cli_ctx)
    register_ids_argument(shell_ctx.cli_ctx)
    shell_ctx.cli_ctx.raise_event(events.EVENT_INVOKER_POST_CMD_TBL_CREATE, commands_loader=main_loader)

    cmd_table = main_loader.command_table
    cmd_table_data = {}

    for command_name, cmd in cmd_table.items():
        try:
            command_description = cmd.description
            if callable(command_description):
                command_description = command_description()

            # checking all the parameters for a single command
            parameter_metadata = {}
            for arg in cmd.arguments.values():

                options = {
                    'name': [name for name in arg.options_list],
                    'required': REQUIRED_TAG if arg.type.settings.get('required') else '',
                    'help': arg.type.settings.get('help') or ''
                }
                # the key is the first alias option
                if arg.options_list:
                    parameter_metadata[arg.options_list[0]] = options

            cmd_table_data[command_name] = {
                'parameters': parameter_metadata,
                'help': command_description,
                'examples': ''
            }
        except (ImportError, ValueError):
            # Commands whose metadata fails to load are simply omitted.
            pass

    load_help_files(cmd_table_data)

    elapsed = timeit.default_timer() - start_time
    logger.debug('Command table dumped: %s sec', elapsed)
    FreshTable.loader = main_loader

    # dump into the cache file
    command_file = shell_ctx.config.get_help_files()
    with open(os.path.join(get_cache_dir(shell_ctx), command_file), 'w') as help_file:
        json.dump(cmd_table_data, help_file, default=lambda x: x.target or '', skipkeys=True)
python
{ "resource": "" }
q32472
_query_account_key
train
def _query_account_key(cli_ctx, account_name):
    """Query the storage account key. This is used when the customer doesn't
    offer account key but name."""
    rg, scf = _query_account_rg(cli_ctx, account_name)
    t_storage_account_keys = get_sdk(
        cli_ctx, CUSTOM_MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')

    # NOTE(review): older API profiles presumably expose .key1 directly while
    # newer ones return a list of key objects -- confirm against the SDK
    # models before changing either access pattern.
    if t_storage_account_keys:
        return scf.storage_accounts.list_keys(rg, account_name).key1
    # of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
    return scf.storage_accounts.list_keys(rg, account_name).keys[0].value
python
{ "resource": "" }
q32473
_query_account_rg
train
def _query_account_rg(cli_ctx, account_name):
    """Look up the resource group that owns ``account_name``.

    Returns a ``(resource_group, storage_client)`` tuple; raises ValueError
    when no matching account exists in the subscription. The mgmt sdk
    requires the resource group for key queries.
    """
    scf = get_mgmt_service_client(cli_ctx, CUSTOM_MGMT_STORAGE)
    for account in scf.storage_accounts.list():
        if account.name == account_name:
            from msrestazure.tools import parse_resource_id
            return parse_resource_id(account.id)['resource_group'], scf
    raise ValueError("Storage account '{}' not found.".format(account_name))
python
{ "resource": "" }
q32474
process_resource_group
train
def process_resource_group(cmd, namespace):
    """Fill in resource_group_name from the storage account name when omitted."""
    if not namespace.account_name or namespace.resource_group_name:
        return
    namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
python
{ "resource": "" }
q32475
validate_client_parameters
train
def validate_client_parameters(cmd, namespace):
    """ Retrieves storage connection parameters from environment variables
    and parses out connection string into account name and key """
    n = namespace

    def get_config_value(section, key, default):
        return cmd.cli_ctx.config.get(section, key, default)

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value('storage', 'account', None)
        if auth_mode == 'login':
            # 'login' auth mode: authenticate with an AAD token credential and
            # ignore any account-key-style arguments.
            n.token_credential = _create_token_credential(cmd.cli_ctx)

            # give warning if there are account key args being ignored
            account_key_args = [n.account_key and "--account-key",
                                n.sas_token and "--sas-token",
                                n.connection_string and "--connection-string"]
            account_key_args = [arg for arg in account_key_args if arg]

            if account_key_args:
                from knack.log import get_logger
                logger = get_logger(__name__)
                logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                               ' ,'.join(account_key_args))
            return

    if not n.connection_string:
        n.connection_string = get_config_value('storage', 'connection_string', None)

    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        if not n.account_name or not n.account_key:
            from knack.util import CLIError
            raise CLIError('Connection-string: %s, is malformed. Some shell environments require the '
                           'connection string to be surrounded by quotes.' % n.connection_string)

    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        n.account_name = get_config_value('storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value('storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value('storage', 'sas_token', None)

    # strip the '?' from the sas token; the portal and the command line return
    # sas tokens in different forms
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
python
{ "resource": "" }
q32476
validate_encryption_services
train
def validate_encryption_services(cmd, namespace):
    """
    Builds up the encryption services object for storage account operations based on the list of services passed in.
    """
    if namespace.encryption_services:
        t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, CUSTOM_MGMT_STORAGE,
                                                              'EncryptionServices', 'EncryptionService', mod='models')
        # Each named service (e.g. 'blob', 'file') becomes an enabled
        # EncryptionService keyword argument on the EncryptionServices model.
        services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}

        namespace.encryption_services = t_encryption_services(**services)
python
{ "resource": "" }
q32477
get_file_path_validator
train
def get_file_path_validator(default_file_param=None):
    """ Creates a namespace validator that splits out 'path' into
    'directory_name' and 'file_name'. Allows another path-type parameter to
    be named which can supply a default filename. """
    def validator(namespace):
        if not hasattr(namespace, 'path'):
            return
        full_path = namespace.path
        if full_path:
            dir_name, file_name = os.path.split(full_path)
        else:
            dir_name, file_name = None, ''

        # Without an extension, treat the whole path as a directory and fall
        # back to the named parameter for the file name.
        if default_file_param and '.' not in file_name:
            dir_name = full_path
            file_name = os.path.split(getattr(namespace, default_file_param))[1]

        namespace.directory_name = dir_name
        namespace.file_name = file_name
        del namespace.path
    return validator
python
{ "resource": "" }
q32478
ipv4_range_type
train
def ipv4_range_type(string):
    """ Validates an IPv4 address or address range. """
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    single = re.match("^{}$".format(ip_format), string)
    if single is None:
        ranged = re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string)
        if ranged is None:
            raise ValueError
    return string
python
{ "resource": "" }
q32479
resource_type_type
train
def resource_type_type(loader):
    """ Returns a function which validates that resource types string
    contains only a combination of service, container, and object. Their
    shorthand representations are s, c, and o. """
    def impl(string):
        t_resources = loader.get_models('common.models#ResourceTypes')
        invalid = set(string) - set("sco")
        if invalid:
            raise ValueError
        return t_resources(_str=''.join(set(string)))
    return impl
python
{ "resource": "" }
q32480
services_type
train
def services_type(loader):
    """ Returns a function which validates that services string contains
    only a combination of blob, queue, table, and file. Their shorthand
    representations are b, q, t, and f. """
    def impl(string):
        t_services = loader.get_models('common.models#Services')
        invalid = set(string) - set("bqtf")
        if invalid:
            raise ValueError
        return t_services(_str=''.join(set(string)))
    return impl
python
{ "resource": "" }
q32481
validate_k8s_version
train
def validate_k8s_version(namespace):
    """Validates a string as a possible Kubernetes version. An empty string is also valid, which
    tells the server to use its default version.

    On success the value is normalized in place (a leading 'v'/'V' is stripped).

    :raises CLIError: if the value is not a full x.y.z version number.
    """
    if namespace.kubernetes_version:
        # [vV]? — the original class was [v|V]?, which also matched a literal
        # '|' prefix because '|' has no special meaning inside a class.
        k8s_release_regex = re.compile(r'^[vV]?(\d+\.\d+\.\d+.*)$')
        found = k8s_release_regex.findall(namespace.kubernetes_version)
        if found:
            namespace.kubernetes_version = found[0]
        else:
            raise CLIError('--kubernetes-version should be the full version number, '
                           'such as "1.7.12" or "1.8.7"')
python
{ "resource": "" }
q32482
validate_linux_host_name
train
def validate_linux_host_name(namespace):
    """Validates a string as a legal host name component.

    This validation will also occur server-side in the ARM API, but that may take
    a minute or two before the user sees it. So it's more user-friendly to validate
    in the CLI pre-flight.
    """
    # https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
    rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')  # pylint:disable=line-too-long
    if rfc1123_regex.match(namespace.name) is None:
        raise CLIError('--name cannot exceed 63 characters and can only contain '
                       'letters, numbers, or dashes (-).')
python
{ "resource": "" }
q32483
validate_max_pods
train
def validate_max_pods(namespace):
    """Validates that max_pods is set to a reasonable minimum number."""
    # kube-proxy and kube-svc reside each nodes,
    # 2 kube-proxy pods, 1 azureproxy/heapster/dashboard/tunnelfront are in kube-system
    required_minimum = ceil((2 * namespace.node_count + 7) / namespace.node_count)
    if namespace.max_pods != 0 and namespace.max_pods < required_minimum:
        raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'
                       .format(required_minimum))
python
{ "resource": "" }
q32484
validate_nodes_count
train
def validate_nodes_count(namespace):
    """Validate that min_count and max_count is set to 1-100"""
    min_count = namespace.min_count
    if min_count is not None and not 1 <= min_count <= 100:
        raise CLIError('--min-count must be in the range [1,100]')
    max_count = namespace.max_count
    if max_count is not None and not 1 <= max_count <= 100:
        raise CLIError('--max-count must be in the range [1,100]')
python
{ "resource": "" }
q32485
validate_nodepool_name
train
def validate_nodepool_name(namespace):
    """Validates a nodepool name to be at most 12 characters, alphanumeric only.

    :raises CLIError: if the name is longer than 12 characters or contains
        non-alphanumeric characters. An empty name is accepted (server default).
    """
    if namespace.nodepool_name != "":
        if len(namespace.nodepool_name) > 12:
            # fixed error-message typo: "atmost" -> "at most"
            raise CLIError('--nodepool-name can contain at most 12 characters')
        if not namespace.nodepool_name.isalnum():
            raise CLIError('--nodepool-name should only contain alphanumeric characters')
python
{ "resource": "" }
q32486
CloudStorageAccount.create_block_blob_service
train
def create_block_blob_service(self):
    '''
    Creates a BlockBlobService object with the settings specified in the
    CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService`
    '''
    try:
        # Imported lazily so the blob package is only required when used.
        from azure.storage.blob.blockblobservice import BlockBlobService as _BlockBlobService
        return _BlockBlobService(self.account_name,
                                 self.account_key,
                                 sas_token=self.sas_token,
                                 is_emulated=self.is_emulated)
    except ImportError:
        raise Exception('The package azure-storage-blob is required. '
                        'Please install it using "pip install azure-storage-blob"')
python
{ "resource": "" }
q32487
CloudStorageAccount.create_page_blob_service
train
def create_page_blob_service(self):
    '''
    Creates a PageBlobService object with the settings specified in the
    CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
    '''
    try:
        # Imported lazily so the blob package is only required when used.
        from azure.storage.blob.pageblobservice import PageBlobService as _PageBlobService
        return _PageBlobService(self.account_name,
                                self.account_key,
                                sas_token=self.sas_token,
                                is_emulated=self.is_emulated)
    except ImportError:
        raise Exception('The package azure-storage-blob is required. '
                        'Please install it using "pip install azure-storage-blob"')
python
{ "resource": "" }
q32488
CloudStorageAccount.create_append_blob_service
train
def create_append_blob_service(self):
    '''
    Creates a AppendBlobService object with the settings specified in the
    CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService`
    '''
    try:
        # Imported lazily so the blob package is only required when used.
        from azure.storage.blob.appendblobservice import AppendBlobService as _AppendBlobService
        return _AppendBlobService(self.account_name,
                                  self.account_key,
                                  sas_token=self.sas_token,
                                  is_emulated=self.is_emulated)
    except ImportError:
        raise Exception('The package azure-storage-blob is required. '
                        'Please install it using "pip install azure-storage-blob"')
python
{ "resource": "" }
q32489
CloudStorageAccount.create_queue_service
train
def create_queue_service(self):
    '''
    Creates a QueueService object with the settings specified in the
    CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
    '''
    try:
        # Imported lazily so the queue package is only required when used.
        from azure.storage.queue.queueservice import QueueService as _QueueService
        return _QueueService(self.account_name,
                             self.account_key,
                             sas_token=self.sas_token,
                             is_emulated=self.is_emulated)
    except ImportError:
        raise Exception('The package azure-storage-queue is required. '
                        'Please install it using "pip install azure-storage-queue"')
python
{ "resource": "" }
q32490
QueueService.exists
train
def exists(self, queue_name, timeout=None):
    '''
    Returns a boolean indicating whether the queue exists.

    :param str queue_name:
        The name of queue to check for existence.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A boolean indicating whether the queue exists.
    :rtype: bool
    '''
    try:
        # Probing the metadata succeeds only for an existing queue.
        self.get_queue_metadata(queue_name, timeout=timeout)
    except AzureHttpError as ex:
        # Re-raise anything other than a "does not exist" error.
        _dont_fail_not_exist(ex)
        return False
    return True
python
{ "resource": "" }
q32491
space_toolbar
train
def space_toolbar(settings_items, empty_space):
    """ formats the toolbar """
    total_len = sum(len(item) for item in settings_items)
    if len(settings_items) > 1:
        # Evenly distribute the leftover width between the items.
        gap = (len(empty_space) - total_len) // (len(settings_items) - 1)
        spacing = empty_space[:gap]
    else:
        spacing = ''
    settings = spacing.join(settings_items)
    empty_space = empty_space[len(NOTIFICATIONS) + len(settings) + 1:]
    return settings, empty_space
python
{ "resource": "" }
q32492
AzInteractiveShell.cli
train
def cli(self):
    """ Makes the interface or refreshes it """
    interface = self._cli
    if interface is None:
        # Lazily build the interface on first access and cache it.
        interface = self.create_interface()
        self._cli = interface
    return interface
python
{ "resource": "" }
q32493
AzInteractiveShell.on_input_timeout
train
def on_input_timeout(self, cli):
    """ brings up the metadata for the command if there is a valid command already typed

    Regenerates the help/example/default-value panes from the completer state and
    asks the interface to redraw.

    :param cli: the prompt-toolkit interface whose buffers are refreshed
    """
    document = cli.current_buffer.document
    text = document.text
    # remove every occurrence of 'az ' (replace() is not limited to a prefix)
    text = text.replace('az ', '')
    if self.default_command:
        text = self.default_command + ' ' + text
    # NOTE(review): `text` is built above but never consumed —
    # generate_help_text() reads the completer state instead; confirm intent.
    param_info, example = self.generate_help_text()

    self.param_docs = u'{}'.format(param_info)
    self.example_docs = u'{}'.format(example)

    self._update_default_info()

    # repopulate each read-only pane with the freshly generated text
    cli.buffers['description'].reset(
        initial_document=Document(self.description_docs, cursor_position=0))
    cli.buffers['parameter'].reset(
        initial_document=Document(self.param_docs))
    cli.buffers['examples'].reset(
        initial_document=Document(self.example_docs))
    cli.buffers['default_values'].reset(
        initial_document=Document(
            u'{}'.format(self.config_default if self.config_default else 'No Default Values')))
    self._update_toolbar()
    cli.request_redraw()
python
{ "resource": "" }
q32494
AzInteractiveShell._space_examples
train
def _space_examples(self, list_examples, rows, section_value):
    """ makes the example text

    Builds a numbered list of examples and, when the text is taller than the
    available screen fraction, shows only the page selected by `section_value`
    plus a "page/total" footer.

    :param list_examples: sequence of example entries; only entries with more
        than one element contribute (entry[0] + entry[1] are concatenated)
    :param rows: terminal row count used to size an excerpt page
    :param section_value: 1-based page number to display
    :return: the (possibly excerpted) example text with paging hint appended
    """
    examples_with_index = []
    for i, _ in list(enumerate(list_examples)):
        if len(list_examples[i]) > 1:
            examples_with_index.append("[" + str(i + 1) + "] " + list_examples[i][0] +
                                       list_examples[i][1])

    example = "".join(exam for exam in examples_with_index)
    num_newline = example.count('\n')

    page_number = ''
    if num_newline > rows * PART_SCREEN_EXAMPLE and rows > PART_SCREEN_EXAMPLE * 10:
        # size of one page of examples, as a fraction of the screen
        len_of_excerpt = math.floor(float(rows) * PART_SCREEN_EXAMPLE)

        group = example.split('\n')
        end = int(section_value * len_of_excerpt)
        begin = int((section_value - 1) * len_of_excerpt)

        if end < num_newline:
            example = '\n'.join(group[begin:end]) + "\n"
        else:
            # default chops top off
            example = '\n'.join(group[begin:]) + "\n"
            # NOTE(review): this loop decrements self.example_page but the
            # condition only reads section_value, which never changes inside
            # the loop — if entered, it never terminates. Presumably callers
            # keep example_page in range; confirm and consider re-deriving
            # section_value per iteration.
            while ((section_value - 1) * len_of_excerpt) > num_newline:
                self.example_page -= 1
        page_number = '\n' + str(section_value) + "/" + str(int(math.ceil(num_newline / len_of_excerpt)))

    return example + page_number + ' CTRL+Y (^) CTRL+N (v)'
python
{ "resource": "" }
q32495
AzInteractiveShell.generate_help_text
train
def generate_help_text(self):
    """ generates the help text based on commands typed

    Reads the completer's current command state, stores the command/group
    description on self.description_docs, and returns parameter help and
    example text for the panes.

    :return: tuple of (parameter description text, example text)
    """
    param_descrip = example = ""
    self.description_docs = u''
    rows, _ = get_window_dim()
    rows = int(rows)

    param_args = self.completer.leftover_args
    last_word = self.completer.unfinished_word
    command = self.completer.current_command
    new_command = ' '.join([command, last_word]).strip()
    # prefer the longer command when the partially typed word completes one
    if not self.completer.complete_command and new_command in self.completer.command_description:
        command = new_command

    # get command/group help
    if self.completer and command in self.completer.command_description:
        self.description_docs = u'{}'.format(self.completer.command_description[command])

    # get parameter help if full command
    if self.completer and command in self.completer.command_param_info:
        param = param_args[-1] if param_args else ''
        param = last_word if last_word.startswith('-') else param
        if param in self.completer.command_param_info[command] and self.completer.has_description(
                command + " " + param):
            param_descrip = ''.join([
                param, ":", '\n',
                self.completer.param_description.get(command + " " + param, '')])

    if command in self.completer.command_examples:
        # NOTE: a loop that flattened the examples into a joined string was
        # removed here — its result was discarded, so it had no effect.
        example = self._space_examples(
            self.completer.command_examples[command], rows, self.example_page)

    return param_descrip, example
python
{ "resource": "" }
q32496
AzInteractiveShell.create_application
train
def create_application(self, full_layout=True):
    """ makes the application object and the buffers

    :param full_layout: when True, build the full pane layout; otherwise the
        reduced tutorial layout
    :return: a prompt-toolkit Application wired with the shell's buffers,
        style, key bindings, and input-timeout hook
    """
    layout_manager = LayoutManager(self)
    if full_layout:
        layout = layout_manager.create_layout(ExampleLexer, ToolbarLexer)
    else:
        layout = layout_manager.create_tutorial_layout()

    # one named buffer per display pane; read_only panes are refreshed
    # programmatically via reset()
    buffers = {
        DEFAULT_BUFFER: Buffer(is_multiline=True),
        'description': Buffer(is_multiline=True, read_only=True),
        'parameter': Buffer(is_multiline=True, read_only=True),
        'examples': Buffer(is_multiline=True, read_only=True),
        'bottom_toolbar': Buffer(is_multiline=True),
        'example_line': Buffer(is_multiline=True),
        'default_values': Buffer(),
        'symbols': Buffer(),
        'progress': Buffer(is_multiline=False)
    }

    # the buffer the user actually types into
    writing_buffer = Buffer(
        history=self.history,
        auto_suggest=AutoSuggestFromHistory(),
        enable_history_search=True,
        completer=self.completer,
        complete_while_typing=Always()
    )

    return Application(
        mouse_support=False,
        style=self.style,
        buffer=writing_buffer,
        on_input_timeout=self.on_input_timeout,
        key_bindings_registry=InteractiveKeyBindings(self).registry,
        layout=layout,
        buffers=buffers,
    )
python
{ "resource": "" }
q32497
AzInteractiveShell.set_prompt
train
def set_prompt(self, prompt_command="", position=0):
    """ writes the prompt line """
    self.description_docs = u'{}'.format(prompt_command)
    # Replace the current buffer content and place the cursor, then redraw.
    prompt_document = Document(self.description_docs, cursor_position=position)
    self.cli.current_buffer.reset(initial_document=prompt_document)
    self.cli.request_redraw()
python
{ "resource": "" }
q32498
AzInteractiveShell.set_scope
train
def set_scope(self, value):
    """ narrows the scopes the commands """
    # Separate from any existing scope with a single space.
    separator = ' ' if self.default_command else ''
    self.default_command += separator + value
    return value
python
{ "resource": "" }
q32499
AzInteractiveShell.handle_example
train
def handle_example(self, text, continue_flag):
    """ parses for the tutorial

    Splits the typed text on the example symbol into a command and a 1-based
    example number, looks the example up, strips the leading 'az' tokens, and
    hands the result to example_repl for the fill-in-the-blanks tutorial.

    :param text: the raw input line containing the example selector
    :param continue_flag: passed through to example_repl
    """
    cmd = text.partition(SELECT_SYMBOL['example'])[0].rstrip()
    num = text.partition(SELECT_SYMBOL['example'])[2].strip()
    example = ""
    try:
        # convert the 1-based user selection to a 0-based index
        num = int(num) - 1
    except ValueError:
        print("An Integer should follow the colon", file=self.output)
        # NOTE(review): this path returns a single string while the path below
        # returns a 2-tuple — callers must handle both shapes; confirm intent.
        return ""
    if cmd in self.completer.command_examples:
        if num >= 0 and num < len(self.completer.command_examples[cmd]):
            example = self.completer.command_examples[cmd][num][1]
            example = example.replace('\n', '')
        else:
            print('Invalid example number', file=self.output)
            return '', True

    # remove every 'az' substring so only the arguments remain
    example = example.replace('az', '')

    starting_index = None
    counter = 0
    example_no_fill = ""
    flag_fill = True
    # copy words up to (and including) the first '-'-prefixed option;
    # later options are appended too, but plain words after the first
    # option are dropped (flag_fill is cleared)
    for word in example.split():
        if flag_fill:
            example_no_fill += word + " "
        if word.startswith('-'):
            example_no_fill += word + " "
            # NOTE(review): truthiness test — if the first option is at
            # index 0, starting_index stays falsy (0) and is reassigned on
            # every later option; 'is None' may have been intended.
            if not starting_index:
                starting_index = counter
            flag_fill = False
        counter += 1

    return self.example_repl(example_no_fill, example, starting_index, continue_flag)
python
{ "resource": "" }