INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
returns the vector moved one step in the direction of the other, potentially diagonally.
def step_towards(self, other):
    """Return this vector moved one step towards ``other``.

    Each axis moves independently by -1, 0 or +1, so the step may be
    diagonal.
    """
    def axis_step(here, there):
        # -1, 0 or +1 depending on whether this axis must shrink, stay or grow.
        return (here < there) - (here > there)

    return self + Vector((
        axis_step(self[0], other[0]),
        axis_step(self[1], other[1]),
    ))
Takes a single character string as input and alters the game state according to that input. Mostly this means moving the player around. Returns a new game state and boolean indicating whether the input had an effect on the state.
def handle_input(self, input):
    """Apply a single-character command to the game state.

    Mostly this moves the player around.  Returns ``(new_state, changed)``
    where ``changed`` indicates whether the input had any effect.
    """
    # vi-style movement keys, including the four diagonals.
    moves = {
        'h': (-1, 0), 'j': (0, 1), 'k': (0, -1), 'l': (1, 0),
        'y': (-1, -1), 'u': (1, -1), 'n': (1, 1), 'b': (-1, 1),
    }
    if input in moves:
        moved = (lens.player + moves[input])(self)
        if moved.player.inside():
            return moved, True
        # Walking off the board is rejected outright.
        return self, False
    if input == '.':
        # Wait in place: a no-op that still consumes a turn.
        return self, True
    if input == 'q':
        return self.end_game(), False
    if input == 't':
        # Teleport the player to a random position.
        return lens.player.set(Vector.random())(self), True
    return self, False
Produces a new game state in which the robots have advanced towards the player by one step. Handles the robots crashing into one another too.
def advance_robots(self):
    """Return a new game state with every robot advanced one step.

    Robots all step towards the player; robots that end up sharing a
    square crash into each other and are removed, leaving a permanent
    crash site behind.
    """
    # Step every robot towards the player.
    state = lens.robots.Each().call_step_towards(self.player)(self)
    # Any square now holding more than one robot becomes a crash site.
    state = lens.crashes.call_union(duplicates(state.robots))(state)
    # Crashed robots are no longer active.
    state = lens.robots.modify(lambda rs: list(set(rs) - state.crashes))(state)
    return state
Checks for the game's win/lose conditions and alters the game state to reflect the condition found. If the game has not been won or lost then it just returns the game state unaltered.
def check_game_end(self):
    """Check win/lose conditions and return the resulting game state.

    The player loses by sharing a square with a robot or a crash site,
    and wins once no robots remain.  An undecided game comes back
    unaltered.
    """
    hazards = self.crashes.union(self.robots)
    if self.player in hazards:
        return self.end_game('You Died!')
    if not self.robots:
        return self.end_game('You Win!')
    return self
Returns a completed game state object setting an optional message to display after the game is over.
def end_game(self, message=''):
    """Return a finished copy of the game state.

    ``message`` is an optional text shown once the game is over.
    """
    with_message = lens.message.set(message)(self)
    return lens.running.set(False)(with_message)
Shows the board to the player on the console and asks them to make a move.
def player_move(board):
    """Show ``board`` on the console and ask the player for a move.

    The move is typed as a digit/letter pair such as ``2b`` and is
    returned as zero-based ``(x, y)`` coordinates.
    """
    print(board, end='\n\n')
    # A two-character answer unpacks directly into column and row.
    column, row = input('Enter move (e.g. 2b): ')
    print()
    return int(column) - 1, ord(row) - ord('a')
Play a game of naughts and crosses against the computer.
def play():
    """Play a game of naughts and crosses against the computer."""
    # The human plays crosses; the computer answers with random moves.
    movers = {'X': player_move, 'O': random_move}
    board = Board()
    while not board.winner:
        x, y = movers[board.player](board)
        board = board.make_move(x, y)
    print(board, end='\n\n')
    print(board.winner)
Return a board with a cell filled in by the current player. If the cell is already occupied then return the board unchanged.
def make_move(self, x, y):
    """Return a board with cell ``(x, y)`` claimed by the current player.

    An already-occupied cell leaves the board unchanged.
    """
    if self.board[y][x] != ' ':
        return self
    return lens.board[y][x].set(self.player)(self)
The winner of this board if one exists.
def winner(self):
    """Return the outcome of this board, if one can be determined."""
    for line in self._potential_wins():
        if line == ('X', 'X', 'X'):
            return Outcome.win_for_crosses
        if line == ('O', 'O', 'O'):
            return Outcome.win_for_naughts
    if self._count(' ') == 0:
        # A full board with no winning line is a draw.
        return Outcome.draw
    return Outcome.ongoing
Generates all the combinations of board positions that need to be checked for a win.
def _potential_wins(self): '''Generates all the combinations of board positions that need to be checked for a win.''' yield from self.board yield from zip(*self.board) yield self.board[0][0], self.board[1][1], self.board[2][2] yield self.board[0][2], self.board[1][1], self.board[2][0]
Process single item. Add item to items and then upload to S3 if size of items >= max_chunk_size.
def process_item(self, item, spider):
    """Buffer ``item`` and flush the buffer to S3 once it is full.

    Items accumulate in ``self.items`` and are uploaded as a single
    chunk whenever the buffer reaches ``self.max_chunk_size``.
    The item is returned unchanged so the pipeline can continue.
    """
    self.items.append(item)
    if len(self.items) >= self.max_chunk_size:
        self._upload_chunk(spider)
    return item
Callback function when spider is open.
def open_spider(self, spider):
    """Scrapy callback fired when the spider is opened.

    Stores a second-resolution UTC timestamp on ``self.ts``; it is used
    to replace ``{time}`` in S3PIPELINE_URL.  Colons are swapped for
    dashes so the value is safe to embed in S3 object keys.
    """
    from datetime import timezone  # local import: module imports not visible in this chunk

    # datetime.utcnow() is deprecated (Python 3.12+); build the same naive
    # string from an aware UTC time by dropping tzinfo, so the rendered
    # value keeps its original shape (e.g. '2020-01-02T03-04-05').
    now = datetime.now(timezone.utc).replace(microsecond=0, tzinfo=None)
    self.ts = now.isoformat().replace(':', '-')
Do upload items to S3.
def _upload_chunk(self, spider):
    """Upload the buffered items to S3 as a single object.

    Success or failure is recorded in the crawler stats; on failure the
    ``ClientError`` is re-raised.  The buffer is always reset afterwards.
    """
    if not self.items:
        return  # Nothing buffered, nothing to do.

    fileobj = self._make_fileobj()

    # Expand the template variables into the final object key.
    object_key = self.object_key_template.format(**self._get_uri_params(spider))

    try:
        self.s3.upload_fileobj(fileobj, self.bucket_name, object_key)
    except ClientError:
        self.stats.inc_value('pipeline/s3/fail')
        raise
    else:
        self.stats.inc_value('pipeline/s3/success')
    finally:
        # Prepare for the next chunk regardless of the outcome.
        self.chunk_number += len(self.items)
        self.items = []
Build file object from items.
def _make_fileobj(self):
    """Serialise the buffered items into an in-memory file object.

    Items are written as JSON lines, optionally gzip-compressed, and the
    returned ``BytesIO`` is rewound so it is ready for reading.
    """
    bio = BytesIO()
    f = gzip.GzipFile(mode='wb', fileobj=bio) if self.use_gzip else bio

    exporter = JsonLinesItemExporter(f)
    exporter.start_exporting()
    for item in self.items:
        exporter.export_item(item)
    exporter.finish_exporting()

    if f is not bio:
        # Close the GzipFile so it flushes its trailer into the BytesIO.
        f.close()

    bio.seek(0)  # Rewind so the caller can read from the start.
    return bio
To be used under python2.4 because functools.update_wrapper() is available only from python2.5+
def update_wrapper(wrapper, wrapped):
    """Copy identifying metadata from ``wrapped`` onto ``wrapper``.

    Backport for python2.4, where ``functools.update_wrapper()`` is not
    yet available (it appeared in python2.5).  Returns ``wrapper``.
    """
    for attr_name in ('__module__', '__name__', '__doc__'):
        value = getattr(wrapped, attr_name, None)
        # Only copy attributes the wrapped object actually carries.
        if value is not None:
            setattr(wrapper, attr_name, value)
    wrapper.__dict__.update(getattr(wrapped, '__dict__', {}))
    return wrapper
Returns the account state information associated with a specific address.
def get_account_state(self, address, **kwargs):
    """Return the account state associated with ``address``.

    :param address: a 34-character address (eg. AJBENSwajTzQtwyJFkiJSv7MAaaMc7DsRz)
    :type address: str
    :return: dictionary containing the account state information
    :rtype: dict
    """
    return self._call(
        JSONRPCMethods.GET_ACCOUNT_STATE.value, params=[address, ], **kwargs)
Returns the asset information associated with a specific asset ID.
def get_asset_state(self, asset_id, **kwargs):
    """Return the asset information associated with ``asset_id``.

    :param asset_id: asset identifier (the transaction ID of the
        RegistTransaction issued when the asset was registered)
    :type asset_id: str
    :return: dictionary containing the asset state information
    :rtype: dict
    """
    return self._call(
        JSONRPCMethods.GET_ASSET_STATE.value, params=[asset_id, ], **kwargs)
Returns the block information associated with a specific hash value or block index.
def get_block(self, block_hash, verbose=True, **kwargs):
    """Return the block identified by a hash value or block index.

    :param block_hash: a block hash value or a block index (block height)
    :param verbose: whether detailed block information should be returned
        as JSON (otherwise the endpoint returns a hexadecimal string)
    :type block_hash: str or int
    :type verbose: bool
    :return: dictionary containing the block information (or a hexadecimal
        string when ``verbose`` is False)
    :rtype: dict or str
    """
    params = [block_hash, int(verbose), ]
    return self._call(JSONRPCMethods.GET_BLOCK.value, params=params, **kwargs)
Returns the hash value associated with a specific block index.
def get_block_hash(self, block_index, **kwargs):
    """Return the hash of the block at ``block_index``.

    :param block_index: a block index (block height)
    :type block_index: int
    :return: hash of the block associated with the considered index
    :rtype: str
    """
    return self._call(
        JSONRPCMethods.GET_BLOCK_HASH.value, [block_index, ], **kwargs)
Returns the system fees associated with a specific block index.
def get_block_sys_fee(self, block_index, **kwargs):
    """Return the system fees of the block at ``block_index``.

    :param block_index: a block index (block height)
    :type block_index: int
    :return: system fees of the block, expressed in NeoGas units
    :rtype: str
    """
    return self._call(
        JSONRPCMethods.GET_BLOCK_SYS_FEE.value, [block_index, ], **kwargs)
Returns the contract information associated with a specific script hash.
def get_contract_state(self, script_hash, **kwargs):
    """Return the contract information for ``script_hash``.

    :param script_hash: contract script hash
    :type script_hash: str
    :return: dictionary containing the contract information
    :rtype: dict
    """
    return self._call(
        JSONRPCMethods.GET_CONTRACT_STATE.value, [script_hash, ], **kwargs)
Returns detailed information associated with a specific transaction hash.
def get_raw_transaction(self, tx_hash, verbose=True, **kwargs):
    """Return detailed information for the transaction ``tx_hash``.

    :param tx_hash: transaction hash
    :param verbose: whether detailed transaction information should be
        returned as JSON (otherwise the endpoint returns a hexadecimal
        string)
    :type tx_hash: str
    :type verbose: bool
    :return: dictionary containing the transaction information (or a
        hexadecimal string when ``verbose`` is False)
    :rtype: dict or str
    """
    params = [tx_hash, int(verbose), ]
    return self._call(
        JSONRPCMethods.GET_RAW_TRANSACTION.value, params=params, **kwargs)
Returns the value stored in the storage of a contract script hash for a given key.
def get_storage(self, script_hash, key, **kwargs):
    """Return the value stored for ``key`` in a contract's storage.

    :param script_hash: contract script hash
    :param key: key to look up in the storage
    :type script_hash: str
    :type key: str
    :return: value associated with the storage key (the raw, empty
        endpoint result is passed through unchanged)
    :rtype: bytearray
    """
    hexkey = binascii.hexlify(key.encode('utf-8')).decode('utf-8')
    hexresult = self._call(
        JSONRPCMethods.GET_STORAGE.value, params=[script_hash, hexkey, ], **kwargs)
    # The previous implementation relied on ``assert hexresult`` plus an
    # AssertionError handler to skip decoding empty results.  Asserts are
    # stripped under ``python -O``, which would silently change behavior;
    # use an explicit check instead.
    if not hexresult:
        return hexresult
    return bytearray(binascii.unhexlify(hexresult.encode('utf-8')))
Returns the transaction output information corresponding to a hash and index.
def get_tx_out(self, tx_hash, index, **kwargs):
    """Return the transaction output at ``index`` of transaction ``tx_hash``.

    :param tx_hash: transaction hash
    :param index: index of the transaction output to be obtained within
        the transaction (starts from 0)
    :type tx_hash: str
    :type index: int
    :return: dictionary containing the transaction output
    :rtype: dict
    """
    return self._call(
        JSONRPCMethods.GET_TX_OUT.value, params=[tx_hash, index, ], **kwargs)
Invokes a contract with given parameters and returns the result.
def invoke(self, script_hash, params, **kwargs):
    """Invoke a contract with the given parameters and return the result.

    Note that the name of the function to invoke inside the contract has
    to be part of ``params``.

    :param script_hash: contract script hash
    :param params: list of parameters to be passed in to the smart contract
    :type script_hash: str
    :type params: list
    :return: result of the invocation
    :rtype: dictionary
    """
    encoded = encode_invocation_params(params)
    raw = self._call(
        JSONRPCMethods.INVOKE.value, [script_hash, encoded, ], **kwargs)
    return decode_invocation_result(raw)
Invokes a contract's function with given parameters and returns the result.
def invoke_function(self, script_hash, operation, params, **kwargs):
    """Invoke a contract's function and return the decoded result.

    :param script_hash: contract script hash
    :param operation: name of the operation to invoke
    :param params: list of parameters to be passed in to the smart contract
    :type script_hash: str
    :type operation: str
    :type params: list
    :return: result of the invocation
    :rtype: dictionary
    """
    encoded = encode_invocation_params(params)
    raw = self._call(
        JSONRPCMethods.INVOKE_FUNCTION.value,
        [script_hash, operation, encoded, ], **kwargs)
    return decode_invocation_result(raw)
Invokes a script on the VM and returns the result.
def invoke_script(self, script, **kwargs):
    """Run ``script`` on the VM and return the decoded result.

    :param script: script runnable by the VM
    :type script: str
    :return: result of the invocation
    :rtype: dictionary
    """
    raw = self._call(JSONRPCMethods.INVOKE_SCRIPT.value, [script, ], **kwargs)
    return decode_invocation_result(raw)
Broadcasts a transaction over the NEO network and returns the result.
def send_raw_transaction(self, hextx, **kwargs):
    """Broadcast a serialized transaction over the NEO network.

    :param hextx: hexadecimal string that has been serialized
    :type hextx: str
    :return: result of the transaction
    :rtype: bool
    """
    return self._call(
        JSONRPCMethods.SEND_RAW_TRANSACTION.value, [hextx, ], **kwargs)
Validates if the considered string is a valid NEO address.
def validate_address(self, addr, **kwargs):
    """Check whether ``addr`` is a valid NEO address.

    :param addr: string containing a potential NEO address
    :type addr: str
    :return: dictionary containing the result of the verification
    :rtype: dictionary
    """
    return self._call(
        JSONRPCMethods.VALIDATE_ADDRESS.value, [addr, ], **kwargs)
Calls the JSON - RPC endpoint.
def _call(self, method, params=None, request_id=None):
    """Call the JSON-RPC endpoint and return the ``result`` field.

    :param method: JSON-RPC method name
    :param params: positional parameters for the method (defaults to [])
    :param request_id: explicit request 'id'; when omitted, an
        auto-incremented per-client counter is used instead
    :raises TransportError: when the HTTP request itself fails
    :raises ProtocolError: when the response body is not JSON, carries an
        error payload, or is missing the 'result' field
    """
    params = params or []

    # Determine which 'id' value to use.  The previous
    # ``request_id or self._id_counter`` test wrongly discarded an
    # explicit request_id of 0; compare against None instead, and only
    # bump the counter when it was actually consumed.
    if request_id is None:
        rid = self._id_counter
        self._id_counter += 1
    else:
        rid = request_id

    # Prepare the payload and headers used to forge the request.
    payload = {'jsonrpc': '2.0', 'method': method, 'params': params, 'id': rid}
    headers = {'Content-Type': 'application/json'}
    scheme = 'https' if self.tls else 'http'
    url = '{}://{}:{}'.format(scheme, self.host, self.port)

    # Calls the JSON-RPC endpoint!
    try:
        response = self.session.post(url, headers=headers, data=json.dumps(payload))
        response.raise_for_status()
    except HTTPError:
        # raise_for_status() only fires after a response exists, so
        # ``response`` is always bound here.
        raise TransportError(
            'Got unsuccessful response from server (status code: {})'.format(
                response.status_code),
            response=response)

    # Ensure the response body can be deserialized to JSON.
    try:
        response_data = response.json()
    except ValueError as e:
        raise ProtocolError(
            'Unable to deserialize response body: {}'.format(e),
            response=response)

    # Properly handle potential errors.
    if response_data.get('error'):
        code = response_data['error'].get('code', '')
        message = response_data['error'].get('message', '')
        raise ProtocolError(
            'Error[{}] {}'.format(code, message),
            response=response, data=response_data)
    if 'result' not in response_data:
        raise ProtocolError(
            'Response is empty (result field is missing)',
            response=response, data=response_data)

    return response_data['result']
Returns True if the considered string is a valid SHA256 hash.
def is_hash256(s):
    """Return True if ``s`` is a valid SHA256 hash in hex form.

    Accepts exactly 64 hexadecimal characters (case-insensitive),
    ignoring surrounding whitespace.
    """
    if not s or not isinstance(s, str):
        return False
    # The docstring promises a boolean, but re.match returns a Match
    # object or None -- normalise it to a real bool.
    return bool(re.match('^[0-9A-F]{64}$', s.strip(), re.IGNORECASE))
Returns True if the considered string is a valid RIPEMD160 hash.
def is_hash160(s):
    """Return True if ``s`` is a valid RIPEMD160 hash in hex form.

    That is: a string of exactly 40 hexadecimal characters.
    """
    if not s or not isinstance(s, str):
        return False
    if len(s) != 40:
        return False
    hexdigits = '0123456789abcdefABCDEF'
    return all(c in hexdigits for c in s)
Returns a list of paramaters meant to be passed to JSON - RPC endpoints.
def encode_invocation_params(params):
    """Encode python values into JSON-RPC invocation parameters.

    Each value becomes a ``{'type': ..., 'value': ...}`` dict chosen by
    its python type.  bool is checked before int (a bool is an int);
    hash-looking strings map to HASH256/HASH160 before plain STRING;
    lists are encoded recursively as ARRAY.  Unrecognised values are
    silently skipped.
    """
    encoded = []
    for value in params:
        if isinstance(value, bool):
            type_ = ContractParameterTypes.BOOLEAN.value
        elif isinstance(value, int):
            type_ = ContractParameterTypes.INTEGER.value
        elif is_hash256(value):
            type_ = ContractParameterTypes.HASH256.value
        elif is_hash160(value):
            type_ = ContractParameterTypes.HASH160.value
        elif isinstance(value, bytearray):
            type_ = ContractParameterTypes.BYTE_ARRAY.value
        elif isinstance(value, str):
            type_ = ContractParameterTypes.STRING.value
        elif isinstance(value, list):
            # Nested lists become ARRAY parameters, encoded recursively.
            value = encode_invocation_params(value)
            type_ = ContractParameterTypes.ARRAY.value
        else:
            continue
        encoded.append({'type': type_, 'value': value})
    return encoded
Tries to decode the values embedded in an invocation result dictionary.
def decode_invocation_result(result):
    """Decode the values embedded in an invocation result dictionary.

    Results without a ``stack`` field come back untouched (same object);
    otherwise a deep copy with the decoded stack is returned.
    """
    if 'stack' not in result:
        return result
    decoded = copy.deepcopy(result)
    decoded['stack'] = _decode_invocation_result_stack(decoded['stack'])
    return decoded
Emulates keyword - only arguments under python2. Works with both python2 and python3. With this decorator you can convert all or some of the default arguments of your function into kwonly arguments. Use KWONLY_REQUIRED as the default value of required kwonly args.
def first_kwonly_arg(name):
    """Emulate keyword-only arguments under python2 (works on python3 too).

    Converts the decorated function's default arguments, starting at the
    one called ``name``, into keyword-only arguments.  Use
    ``KWONLY_REQUIRED`` as the default value of required kwonly args, and
    pass ``FIRST_DEFAULT_ARG`` (or use the ``@kwonly_defaults`` shortcut)
    to turn every default argument into a keyword-only one.

    :param name: The name of the first default argument to be treated as
        keyword-only.  That argument and all default arguments after it
        become keyword-only.
    """
    def decorate(wrapped):
        # Introspect the wrapped signature (getargspec is python2-only).
        if sys.version_info[0] == 2:
            arg_names, varargs, _, defaults = inspect.getargspec(wrapped)
        else:
            arg_names, varargs, _, defaults = inspect.getfullargspec(wrapped)[:4]

        if not defaults:
            raise TypeError("You can't use @first_kwonly_arg on a function that doesn't have default arguments!")

        first_default_index = len(arg_names) - len(defaults)

        if name is FIRST_DEFAULT_ARG:
            first_kwonly_index = first_default_index
        else:
            try:
                first_kwonly_index = arg_names.index(name)
            except ValueError:
                raise ValueError("%s() doesn't have an argument with the specified first_kwonly_arg=%r name" % (
                    getattr(wrapped, '__name__', '?'), name))
        if first_kwonly_index < first_default_index:
            raise ValueError("The specified first_kwonly_arg=%r must have a default value!" % (name,))

        # (name, default) pairs for every keyword-only argument.
        kwonly_defaults = defaults[-(len(arg_names) - first_kwonly_index):]
        kwonly_args = tuple(zip(arg_names[first_kwonly_index:], kwonly_defaults))
        required_kwonly_args = frozenset(
            arg for arg, default in kwonly_args if default is KWONLY_REQUIRED)

        def wrapper(*args, **kwargs):
            if required_kwonly_args:
                missing_kwonly_args = required_kwonly_args.difference(kwargs.keys())
                if missing_kwonly_args:
                    raise TypeError("%s() missing %s keyword-only argument(s): %s" % (
                        getattr(wrapped, '__name__', '?'), len(missing_kwonly_args),
                        ', '.join(sorted(missing_kwonly_args))))
            if len(args) > first_kwonly_index:
                # Positional args overflow past the kwonly slots: only a
                # *varargs parameter can absorb them.
                if varargs is None:
                    raise TypeError("%s() takes exactly %s arguments (%s given)" % (
                        getattr(wrapped, '__name__', '?'), first_kwonly_index, len(args)))
                # Re-insert the kwonly values (from kwargs or defaults) so
                # the overflow lands in *varargs as the caller intended.
                kwonly_args_from_kwargs = tuple(
                    kwargs.pop(arg, default) for arg, default in kwonly_args)
                args = args[:first_kwonly_index] + kwonly_args_from_kwargs + args[first_kwonly_index:]
            return wrapped(*args, **kwargs)

        return update_wrapper(wrapper, wrapped)
    return decorate
called when a file is uploaded to a private stage for a projectname/ version. link. entry. file_exists () may be false because a more recent revision deleted the file ( and files are not revisioned ). NOTE that this hook is currently NOT called for the implicit caching uploads to the pypi mirror.
def devpiserver_on_upload(stage, project, version, link):
    """Hook called when a file is uploaded to a private stage.

    ``link.entry.file_exists()`` may be false because a more recent
    revision deleted the file (and files are not revisioned).  NOTE that
    this hook is currently NOT called for the implicit "caching" uploads
    to the pypi mirror.

    If the uploaded file is a wheel and is the latest version on this
    index, store its metadata in a json file at the root of the
    ``index/+f/`` directory.  With the standard nginx config, nginx will
    directly serve this file.
    """
    entry = link.entry
    if not (entry and entry.file_exists() and entry.basename.endswith('.whl')):
        return
    threadlog.info("Wheel detected: %s", entry.basename)

    new_version = parse_version(version)
    latest_version = parse_version(stage.get_latest_version_perstage(project))
    if latest_version > new_version:
        threadlog.debug(
            "A newer release has already been uploaded: %s - nothing to do",
            latest_version)
        return

    metadata = extract_metadata_from_wheel_file(entry.file_os_path())
    linkstore = stage.get_linkstore_perstage(link.project, link.version)
    project_dir = '%s/%s/+f/%s' % (linkstore.filestore.storedir, stage.name, project)
    if not os.path.exists(project_dir):
        os.mkdir(project_dir)

    json_path = '%s/%s-%s.json' % (project_dir, project, new_version)
    with open(json_path, 'w') as fd:
        fd.write(json.dumps(metadata))
    threadlog.info("Stored %s to: %s", metadata, json_path)

    # Keep a "<project>.json" symlink pointing at the latest version.
    symlink_path = '%s.json' % project_dir
    if os.path.exists(symlink_path):
        os.unlink(symlink_path)
    os.symlink(json_path, symlink_path)
Args: instruction (string): a string that encodes 0 to n transformations of a time, i.e. "-1h@h", "@mon+2d+4h", ... dttm (datetime). Returns: datetime: The datetime resulting from applying all transformations to the input datetime.
def snap(dttm, instruction):
    """Apply a chain of time transformations to ``dttm``.

    Args:
        instruction (string): a string that encodes 0 to n transformations
            of a time, i.e. "-1h@h", "@mon+2d+4h", ...
        dttm (datetime):

    Returns:
        datetime: The datetime resulting from applying all transformations
            to the input datetime.

    Example:
        >>> snap(datetime(2016, 1, 1, 15, 30), "-1h@h")
        datetime(2016, 1, 1, 14)
    """
    result = dttm
    for transformation in parse(instruction):
        result = transformation.apply_to(result)
    return result
This function handles timezone aware datetimes. Sometimes it is necessary to keep daylight saving time switches in mind.
def snap_tz(dttm, instruction, timezone):
    """Apply a chain of time transformations to a timezone-aware datetime.

    Use this instead of :func:`snap` when daylight saving time switches
    must be kept in mind.

    Args:
        instruction (string): a string that encodes 0 to n transformations
            of a time, i.e. "-1h@h", "@mon+2d+4h", ...
        dttm (datetime): a datetime with timezone
        timezone: a pytz timezone

    Returns:
        datetime: The datetime resulting from applying all transformations
            to the input datetime.

    Example:
        >>> import pytz
        >>> CET = pytz.timezone("Europe/Berlin")
        >>> dttm = CET.localize(datetime(2017, 3, 26, 3, 44))
        >>> snap_tz(dttm, "-2h@h", CET)
        datetime.datetime(2017, 3, 26, 0, 0, tzinfo=<DstTzInfo 'Europe/Berlin' CET+1:00:00 STD>)
        >>> # switch from winter to summer time!
    """
    result = dttm
    for transformation in parse(instruction):
        result = transformation.apply_to_with_tz(result, timezone)
    return result
We make sure that after truncating we use the correct timezone even if we jump over a daylight saving time switch.
def apply_to_with_tz(self, dttm, timezone):
    """Apply this transformation, re-localizing after coarse truncations.

    Ensures the correct UTC offset is used even when the transformation
    'jumps' over a daylight saving time switch.  I.e. applying "@d" to
    `Sun Oct 30 04:30:00 CET 2016` (1477798200) must give
    `Sun Oct 30 00:00:00 CEST 2016` (1477778400), not
    `Sun Oct 30 00:00:00 CET 2016` (1477782000).
    """
    result = self.apply_to(dttm)
    if self.unit not in [DAYS, WEEKS, MONTHS, YEARS]:
        return result
    # Truncated to (at least) midnight: rebuild as a naive datetime and
    # let the timezone pick the proper offset for that date.
    return timezone.localize(datetime(result.year, result.month, result.day))
Renders the barcode and saves it in filename.
def save(self, filename, options=None):
    """Render the barcode and save it under ``filename``.

    :parameters:
        filename : String
            Filename to save the barcode in (without filename extension).
        options : Dict
            The same as in `self.render`.

    :returns: The full filename with extension.
    :rtype: String
    """
    rendered = self.render(options)
    return self.writer.save(filename, rendered)
Renders the barcode using self. writer.
def render(self, writer_options=None):
    """Render the barcode using ``self.writer``.

    :parameters:
        writer_options : Dict
            Options for `self.writer`, see writer docs for details.

    :returns: Output of the writer's render method.
    """
    # Start from the class-level defaults, overlaid with caller options.
    options = Barcode.default_writer_options.copy()
    options.update(writer_options or {})
    if options['write_text']:
        options['text'] = self.get_fullcode()
    self.writer.set_options(options)
    code = self.build()
    # NOTE: the raw output is also cached on the Barcode class itself.
    raw = Barcode.raw = self.writer.render(code)
    return raw
Calculates the checksum for EAN13 - Code.
def calculate_checksum(self):
    """Calculate the EAN13 check digit for ``self.ean``.

    :returns: The checksum for `self.ean`.
    :rtype: Integer
    """
    digits = [int(d) for d in self.ean]
    evensum = sum(digits[::2])   # weight-1 positions
    oddsum = sum(digits[1::2])   # weight-3 positions
    return (10 - ((evensum + oddsum * 3) % 10)) % 10
Renders the barcode to whatever the inheriting writer provides using the registered callbacks.
def render(self, code):
    """Render the barcode via the registered callbacks.

    Paints the modules line by line (with a quiet zone on both sides),
    then the optional text, and returns whatever the ``finish`` callback
    produces.

    :parameters:
        code : List
            List of strings matching the writer spec (only contain 0 or 1).
    """
    if self._callbacks['initialize'] is not None:
        self._callbacks['initialize'](code)

    ypos = 1.0
    for line in code:
        # The left quiet zone is the x start position of every line.
        xpos = self.quiet_zone
        for mod in line:
            color = self.background if mod == '0' else self.foreground
            self._callbacks['paint_module'](xpos, ypos, self.module_width, color)
            xpos += self.module_width
        # Append the right quiet zone to every line.
        self._callbacks['paint_module'](xpos, ypos, self.quiet_zone, self.background)
        ypos += self.module_height

    if self.text and self._callbacks['paint_text'] is not None:
        ypos += self.text_distance
        xpos = xpos / 2.0 if self.center_text else self.quiet_zone + 4.0
        self._callbacks['paint_text'](xpos, ypos)

    return self._callbacks['finish']()
[ pandas. read_sql ] Arguments: Query { [ type ] } -- [ description ] Returns: [ pd. DataFrame or generate ] -- [ description ]
def to_df(self, **kwargs):
    """Load this query's results into a DataFrame via ``pandas.read_sql``.

    Extra keyword arguments are forwarded to ``pandas.read_sql`` (e.g.
    ``chunksize``, in which case a generator of DataFrames is returned).

    Returns:
        pd.DataFrame or generator
    """
    return pd.read_sql(sql=self.statement, con=self.session.bind, **kwargs)
Call that method in the pyramid configuration phase.
def connect(cls, settings):
    """Configure the session client; call in the pyramid configuration phase.

    Reads the kvs server definition from ``settings['kvs.perlsess']``,
    fills in the perl-session defaults, and stores the resulting client
    (and cookie name) on the class.
    """
    server = serializer('json').loads(settings['kvs.perlsess'])
    server.setdefault('key_prefix', 'perlsess::')
    server.setdefault('codec', 'storable')
    # cookie_name is consumed here rather than passed through to KVS.
    cls.cookie_name = server.pop('cookie_name', 'session_id')
    cls.client = KVS(**server)
Simple command line tool to help manage environment variables stored in a S3 - like system. Facilitates editing text files remotely stored as well as downloading and uploading files.
def main(ctx, edit, create):
    """Manage environment variables stored in a S3-like system.

    Facilitates editing remotely stored text files, as well as
    downloading and uploading files.
    """
    # This module's logger goes to stderr (check __init__.py/patch.py);
    # client output should be generated with click.echo() to go to stdout.
    try:
        click_log.basic_config('s3conf')
        logger.debug('Running main entrypoint')
        if edit:
            if ctx.invoked_subcommand is not None:
                raise UsageError('Edit should not be called with a subcommand.')
            logger.debug('Using config file %s', config.LOCAL_CONFIG_FILE)
            config.ConfigFileResolver(config.LOCAL_CONFIG_FILE).edit(create=create)
            return
        # Manually call help in case no relevant settings were defined.
        if ctx.invoked_subcommand is None:
            click.echo(main.get_help(ctx))
    except exceptions.FileDoesNotExist as e:
        raise UsageError('The file {} does not exist. Try "-c" option if you want to create it.'.format(str(e)))
Reads the file defined by the S3CONF variable and output its contents to stdout. Logs are printed to stderr. See options for added functionality: editing file mapping files dumping in the phusion - baseimage format etc.
def env(section, map_files, phusion, phusion_path, quiet, edit, create):
    """Print the env file defined by the S3CONF variable to stdout.

    Logs are printed to stderr.  See options for added functionality:
    editing the file, mapping files, dumping in the phusion-baseimage
    format, etc.
    """
    try:
        logger.debug('Running env command')
        settings = config.Settings(section=section)
        storage = STORAGES['s3'](settings=settings)
        conf = s3conf.S3Conf(storage=storage, settings=settings)

        if edit:
            conf.edit(create=create)
            return

        env_vars = conf.get_envfile().as_dict()
        if env_vars.get('S3CONF_MAP') and map_files:
            conf.download_mapping(env_vars.get('S3CONF_MAP'))
        if not quiet:
            for var_name, var_value in sorted(env_vars.items(), key=lambda x: x[0]):
                click.echo('{}={}'.format(var_name, var_value))
        if phusion:
            s3conf.phusion_dump(env_vars, phusion_path)
    except exceptions.EnvfilePathNotDefinedError:
        raise exceptions.EnvfilePathNotDefinedUsageError()
    except exceptions.FileDoesNotExist as e:
        raise UsageError('The file {} does not exist. Try "-c" option if you want to create it.'.format(str(e)))
Sets the process environment and executes the [COMMAND] in the same context. Does not modify the current shell environment.
def exec_command(ctx, section, command, map_files):
    """
    Sets the process environment and executes the [COMMAND] in the same context. Does not
    modify the current shell environment.

    If the [COMMAND] has option-like arguments, use the standard POSIX pattern "--" to separate options
    from arguments. Considering our configuration in the "dev" section, we could write:

    s3conf -v info exec dev -- ping -v google.com
    """
    try:
        logger.debug('Running exec command')
        existing_sections = config.ConfigFileResolver(config.LOCAL_CONFIG_FILE).sections()
        command = ' '.join(command)
        # If the first token is not a known section name, treat it as part of
        # the command itself and fall back to the default section.
        if section not in existing_sections:
            command = '{} {}'.format(section, command) if command else section
            section = None
        if not command:
            logger.warning('No command detected.')
            click.echo(exec_command.get_help(ctx))
            return
        settings = config.Settings(section=section)
        storage = STORAGES['s3'](settings=settings)
        conf = s3conf.S3Conf(storage=storage, settings=settings)
        env_vars = conf.get_envfile().as_dict()
        if env_vars.get('S3CONF_MAP') and map_files:
            conf.download_mapping(env_vars.get('S3CONF_MAP'))
        # merge fetched variables on top of the current process environment
        current_env = os.environ.copy()
        current_env.update(env_vars)
        logger.debug('Executing command "%s"', command)
        # check=True propagates non-zero exit codes as CalledProcessError
        subprocess.run(shlex.split(command), env=current_env, check=True)
    except exceptions.EnvfilePathNotDefinedError:
        raise exceptions.EnvfilePathNotDefinedUsageError()
Download a file or folder from the S3 - like service.
def download(remote_path, local_path):
    """
    Download a file or folder from the S3-like service.

    If REMOTE_PATH has a trailing slash it is considered to be a folder, e.g.:
    "s3://my-bucket/my-folder/". In this case, LOCAL_PATH must be a folder as well.
    The files and subfolder structure in REMOTE_PATH are copied to LOCAL_PATH.

    If REMOTE_PATH does not have a trailing slash, it is considered to be a file,
    and LOCAL_PATH should be a file as well.
    """
    # Build a default (section-less) S3 client and delegate the transfer.
    conf = s3conf.S3Conf(storage=STORAGES['s3']())
    conf.download(remote_path, local_path)
Upload a file or folder to the S3 - like service.
def upload(remote_path, local_path):
    """
    Upload a file or folder to the S3-like service.

    If LOCAL_PATH is a folder, the files and subfolder structure in LOCAL_PATH
    are copied to REMOTE_PATH.

    If LOCAL_PATH is a file, the REMOTE_PATH file is created with the same contents.
    """
    # Build a default (section-less) S3 client and delegate the transfer.
    conf = s3conf.S3Conf(storage=STORAGES['s3']())
    conf.upload(local_path, remote_path)
For each section defined in the local config file creates a folder inside the local config folder named after the section. Downloads the environment file defined by the S3CONF variable for this section to this folder.
def downsync(section, map_files):
    """
    Download the environment file defined by the S3CONF variable for the given
    section into a folder named after the section inside the local config folder.
    """
    try:
        settings = config.Settings(section=section)
        conf = s3conf.S3Conf(
            storage=STORAGES['s3'](settings=settings), settings=settings)
        section_root = os.path.join(config.LOCAL_CONFIG_FOLDER, section)
        conf.downsync(section_root, map_files=map_files)
    except exceptions.EnvfilePathNotDefinedError:
        raise exceptions.EnvfilePathNotDefinedUsageError()
Looks up the folder inside the local config folder named after the given section and prints the differences between the local environment files stored there and the remote ones defined by the S3CONF variable.
def diff(section):
    """
    Print the differences between the environment files stored in the local
    config folder (under a folder named after the section) and the remote
    ones defined by the S3CONF variable.
    """
    try:
        settings = config.Settings(section=section)
        conf = s3conf.S3Conf(
            storage=STORAGES['s3'](settings=settings), settings=settings)
        section_root = os.path.join(config.LOCAL_CONFIG_FOLDER, section)
        click.echo(''.join(conf.diff(section_root)))
    except exceptions.EnvfilePathNotDefinedError:
        raise exceptions.EnvfilePathNotDefinedUsageError()
Set value of a variable in an environment file for the given section. If the variable is already defined its value is replaced otherwise it is added to the end of the file. The value is given as ENV_VAR_NAME = env_var_value e. g.:
def set_variable(section, value, create):
    """
    Set value of a variable in an environment file for the given section.

    If the variable is already defined, its value is replaced, otherwise, it is
    added to the end of the file. The value is given as
    "ENV_VAR_NAME=env_var_value", e.g.:

    s3conf set test ENV_VAR_NAME=env_var_value
    """
    # With a single positional argument it is the value, not the section name.
    if not value:
        section, value = None, section
    try:
        logger.debug('Running env command')
        conf = s3conf.S3Conf(settings=config.Settings(section=section))
        conf.get_envfile().set(value, create=create)
    except exceptions.EnvfilePathNotDefinedError:
        raise exceptions.EnvfilePathNotDefinedUsageError()
Unset a variable in an environment file for the given section. The value given is the variable name e. g.:
def unset_variable(section, value):
    """
    Unset a variable in an environment file for the given section.

    The value given is the variable name, e.g.:

    s3conf unset test ENV_VAR_NAME
    """
    # With a single positional argument it is the variable name, not the section.
    if not value:
        section, value = None, section
    try:
        logger.debug('Running env command')
        conf = s3conf.S3Conf(settings=config.Settings(section=section))
        conf.get_envfile().unset(value)
    except exceptions.EnvfilePathNotDefinedError:
        raise exceptions.EnvfilePathNotDefinedUsageError()
Creates the. s3conf config folder and. s3conf/ config config file with the provided section name and configuration file. It is a very basic config file. Manually edit it in order to add credentials. E. g.:
def init(section, remote_file):
    """
    Creates the .s3conf config folder and .s3conf/config config file with the
    provided section name and configuration file. It is a very basic config
    file. Manually edit it in order to add credentials. E.g.:

    s3conf init development s3://my-project/development.env
    """
    if not remote_file.startswith('s3://'):
        raise UsageError('REMOTE_FILE must be a S3-like path. E.g.:\n\n'
                         's3conf init development s3://my-project/development.env')
    logger.debug('Running init command')
    config_file_path = os.path.join(os.getcwd(), '.s3conf', 'config')
    config_file = config.ConfigFileResolver(config_file_path, section=section)
    config_file.set('S3CONF', remote_file)
    # Save first so the .s3conf folder exists before the .gitignore is written.
    config_file.save()
    # Ignore everything inside .s3conf except the config file itself.
    gitignore_file_path = os.path.join(os.getcwd(), '.s3conf', '.gitignore')
    # Fix: the original leaked the file handle via open(...).write(...);
    # a context manager guarantees the file is flushed and closed.
    with open(gitignore_file_path, 'w') as gitignore_file:
        gitignore_file.write('*\n!config\n')
[ update table ]
def update(self, t_obj):
    """Stage one mapped object, or an iterable of them, on the session.

    Arguments:
        t_obj {[objs of DeclarativeMeta]} -- [object(s) to add to the session]
    """
    # A non-iterable is a single row; anything iterable is added in bulk.
    if not isinstance(t_obj, Iterable):
        self._session.add(t_obj)
    else:
        self._session.add_all(t_obj)
[ insert bulk data ]
def insert(self, table, insert_obj, ignore=True):
    """Bulk-insert rows into a reflected table.

    Arguments:
        table {[DeclarativeMeta cls]} -- [reflection of table]
        insert_obj {[pd.DataFrame or list of dicts]} -- [rows to insert]

    Keyword Arguments:
        ignore {bool} -- [whether to use an "INSERT IGNORE" statement] (default: {True})

    Raises:
        ValueError -- [if insert_obj is an empty DataFrame or not a DataFrame/list]

    Returns:
        [type] -- [result of the executed insert statement]
    """
    # Normalize a DataFrame into a list of row dicts; reject anything else.
    if isinstance(insert_obj, pd.DataFrame):
        if insert_obj.empty:
            raise ValueError('The input DataFrame is empty, please check!')
        insert_obj = insert_obj.to_dict(orient='records')
    elif not isinstance(insert_obj, list):
        raise ValueError(
            f"The {reprlib.repr(insert_obj)} must be list of dicts type!")
    # MySQL-style "INSERT IGNORE" when duplicates should be skipped silently.
    prefix = 'IGNORE' if ignore else ''
    statement = table.__table__.insert().prefix_with(prefix)
    return self._session.execute(statement, insert_obj)
Split an env var text like
def parse_env_var(value):
    """
    Split an env var text like

    ENV_VAR_NAME=env_var_value

    into a tuple ('ENV_VAR_NAME', 'env_var_value')
    """
    key, _, raw = value.partition('=')
    # Trim whitespace on both sides; escape-encode the value so quoted
    # escape sequences can be decoded below.
    key = key.strip()
    raw = raw.strip().encode('unicode-escape').decode('ascii')
    # A value wrapped in matching single or double quotes gets unquoted
    # and its inner escape sequences decoded.
    quoted = bool(raw) and raw[0] == raw[-1] and raw[-1] in ['"', "'"]
    if quoted:
        raw = __escape_decoder(raw[1:-1])[0]
    return key, raw
Add basic authentication to the requests of the clients.
def basic(username, password):
    """Add basic authentication to the requests of the clients."""
    # Reset any previously configured authentication first.
    none()
    _config.username, _config.password = username, password
Authenticate via an api key.
def api_key(api_key):
    """Authenticate via an api key."""
    none()  # clear any previously configured credentials
    encoded_key = b64encode(api_key.encode()).decode()
    _config.api_key_prefix["Authorization"] = "api-key"
    _config.api_key["Authorization"] = "key=" + encoded_key
yield objects from json files in the folder and subfolders.
def _get_json_content_from_folder(folder): """yield objects from json files in the folder and subfolders.""" for dirpath, dirnames, filenames in os.walk(folder): for filename in filenames: if filename.lower().endswith(".json"): filepath = os.path.join(dirpath, filename) with open(filepath, "rb") as file: yield json.loads(file.read().decode("UTF-8"))
Return a dict of schema names mapping to a Schema.
def get_schemas():
    """Return a dict of schema names mapping to a Schema.

    The schema is of type schul_cloud_resources_api_v1.schema.Schema
    """
    # every entry in the schema folder is a schema unless explicitly excluded
    return {name: Schema(name)
            for name in os.listdir(JSON_PATH)
            if name not in NO_SCHEMA}
Return the schema.
def get_schema(self):
    """Load and return this schema's JSON definition from disk."""
    schema_path = os.path.join(self._get_schema_folder(), self._name + ".json")
    with open(schema_path, "rb") as fh:
        return json.loads(fh.read().decode("UTF-8"))
Return a jsonschema. RefResolver for the schemas.
def get_resolver(self):
    """Return a jsonschema.RefResolver for the schemas.

    All schemas returned by get_schemas() are resolved locally,
    so no network access is needed during validation.
    """
    store = {s.get_uri(): s.get_schema() for s in get_schemas().values()}
    return jsonschema.RefResolver.from_schema(self.get_schema(), store=store)
Validate an object against the schema.
def validate(self, object):
    """Validate an object against the schema.

    Passes silently when the object matches the schema; otherwise a
    ValidationError is raised, whose message aids debugging.
    """
    jsonschema.validate(object, self.get_schema(), resolver=self.get_resolver())
Return a list of valid examples for the given schema.
def get_valid_examples(self):
    """Return a list of examples that conform to this schema."""
    examples_path = os.path.join(self._get_schema_folder(), "examples", "valid")
    return list(_get_json_content_from_folder(examples_path))
Return a list of examples which violate the schema.
def get_invalid_examples(self):
    """Return a list of examples that violate this schema."""
    examples_path = os.path.join(self._get_schema_folder(), "examples", "invalid")
    return list(_get_json_content_from_folder(examples_path))
Make synchronous HTTP request. Can be overridden to use different http module ( e. g. urllib2 twisted etc ).
def request( self, url, method='get', data=None, files=None,
        raw=False, raw_all=False, headers=dict(), raise_for=dict(), session=None ):
    '''Make synchronous HTTP request.
        Can be overridden to use different http module (e.g. urllib2, twisted, etc).
        NOTE: this is Python 2 code (uses dict.iteritems and a bytes format string).'''
    try:
        import requests  # import here to avoid dependency on the module
    except ImportError as exc:
        exc.args = (
            'Unable to find/import "requests" module.'
            ' Please make sure that it is installed, e.g. by running "pip install requests" command.'
            '\nFor more info, visit: http://docs.python-requests.org/en/latest/user/install/',)
        raise exc
    # one-time session/adapter setup hook; may replace the shared session
    if not self._requests_setup_done:
        patched_session = self._requests_setup(
            requests, **(self.request_adapter_settings or dict()) )
        if patched_session is not None:
            self._requests_session = patched_session
    if session is None:
        session = getattr(self, '_requests_session', None)
        if not session:
            session = self._requests_session = requests.session()
    elif not session:
        # falsy-but-not-None session means "no session": use module-level API
        session = requests
    method = method.lower()
    # base keywords go into the partial; kwz is rebuilt fresh for this call
    kwz = (self._requests_base_keywords or dict()).copy()
    kwz.update(self.request_extra_keywords or dict())
    kwz, func = dict(), ft.partial(session.request, method.upper(), **kwz)
    kwz_headers = (self.request_base_headers or dict()).copy()
    kwz_headers.update(headers)
    if data is not None:
        if method in ['post', 'put']:
            if all(hasattr(data, k) for k in ['seek', 'read']):
                # Force chunked encoding for files, as uploads hang otherwise
                # See https://github.com/mk-fg/python-onedrive/issues/30 for details
                data.seek(0)
                kwz['data'] = iter(ft.partial(data.read, 200 * 2**10), b'')
            else:
                kwz['data'] = data
        else:
            # non-post/put payloads are serialized as JSON
            kwz['data'] = json.dumps(data)
            kwz_headers.setdefault('Content-Type', 'application/json')
    if files is not None:
        # requests-2+ doesn't seem to add default content-type header
        for k, file_tuple in files.iteritems():
            if len(file_tuple) == 2:
                files[k] = tuple(file_tuple) + ('application/octet-stream',)
            # Rewind is necessary because request can be repeated due to auth failure
            file_tuple[1].seek(0)
        kwz['files'] = files
    if kwz_headers:
        kwz['headers'] = kwz_headers
    code = res = None
    try:
        res = func(url, **kwz)
        # log.debug('Response headers: %s', res.headers)
        code = res.status_code
        if code == requests.codes.no_content:
            return
        if code != requests.codes.ok:
            res.raise_for_status()
    except requests.RequestException as err:
        message = b'{0} [type: {1}, repr: {0!r}]'.format(err, type(err))
        if (res and getattr(res, 'text', None)) is not None:
            # "res" with non-200 code can be falsy
            message = res.text
            try:
                message = json.loads(message)
            except:
                message = '{}: {!r}'.format(str(err), message)[:300]
            else:
                # API error payloads carry an "error" object with code/message
                msg_err, msg_data = message.pop('error', None), message
                if msg_err:
                    message = '{}: {}'.format(
                        msg_err.get('code', err), msg_err.get('message', msg_err) )
                    if msg_data:
                        message = '{} (data: {})'.format(message, msg_data)
        # caller can map status codes to custom exception types via raise_for
        raise raise_for.get(code, ProtocolError)(code, message)
    if raw:
        res = res.content
    elif raw_all:
        res = code, dict(res.headers.items()), res.content
    else:
        res = json.loads(res.text)
    return res
Build authorization URL for User Agent.
def auth_user_get_url(self, scope=None):
    'Build authorization URL for User Agent. Raises AuthMissingError without a client_id.'
    if not self.client_id:
        raise AuthMissingError('No client_id specified')
    # standard OAuth2 authorization-code request parameters
    return '{}?{}'.format(self.auth_url_user, urllib.urlencode(dict(
        client_id=self.client_id, scope=' '.join(scope or self.auth_scope),
        response_type='code', redirect_uri=self.auth_redirect_uri )))
Process tokens and errors from redirect_uri.
def auth_user_process_url(self, url):
    'Process tokens and errors from redirect_uri. Stores and returns the auth code.'
    url = urlparse.urlparse(url)
    # tokens may arrive in either the query string or the fragment
    url_qs = dict(it.chain.from_iterable(
        urlparse.parse_qsl(v) for v in [url.query, url.fragment] ))
    if url_qs.get('error'):
        raise APIAuthError(
            '{} :: {}'.format(url_qs['error'], url_qs.get('error_description')) )
    self.auth_code = url_qs['code']
    return self.auth_code
Refresh or acquire access_token.
def auth_get_token(self, check_scope=True):
    'Refresh or acquire access_token. Raw token response is kept on the instance.'
    res = self.auth_access_data_raw = self._auth_token_request()
    return self._auth_token_process(res, check_scope=check_scope)
Returns id of a OneDrive user.
def get_user_id(self):
    'Returns "id" of a OneDrive user, fetching and caching it on first use.'
    uid = self._user_id
    if uid is None:
        # only hit the API once; subsequent calls use the cached value
        uid = self._user_id = self.get_user_data()['id']
    return uid
Get OneDrive object representing list of objects in a folder.
def listdir(self, folder_id='me/skydrive', limit=None, offset=None):
    'Fetch the OneDrive object representing the list of objects in a folder.'
    files_url = self._api_url_join(folder_id, 'files')
    return self(files_url, dict(limit=limit, offset=offset))
Download and return a file object or a specified byte_range from it. See HTTP Range header ( rfc2616 ) for possible byte_range formats Examples: 0 - 499 - byte offsets 0 - 499 ( inclusive ) - 500 - final 500 bytes.
def get(self, obj_id, byte_range=None):
    '''Download and return a file object or a specified byte_range from it.

        See HTTP Range header (rfc2616) for possible byte_range formats,
        Examples: "0-499" - byte offsets 0-499 (inclusive), "-500" - final 500 bytes.'''
    extra = dict()
    if byte_range:
        # partial download via a standard HTTP Range header
        extra['headers'] = dict(Range='bytes={}'.format(byte_range))
    content_url = self._api_url_join(obj_id, 'content')
    return self(content_url, dict(download='true'), raw=True, **extra)
Upload a file ( object ) possibly overwriting ( default behavior ) a file with the same name attribute if it exists.
def put( self, path_or_tuple, folder_id='me/skydrive',
        overwrite=None, downsize=None, bits_api_fallback=True ):
    '''Upload a file (object), possibly overwriting (default behavior)
            a file with the same "name" attribute, if it exists.

        First argument can be either path to a local file or tuple of "(name, file)",
            where "file" can be either a file-like object or just a string of bytes.

        overwrite option can be set to False to allow two identically-named
            files or "ChooseNewName" to let OneDrive derive some similar unique name.
            Behavior of this option mimics underlying API.

        downsize is a true/false API flag, similar to overwrite.

        bits_api_fallback can be either True/False or an integer (number of bytes),
            and determines whether method will fall back to using BITS API
            (as implemented by "put_bits" method) for large files. Default "True" (bool)
            value will use non-BITS file size limit (api_put_max_bytes, ~100 MiB)
            as a fallback threshold, passing False will force using single-request uploads.'''
    api_overwrite = self._translate_api_flag(overwrite, 'overwrite', ['ChooseNewName'])
    api_downsize = self._translate_api_flag(downsize, 'downsize')
    name, src = self._process_upload_source(path_or_tuple)
    # normalize non-numeric fallback values to a plain bool
    if not isinstance(bits_api_fallback, (int, float, long)):
        bits_api_fallback = bool(bits_api_fallback)
    if bits_api_fallback is not False:
        if bits_api_fallback is True:
            bits_api_fallback = self.api_put_max_bytes
        # size check decides between single-request PUT and chunked BITS upload
        src.seek(0, os.SEEK_END)
        if src.tell() >= bits_api_fallback:
            if bits_api_fallback > 0:  # not really a "fallback" in this case
                log.info(
                    'Falling-back to using BITS API due to file size (%.1f MiB > %.1f MiB)',
                    *((float(v) / 2**20) for v in [src.tell(), bits_api_fallback]) )
            if overwrite is not None and api_overwrite != 'true':
                raise NoAPISupportError(
                    'Passed "overwrite" flag (value: {!r})'
                    ' is not supported by the BITS API (always "true" there)'.format(overwrite) )
            if downsize is not None:
                log.info(
                    'Passed "downsize" flag (value: %r) will not'
                    ' be used with BITS API, as it is not supported there', downsize )
            file_id = self.put_bits(path_or_tuple, folder_id=folder_id)  # XXX: overwrite/downsize
            return self.info(file_id)
    # PUT seem to have better support for unicode
    #  filenames and is recommended in the API docs, see #19.
    # return self( self._api_url_join(folder_id, 'files'),
    #  dict(overwrite=api_overwrite, downsize_photo_uploads=api_downsize),
    #  method='post', files=dict(file=(name, src)) )
    return self(
        self._api_url_join(folder_id, 'files', name),
        dict(overwrite=api_overwrite, downsize_photo_uploads=api_downsize),
        data=src, method='put', auth_header=True )
Upload a file ( object ) using BITS API ( via several http requests ) possibly overwriting ( default behavior ) a file with the same name attribute if it exists.
def put_bits( self, path_or_tuple, folder_id=None, folder_path=None,
        frag_bytes=None, raw_id=False, chunk_callback=None ):
    '''Upload a file (object) using BITS API (via several http requests), possibly
            overwriting (default behavior) a file with the same "name" attribute, if it exists.

        Unlike "put" method, uploads to "folder_path" (instead of folder_id) are
            supported here. Either folder path or id can be specified, but not both.

        Passed "chunk_callback" function (if any) will be called after each
            uploaded chunk with keyword parameters corresponding to upload state
            and BITS session info required to resume it, if necessary.

        Returns id of the uploaded file, as retured by the API
            if raw_id=True is passed, otherwise in a consistent (with other calls)
            "file.{user_id}.{file_id}" format (default).'''
    # XXX: overwrite/downsize are not documented/supported here (yet?)
    name, src = self._process_upload_source(path_or_tuple)
    if folder_id is not None and folder_path is not None:
        raise ValueError('Either "folder_id" or "folder_path" can be specified, but not both.')
    if folder_id is None and folder_path is None:
        folder_id = 'me/skydrive'
    # well-known "me/..." aliases must be resolved to real ids first
    if folder_id and re.search(r'^me(/.*)$', folder_id):
        folder_id = self.info(folder_id)['id']
    if not frag_bytes:
        frag_bytes = self.api_bits_default_frag_bytes
    user_id = self.get_user_id()
    if folder_id:  # workaround for API-ids inconsistency between BITS and regular API
        match = re.search(
            r'^(?i)folder.[a-f0-9]+.'
            '(?P<user_id>[a-f0-9]+(?P<folder_n>!\d+)?)$', folder_id )
        if match and not match.group('folder_n'):
            # root folder is a special case and can't seem to be accessed by id
            folder_id, folder_path = None, ''
        else:
            if not match:
                raise ValueError('Failed to process folder_id for BITS API: {!r}'.format(folder_id))
            folder_id = match.group('user_id')
    if folder_id:
        url = self.api_bits_url_by_id.format(folder_id=folder_id, user_id=user_id, filename=name)
    else:
        url = self.api_bits_url_by_path.format(
            folder_id=folder_id, user_id=user_id,
            file_path=ujoin(folder_path, name).lstrip('/') )
    # BITS step 1: Create-Session handshake, yields the session id
    code, headers, body = self(
        url, method='post', auth_header=True, raw_all=True,
        headers={
            'X-Http-Method-Override': 'BITS_POST',
            'BITS-Packet-Type': 'Create-Session',
            'BITS-Supported-Protocols': self.api_bits_protocol_id })
    # case-insensitive response-header lookup helper
    h = lambda k,hs=dict((k.lower(), v) for k,v in headers.viewitems()): hs.get(k, '')
    checks = [ code == 201,
        h('bits-packet-type').lower() == 'ack',
        h('bits-protocol').lower() == self.api_bits_protocol_id.lower(),
        h('bits-session-id') ]
    if not all(checks):
        raise ProtocolError(code, 'Invalid BITS Create-Session response', headers, body, checks)
    bits_sid = h('bits-session-id')
    # BITS step 2: upload the file in frag_bytes-sized fragments
    src.seek(0, os.SEEK_END)
    c, src_len = 0, src.tell()
    cn = src_len / frag_bytes
    if frag_bytes * cn != src_len:
        cn += 1  # trailing partial fragment
    src.seek(0)
    for n in xrange(1, cn+1):
        log.debug(
            'Uploading BITS fragment'
            ' %s / %s (max-size: %.2f MiB)', n, cn, frag_bytes / float(2**20) )
        frag = BITSFragment(src, frag_bytes)
        c1 = c + frag_bytes
        self( url, method='post', raw=True, data=frag,
            headers={
                'X-Http-Method-Override': 'BITS_POST',
                'BITS-Packet-Type': 'Fragment',
                'BITS-Session-Id': bits_sid,
                'Content-Range': 'bytes {}-{}/{}'.format(c, min(c1, src_len)-1, src_len) })
        c = c1
        if chunk_callback:
            chunk_callback(
                bytes_transferred=c, bytes_total=src_len,
                chunks_transferred=n, chunks_total=cn,
                bits_session_id=bits_sid )
    if self.api_bits_auth_refresh_before_commit_hack:
        # As per #39 and comments under the gist with the spec,
        #  apparently this trick fixes occasional http-5XX errors from the API
        self.auth_get_token()
    # BITS step 3: Close-Session commits the upload
    code, headers, body = self(
        url, method='post', auth_header=True, raw_all=True,
        headers={
            'X-Http-Method-Override': 'BITS_POST',
            'BITS-Packet-Type': 'Close-Session',
            'BITS-Session-Id': bits_sid })
    h = lambda k,hs=dict((k.lower(), v) for k,v in headers.viewitems()): hs.get(k, '')
    checks = [code in [200, 201], h('bits-packet-type').lower() == 'ack' ]
    # int(h('bits-received-content-range') or 0) == src_len -- documented, but missing
    # h('bits-session-id') == bits_sid -- documented, but missing
    if not all(checks):
        raise ProtocolError(code, 'Invalid BITS Close-Session response', headers, body, checks)
    # Workaround for API-ids inconsistency between BITS and regular API
    file_id = h('x-resource-id')
    if not raw_id:
        file_id = 'file.{}.{}'.format(user_id, file_id)
    return file_id
Create a folder with a specified name attribute. folder_id allows to specify a parent folder. metadata mapping may contain additional folder properties to pass to an API.
def mkdir(self, name=None, folder_id='me/skydrive', metadata=None):
    '''Create a folder with a specified "name" attribute.

        folder_id allows to specify a parent folder. metadata mapping
        may contain additional folder properties to pass to an API.'''
    # Copy so the caller's mapping is never mutated; the None default replaces
    # the previous mutable-default-argument (metadata=dict()) anti-pattern and
    # is backward compatible, since the original copied the default anyway.
    metadata = dict(metadata) if metadata else dict()
    if name:
        metadata['name'] = name
    return self(folder_id, data=metadata, method='post', auth_header=True)
Update metadata of a specified object. See http:// msdn. microsoft. com/ en - us/ library/ live/ hh243648. aspx for the list of RW keys for each object type.
def info_update(self, obj_id, data):
    '''Update metadata of a specified object.

        See http://msdn.microsoft.com/en-us/library/live/hh243648.aspx
        for the list of RW keys for each object type.'''
    request_kwargs = dict(method='put', data=data, auth_header=True)
    return self(obj_id, **request_kwargs)
Return a preauthenticated ( usable by anyone ) link to a specified object. Object will be considered shared by OneDrive even if link is never actually used. link_type can be either embed ( returns html ) shared_read_link or shared_edit_link.
def link(self, obj_id, link_type='shared_read_link'):
    '''Return a preauthenticated (usable by anyone) link to a specified object.

        Object will be considered "shared" by OneDrive, even if the link is never
        actually used. link_type can be either "embed" (returns html),
        "shared_read_link" or "shared_edit_link".'''
    valid_types = ['embed', 'shared_read_link', 'shared_edit_link']
    assert link_type in valid_types
    return self(self._api_url_join(obj_id, link_type), method='get')
Copy specified file ( object ) to a folder with a given ID. Well - known folder names ( like me/ skydrive ) don t seem to work here. Folders cannot be copied ; this is an API limitation.
def copy(self, obj_id, folder_id, move=False):
    '''Copy specified file (object) to a folder with a given ID.

        Well-known folder names (like "me/skydrive") don't seem to work here.
        Folders cannot be copied; this is an API limitation.'''
    verb = 'move' if move else 'copy'
    return self(obj_id, method=verb,
        data=dict(destination=folder_id), auth_header=True)
Move specified file ( object ) to a folder. Note that folders cannot be moved this is an API limitation.
def move(self, obj_id, folder_id):
    '''Move specified file (object) to a folder.

        Note that folders cannot be moved, this is an API limitation.
        Implemented on top of "copy" with the move flag set.'''
    return self.copy(obj_id, folder_id, move=True)
Add comment message to a specified object.
def comment_add(self, obj_id, message):
    'Attach a text comment "message" to the specified object.'
    comments_url = self._api_url_join(obj_id, 'comments')
    return self(comments_url, method='post',
        data=dict(message=message), auth_header=True)
Return id ( or metadata ) of an object specified by chain ( iterable or fs - style path string ) of name attributes of its ancestors or raises DoesNotExists error.
def resolve_path(self, path, root_id='me/skydrive', objects=False, listdir_limit=500):
    '''Return id (or metadata) of an object, specified by chain (iterable
            or fs-style path string) of "name" attributes of its ancestors,
            or raises DoesNotExists error.

        Requires many calls to resolve each name in path, so use with care.
        root_id parameter allows to specify path
            relative to some folder_id (default: me/skydrive).'''
    if path:
        if isinstance(path, types.StringTypes):
            if not path.startswith('me/skydrive'):
                # Split path by both kinds of slashes
                path = filter(None, it.chain.from_iterable(p.split('\\') for p in path.split('/')))
            else:
                # path is already an API id-alias, use it directly
                root_id, path = path, None
        if path:
            try:
                # walk the path one component at a time, paginating each listing
                for i, name in enumerate(path):
                    offset = None
                    while True:
                        obj_list = self.listdir(root_id, offset=offset, limit=listdir_limit)
                        try:
                            root_id = dict(it.imap(op.itemgetter('name', 'id'), obj_list))[name]
                        except KeyError:
                            if len(obj_list) < listdir_limit:
                                raise  # assuming that it's the last page
                            offset = (offset or 0) + listdir_limit
                        else:
                            break
            except (KeyError, ProtocolError) as err:
                # 404 and lookup misses both mean "no such path component"
                if isinstance(err, ProtocolError) and err.code != 404:
                    raise
                raise DoesNotExists(root_id, path[i:])
    return root_id if not objects else self.info(root_id)
Return a list of objects in the specified folder_id. limit is passed to the API so might be used as optimization. type_filter can be set to type ( str ) or sequence of object types to return post - api - call processing.
def listdir(self, folder_id='me/skydrive', type_filter=None, limit=None, offset=None):
    '''Return a list of objects in the specified folder_id.
        limit is passed to the API, so might be used as optimization.
        type_filter can be set to type (str) or sequence
            of object types to return, post-api-call processing.'''
    lst = super(OneDriveAPI, self)\
        .listdir(folder_id=folder_id, limit=limit, offset=offset)['data']
    if type_filter:
        # a single type name is promoted to a one-element set
        if isinstance(type_filter, types.StringTypes):
            type_filter = {type_filter}
        # filtering happens client-side, after the API call
        lst = list(obj for obj in lst if obj['type'] in type_filter)
    return lst
Copy specified file ( object ) to a folder. Note that folders cannot be copied this is an API limitation.
def copy(self, obj_id, folder_id, move=False):
    '''Copy specified file (object) to a folder.
        Note that folders cannot be copied, this is an API limitation.'''
    if folder_id.startswith('me/skydrive'):
        # copy/move endpoints only accept real ids, not well-known aliases
        log.info(
            'Special folder names (like "me/skydrive") dont'
            ' seem to work with copy/move operations, resolving it to id' )
        folder_id = self.info(folder_id)['id']
    return super(OneDriveAPI, self).copy(obj_id, folder_id, move=move)
Initialize instance from YAML configuration file writing updates ( only to keys specified by conf_update_keys ) back to it.
def from_conf(cls, path=None, **overrides):
    '''Initialize instance from YAML configuration file, writing updates
        (only to keys, specified by "conf_update_keys") back to it.'''
    from onedrive import portalocker
    import yaml
    if path is None:
        path = cls.conf_path_default
        log.debug('Using default state-file path: %r', path)
    path = os.path.expanduser(path)
    # read under a shared lock so concurrent writers can't corrupt the read
    with open(path, 'rb') as src:
        portalocker.lock(src, portalocker.LOCK_SH)
        yaml_str = src.read()
        portalocker.unlock(src)
    conf = yaml.safe_load(yaml_str)
    conf.setdefault('conf_save', path)
    # collect only the whitelisted "<section>_<key>" constructor kwargs
    conf_cls = dict()
    for ns, keys in cls.conf_update_keys.viewitems():
        for k in keys:
            try:
                v = conf.get(ns, dict()).get(k)
            except AttributeError:
                # section exists but is not a mapping
                if not cls.conf_raise_structure_errors:
                    raise
                raise KeyError((
                    'Unable to get value for configuration parameter'
                    ' "{k}" in section "{ns}", check configuration file (path: {path}) syntax'
                    ' near the aforementioned section/value.' ).format(ns=ns, k=k, path=path))
            if v is not None:
                conf_cls['{}_{}'.format(ns, k)] = conf[ns][k]
    conf_cls.update(overrides)
    # Hack to work around YAML parsing client_id of e.g. 000123 as an octal int
    if isinstance(conf.get('client', dict()).get('id'), (int, long)):
        log.warn(
            'Detected client_id being parsed as an integer (as per yaml), trying to un-mangle it.'
            ' If requests will still fail afterwards, please replace it in the configuration file (path: %r),'
            ' also putting single or double quotes (either one should work) around the value.', path )
        cid = conf['client']['id']
        if not re.search(r'\b(0*)?{:d}\b'.format(cid), yaml_str)\
                and re.search(r'\b(0*)?{:o}\b'.format(cid), yaml_str):
            # NOTE(review): upstream python-onedrive uses '{:o}' (octal) here;
            #  '{:0}' is effectively a no-op conversion -- verify this line.
            cid = int('{:0}'.format(cid))
        conf['client']['id'] = '{:016d}'.format(cid)
    self = cls(**conf_cls)
    self.conf_save = conf['conf_save']
    return self
Convert or dump object to unicode.
def decode_obj(obj, force=False):
    '''Convert or dump object to unicode (Python 2 code).
        Non-bytes, non-unicode objects are returned as-is
        unless force=True, in which case their repr() is returned.'''
    if isinstance(obj, unicode):
        return obj
    elif isinstance(obj, bytes):
        # module-level force_encoding (if set) overrides any detection
        if force_encoding is not None:
            return obj.decode(force_encoding)
        # optional chardet-based detection, used only when confident enough
        if chardet:
            enc_guess = chardet.detect(obj)
            if enc_guess['confidence'] > 0.7:
                return obj.decode(enc_guess['encoding'])
        return obj.decode('utf-8')
    else:
        return obj if not force else repr(obj)
Recursively create and set the drop target for obj and childs
def set_drop_target(obj, root, designer, inspector):
    "Recursively create and set the drop target for obj and all of its children"
    # only container widgets can receive dropped controls
    if obj._meta.container:
        obj.drop_target = ToolBoxDropTarget(
            obj, root, designer=designer, inspector=inspector)
    for child in obj:
        set_drop_target(child, root, designer, inspector)
Event handler tool selection ( just add to default handler )
def tool_click(self, evt): "Event handler tool selection (just add to default handler)" # get the control ctrl = self.menu_ctrl_map[evt.GetId()] # create the control on the parent: if self.inspector.selected_obj: # find the first parent drop target parent = self.inspector.selected_obj while parent.drop_target is None and parent.get_parent(): parent = parent.get_parent() # create the new object obj = ctrl(parent, name="%s_%s" % (ctrl._meta.name.lower(), wx.NewId()), pos=(0, 0), designer=self.designer) # associate the object with the toolbox: if obj._meta.container: dt = ToolBoxDropTarget(obj, self.inspector.root_obj, designer=self.designer, inspector=self.inspector) obj.drop_target = dt # fix width and height if default is not visible w, h = obj.size if w <= 10: obj.width = 100 if h <= 10: obj.height = 20 # update the object at the inspector (to show the new control) if self.inspector: self.inspector.load_object(self.inspector.root_obj) # refresh tree self.inspector.inspect(obj)
Event handler for drag&drop functionality
def start_drag_opperation(self, evt):
    '''Event handler for drag&drop functionality.

    Packs the clicked tool's control name (plus its bitmap) into a composite
    data object and starts a wx drag&drop operation from the toolbox.'''
    # get the control class mapped to the clicked tool id
    ctrl = self.menu_ctrl_map[evt.GetToolId()]
    # create our own data format and use it in a custom data object
    ldata = wx.CustomDataObject("gui")
    ldata.SetData(ctrl._meta.name)  # only strings are allowed!
    # Also create a Bitmap version of the drawing
    bmp = ctrl._image.GetBitmap()
    # Now make a data object for the bitmap and also a composite
    # data object holding both of the others.
    bdata = wx.BitmapDataObject(bmp)
    data = wx.DataObjectComposite()
    data.Add(ldata)
    data.Add(bdata)
    # And finally, create the drop source and begin the drag
    # and drop operation
    dropSource = wx.DropSource(self)
    dropSource.SetData(data)
    # FIX: corrected "Begining" typo in debug output; use the parenthesized
    # single-argument print form consistently (identical output on Python 2).
    if DEBUG: print("Beginning DragDrop\n")
    result = dropSource.DoDragDrop(wx.Drag_AllowMove)
    if DEBUG: print("DragDrop completed: %d\n" % result)
    if result == wx.DragMove:
        if DEBUG: print("dragmove!")
        self.Refresh()
Track the default top-level window for the toolbox menu default action
def set_default_tlw(self, tlw, designer, inspector):
    "track default top level window for toolbox menu default action"
    # Remember the designer & inspector of the active window; note that tlw
    # itself is not stored.
    self.designer, self.inspector = designer, inspector
Return a copy of the drop target (to avoid wx problems on rebuild)
def copy(self):
    "Return a copy of the drop target (to avoid wx problems on rebuild)"
    # Build a fresh, equivalent instance instead of reusing this one.
    clone = ToolBoxDropTarget(self.dv, self.root, self.designer,
                              self.inspector)
    return clone
Open the inspector windows for a given object
def inspect(obj):
    "Launch the inspector tool on the given object and return the tool"
    from gui.tools.inspector import InspectorTool
    tool = InspectorTool()
    tool.show(obj)
    return tool
Open a shell
def shell():
    "Open an interactive debugging shell window and return it"
    from gui.tools.debug import Shell
    win = Shell()
    win.show()
    return win
Take a pythoncard background resource and convert to a gui2py window
def migrate_window(bg):
    "Take a pythoncard background resource and convert to a gui2py window"
    out = {}
    for key, value in bg.items():
        if key == 'type':
            # Map the PythonCard window type onto the gui2py class name.
            value = WIN_MAP[value]._meta.name
        elif key == 'menubar':
            # Menus are migrated like any other control.
            value = [migrate_control(menu) for menu in value['menus']]
        elif key == 'components':
            value = [migrate_control(child) for child in value]
        else:
            # Rename generic widget properties per the spec mapping.
            key = SPEC_MAP['Widget'].get(key, key)
        out[key] = value
    return out
Take a PythonCard component resource and convert it to a gui2py control
def migrate_control(comp):
    '''Take a pythoncard component resource and convert to a gui2py control.

    Maps the component type via CTRL_MAP, renames properties via SPEC_MAP
    (generic "Widget" mapping first, then the type-specific one) and converts
    font specs; child components are migrated recursively.'''
    ret = {}
    for k, v in comp.items():
        if k == 'type':
            v = CTRL_MAP[v]._meta.name
        elif k == 'menubar':
            pass  # kept as-is (no transformation needed)
        elif k == 'components':
            # BUG FIX: the comprehension variable was named "comp", which in
            # Python 2 leaks out of the list comprehension and clobbers the
            # function parameter, so later iterations read comp['type'] from
            # the last migrated child instead of this component.
            v = [migrate_control(child) for child in v]
        else:
            k = SPEC_MAP['Widget'].get(k, k)
            if comp['type'] in SPEC_MAP:
                k = SPEC_MAP[comp['type']].get(k, k)
            if k == 'font':
                v = migrate_font(v)
        ret[k] = v
    return ret
Convert PythonCard font description to gui2py style
def migrate_font(font):
    "Convert PythonCard font description to gui2py style"
    # Rename the face key (mutates and returns the same dict).
    try:
        font['face'] = font.pop('faceName')
    except KeyError:
        pass
    # Normalize the sans-serif family spelling.
    if font.get('family') == 'sansSerif':
        font['family'] = 'sans serif'
    return font
Loads HTML page from location and then displays it
def load_page(self, location):
    "Loads HTML page from location and then displays it"
    if location:
        self.wx_obj.LoadPage(location)
    else:
        # No target given: blank the control instead.
        self.wx_obj.SetPage("")
Programmatically select a (default) property to start editing it
def edit(self, name=""):
    '''Programmatically select a (default) property to start editing it.

    Tries the given name first, then falls back through a list of common
    property names; does nothing when no matching property exists.'''
    # for more info see DoSelectAndEdit in propgrid.cpp
    prop = None
    for candidate in (name, "label", "value", "text", "title", "filename",
                      "name"):
        prop = self.pg.GetPropertyByName(candidate)
        if prop is not None:
            break
    if prop is None:
        # BUG FIX: previously prop.GetName() below raised AttributeError
        # when none of the candidate properties existed.
        return
    self.Parent.SetFocus()
    self.Parent.Raise()
    self.pg.SetFocus()
    # give time to the ui to show the prop grid and set focus:
    wx.CallLater(250, self.select, prop.GetName())
Select a property (and start the editor)
def select(self, name, flags=0): "Select a property (and start the editor)" # do not call this directly from another window, use edit() instead # // wxPropertyGrid::DoSelectProperty flags (selFlags) -see propgrid.h- wxPG_SEL_FOCUS=0x0001 # Focuses to created editor wxPG_SEL_FORCE=0x0002 # Forces deletion and recreation of editor flags |= wxPG_SEL_FOCUS # | wxPG_SEL_FORCE prop = self.pg.GetPropertyByName(name) self.pg.SelectProperty(prop, flags) if DEBUG: print "selected!", prop