Columns: _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 classes), text (string, 75-19.8k chars), language (string, 1 class), meta_information (dict)
q7900
RTMP.handle_packet
train
def handle_packet(self, packet):
    """Lets librtmp look at a packet and send a response if needed."""
    if not isinstance(packet, RTMPPacket):
        raise ValueError("A RTMPPacket argument is required")

    return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
python
{ "resource": "" }
q7901
RTMP.process_packets
train
def process_packets(self, transaction_id=None, invoked_method=None,
                    timeout=None):
    """Wait for packets and process them as needed.

    :param transaction_id: int, Wait until the result of this
                           transaction ID is received.
    :param invoked_method: int, Wait until this method is invoked
                           by the server.
    :param timeout: int, The time to wait for a result from the
                    server. Note: This is the timeout used by this
                    method only, the connection timeout is still
                    used when reading packets.

    Raises :exc:`RTMPError` on error.
    Raises :exc:`RTMPTimeoutError` on timeout.

    Usage::

      >>> @conn.invoke_handler
      ... def add(x, y):
      ...     return x + y

      >>> conn.process_packets()

    """
    start = time()

    while self.connected and transaction_id not in self._invoke_results:
        if timeout and (time() - start) >= timeout:
            raise RTMPTimeoutError("Timeout")

        packet = self.read_packet()
        if packet.type == PACKET_TYPE_INVOKE:
            try:
                decoded = decode_amf(packet.body)
            except AMFError:
                continue

            try:
                method, transaction_id_, obj = decoded[:3]
                args = decoded[3:]
            except ValueError:
                continue

            if method == "_result":
                if len(args) > 0:
                    result = args[0]
                else:
                    result = None

                self._invoke_results[transaction_id_] = result
            else:
                handler = self._invoke_handlers.get(method)
                if handler:
                    res = handler(*args)
                    if res is not None:
                        self.call("_result", res,
                                  transaction_id=transaction_id_)

                if method == invoked_method:
                    self._invoke_args[invoked_method] = args
                    break

            if transaction_id_ == 1.0:
                self._connect_result = packet
            else:
                self.handle_packet(packet)
        else:
            self.handle_packet(packet)

    if transaction_id:
        result = self._invoke_results.pop(transaction_id, None)
        return result

    if invoked_method:
        args = self._invoke_args.pop(invoked_method, None)
        return args
python
{ "resource": "" }
q7902
RTMP.call
train
def call(self, method, *args, **params):
    """Calls a method on the server."""
    transaction_id = params.get("transaction_id")

    if not transaction_id:
        self.transaction_id += 1
        transaction_id = self.transaction_id

    obj = params.get("obj")

    args = [method, transaction_id, obj] + list(args)
    args_encoded = map(lambda x: encode_amf(x), args)
    body = b"".join(args_encoded)

    format = params.get("format", PACKET_SIZE_MEDIUM)
    channel = params.get("channel", 0x03)

    packet = RTMPPacket(type=PACKET_TYPE_INVOKE, format=format,
                        channel=channel, body=body)

    self.send_packet(packet)

    return RTMPCall(self, transaction_id)
python
{ "resource": "" }
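A minimal usage sketch for `call`. Everything here is hypothetical: `conn` is assumed to be an already-connected RTMP instance, and the invoked method name is only an example (it reuses the one shown in entry q7903 below); `RTMPCall.result` appears in entry q7904.

# Hypothetical usage sketch -- `conn` is an established RTMP connection.
call = conn.call("NetStream.Authenticate.UsherToken", "some token")
# call() returns an RTMPCall handle immediately; block for the server's
# "_result" response only when the value is actually needed.
result = call.result(timeout=10)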
q7903
RTMP.remote_method
train
def remote_method(self, method, block=False, **params):
    """Creates a Python function that will attempt to call a remote
    method when used.

    :param method: str, Method name on the server to call
    :param block: bool, Whether to wait for result or not

    Usage::

      >>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
      >>> send_usher_token("some token")
      'Token Accepted'

    """
    def func(*args):
        call = self.call(method, *args, **params)

        if block:
            return call.result()

        return call

    func.__name__ = method

    return func
python
{ "resource": "" }
q7904
RTMPCall.result
train
def result(self, timeout=None):
    """Retrieves the result of the call.

    :param timeout: The time to wait for a result from the server.

    Raises :exc:`RTMPTimeoutError` on timeout.
    """
    if self.done:
        return self._result

    result = self.conn.process_packets(transaction_id=self.transaction_id,
                                       timeout=timeout)
    self._result = result
    self.done = True

    return result
python
{ "resource": "" }
q7905
add_signal_handler
train
def add_signal_handler():
    """Adds a signal handler to handle KeyboardInterrupt."""
    import signal

    def handler(sig, frame):
        if sig == signal.SIGINT:
            librtmp.RTMP_UserInterrupt()
            raise KeyboardInterrupt

    signal.signal(signal.SIGINT, handler)
python
{ "resource": "" }
q7906
BaseMesh.rotate_x
train
def rotate_x(self, deg):
    """Rotate mesh around x-axis

    :param float deg: Rotation angle (degree)
    :return: self
    """
    rad = math.radians(deg)
    mat = numpy.array([
        [1, 0, 0, 0],
        [0, math.cos(rad), math.sin(rad), 0],
        [0, -math.sin(rad), math.cos(rad), 0],
        [0, 0, 0, 1]
    ])
    self.vectors = self.vectors.dot(mat)
    return self
python
{ "resource": "" }
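A standalone numeric check of the row-vector convention used by rotate_x above: points are homogeneous row vectors multiplied as v.dot(mat), which is why the matrix is the transpose of the usual column-vector rotation. This is a sketch, not part of the original library:

import math
import numpy

rad = math.radians(90)
mat = numpy.array([
    [1, 0, 0, 0],
    [0, math.cos(rad), math.sin(rad), 0],
    [0, -math.sin(rad), math.cos(rad), 0],
    [0, 0, 0, 1],
])
v = numpy.array([0.0, 1.0, 0.0, 1.0])  # the point (0, 1, 0) in homogeneous form
print(v.dot(mat))                      # ~[0, 0, 1, 1]: +y rotates onto +z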
q7907
BaseMesh.translate_x
train
def translate_x(self, d):
    """Translate mesh for x-direction

    :param float d: Amount to translate
    """
    mat = numpy.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [d, 0, 0, 1]
    ])
    self.vectors = self.vectors.dot(mat)
    return self
python
{ "resource": "" }
q7908
BaseMesh.translate_y
train
def translate_y(self, d):
    """Translate mesh for y-direction

    :param float d: Amount to translate
    """
    mat = numpy.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, d, 0, 1]
    ])
    self.vectors = self.vectors.dot(mat)
    return self
python
{ "resource": "" }
q7909
BaseMesh.translate_z
train
def translate_z(self, d):
    """Translate mesh for z-direction

    :param float d: Amount to translate
    """
    mat = numpy.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, d, 1]
    ])
    self.vectors = self.vectors.dot(mat)
    return self
python
{ "resource": "" }
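A quick homogeneous-coordinate check for the three translate_* helpers above (a sketch, assuming points stored as row vectors [x, y, z, 1]): placing the offset in the last row of the matrix adds it to the matching coordinate.

import numpy

d = 5.0
mat = numpy.array([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, d, 1],
])
v = numpy.array([1.0, 2.0, 3.0, 1.0])
print(v.dot(mat))  # -> [1. 2. 8. 1.], i.e. z shifted by d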
q7910
Stl.__load
train
def __load(fh, mode=MODE_AUTO):
    """Load Mesh from STL file

    :param FileIO fh: The file handle to open
    :param int mode: The mode to open, default is :py:data:`MODE_AUTO`.
    :return:
    """
    header = fh.read(Stl.HEADER_SIZE).lower()
    name = ""
    data = None
    if not header.strip():
        return

    if mode in (Stl.MODE_AUTO, Stl.MODE_ASCII) and header.startswith('solid'):
        try:
            # The solid name follows the 'solid' keyword on the first line.
            name = header.split('\n', 1)[0][5:].strip()
            data = Stl.__load_ascii(fh, header)
            mode = Stl.MODE_ASCII
        except Exception:
            pass
    else:
        data = Stl.__load_binary(fh)
        mode = Stl.MODE_BINARY

    return name, data, mode
python
{ "resource": "" }
q7911
initialize_logger
train
def initialize_logger():
    """Initialize steppy logger.

    This logger is used throughout the steppy library to report computation progress.

    Example:

        Simple use of steppy logger:

        .. code-block:: python

            initialize_logger()
            logger = get_logger()
            logger.info('My message inside pipeline')

        result looks like this:

        .. code::

            2018-06-02 12:33:48 steppy >>> My message inside pipeline

    Returns:
        logging.Logger: logger object formatted in the steppy style
    """
    logger = logging.getLogger('steppy')
    logger.setLevel(logging.INFO)
    message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
                                       datefmt='%Y-%m-%d %H:%M:%S')

    # console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(fmt=message_format)

    # add the handlers to the logger
    logger.addHandler(console_handler)
    return logger
python
{ "resource": "" }
q7912
display_upstream_structure
train
def display_upstream_structure(structure_dict):
    """Displays pipeline structure in the jupyter notebook.

    Args:
        structure_dict (dict): dict returned by
            :func:`~steppy.base.Step.upstream_structure`.
    """
    graph = _create_graph(structure_dict)
    plt = Image(graph.create_png())
    display(plt)
python
{ "resource": "" }
q7913
persist_as_png
train
def persist_as_png(structure_dict, filepath):
    """Saves pipeline diagram to disk as png file.

    Args:
        structure_dict (dict): dict returned by
            :func:`~steppy.base.Step.upstream_structure`
        filepath (str): filepath to which the png with pipeline visualization
            should be persisted
    """
    graph = _create_graph(structure_dict)
    graph.write(filepath, format='png')
python
{ "resource": "" }
q7914
_create_graph
train
def _create_graph(structure_dict):
    """Creates pydot graph from the pipeline structure dict.

    Args:
        structure_dict (dict): dict returned by step.upstream_structure

    Returns:
        graph (pydot.Dot): object representing upstream pipeline structure
            (with regard to the current Step).
    """
    graph = pydot.Dot()
    for node in structure_dict['nodes']:
        graph.add_node(pydot.Node(node))
    for node1, node2 in structure_dict['edges']:
        graph.add_edge(pydot.Edge(node1, node2))
    return graph
python
{ "resource": "" }
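A small usage sketch for _create_graph; the step names below are made up, but the dict shape ('nodes' plus 'edges' pairs) is exactly what the function reads:

structure_dict = {
    'nodes': ['load_data', 'feature_extraction', 'classifier'],
    'edges': [('load_data', 'feature_extraction'),
              ('feature_extraction', 'classifier')],
}
graph = _create_graph(structure_dict)
print(graph.to_string())  # DOT source for the three-node pipeline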
q7915
Adapter.adapt
train
def adapt(self, all_ouputs: AllOutputs) -> DataPacket:
    """Adapt inputs for the transformer included in the step.

    Args:
        all_ouputs: Dict of outputs from parent steps. The keys should
            match the names of these steps and the values should be their
            respective outputs.

    Returns:
        Dictionary with the same keys as `adapting_recipes` and values
        constructed according to the respective recipes.
    """
    adapted = {}
    for name, recipe in self.adapting_recipes.items():
        adapted[name] = self._construct(all_ouputs, recipe)
    return adapted
python
{ "resource": "" }
q7916
Step.fit_transform
train
def fit_transform(self, data):
    """Fit the model and transform data or load already processed data.

    Loads cached or persisted output or adapts data for the current
    transformer and executes ``transformer.fit_transform``.

    Args:
        data (dict): data dictionary with keys as input names and values
            as dictionaries of key-value pairs that can be passed to the
            ``self.transformer.fit_transform`` method.

            Example:

            .. code-block:: python

                data = {'input_1': {'X': X, 'y': y},
                        'input_2': {'X': X, 'y': y}
                        }

    Returns:
        dict: Step output from the ``self.transformer.fit_transform`` method
    """
    if data:
        assert isinstance(data, dict), \
            'Step {}, "data" argument in the "fit_transform()" method must be dict, ' \
            'got {} instead.'.format(self.name, type(data))
    logger.info('Step {}, working in "{}" mode'.format(self.name, self._mode))

    if self._mode == 'inference':
        raise ValueError('Step {}, you are in "{}" mode, where you cannot run "fit". '
                         'Please change mode to "train" to enable fitting. '
                         'Use: "step.set_mode_train()" then '
                         '"step.fit_transform()"'.format(self.name, self._mode))

    if self.output_is_cached and not self.force_fitting:
        logger.info('Step {} using cached output'.format(self.name))
        step_output_data = self.output
    elif self.output_is_persisted and self.load_persisted_output and not self.force_fitting:
        logger.info('Step {} loading persisted output from {}'.format(
            self.name, self.experiment_directory_output_step))
        step_output_data = self._load_output(self.experiment_directory_output_step)
    else:
        step_inputs = {}
        if self.input_data is not None:
            for input_data_part in self.input_data:
                step_inputs[input_data_part] = data[input_data_part]

        for input_step in self.input_steps:
            step_inputs[input_step.name] = input_step.fit_transform(data)

        if self.adapter:
            step_inputs = self._adapt(step_inputs)
        else:
            step_inputs = self._unpack(step_inputs)
        step_output_data = self._fit_transform_operation(step_inputs)
    logger.info('Step {}, fit and transform completed'.format(self.name))
    return step_output_data
python
{ "resource": "" }
q7917
Step.clean_cache_step
train
def clean_cache_step(self):
    """Clean cache for current step."""
    logger.info('Step {}, cleaning cache'.format(self.name))
    self.output = None
    return self
python
{ "resource": "" }
q7918
Step.clean_cache_upstream
train
def clean_cache_upstream(self):
    """Clean cache for all steps that are upstream to `self`."""
    logger.info('Cleaning cache for the entire upstream pipeline')
    for step in self.all_upstream_steps.values():
        logger.info('Step {}, cleaning cache'.format(step.name))
        step.output = None
    return self
python
{ "resource": "" }
q7919
Step.get_step_by_name
train
def get_step_by_name(self, name):
    """Extracts step by name from the pipeline.

    Extracted Step is a fully functional pipeline as well.
    All upstream Steps are already defined.

    Args:
        name (str): name of the step to be fetched

    Returns:
        Step (obj): extracted step
    """
    self._validate_step_name(name)
    name = str(name)
    try:
        return self.all_upstream_steps[name]
    except KeyError as e:
        msg = 'No Step with name "{}" found. ' \
              'You have the following Steps: {}'.format(
                  name, list(self.all_upstream_steps.keys()))
        raise StepError(msg) from e
python
{ "resource": "" }
q7920
Step.persist_upstream_structure
train
def persist_upstream_structure(self):
    """Persist json file with the upstream steps structure, that is
    step names and their connections."""
    persist_dir = os.path.join(self.experiment_directory,
                               '{}_upstream_structure.json'.format(self.name))
    logger.info('Step {}, saving upstream pipeline structure to {}'.format(
        self.name, persist_dir))
    joblib.dump(self.upstream_structure, persist_dir)
python
{ "resource": "" }
q7921
Step.persist_upstream_diagram
train
def persist_upstream_diagram(self, filepath):
    """Creates upstream steps diagram and persists it to disk as png file.

    Pydot graph is created and persisted to disk as png file under the
    filepath directory.

    Args:
        filepath (str): filepath to which the png with steps visualization
            should be persisted
    """
    assert isinstance(filepath, str), \
        'Step {} error, filepath must be str. Got {} instead'.format(
            self.name, type(filepath))
    persist_as_png(self.upstream_structure, filepath)
python
{ "resource": "" }
q7922
BaseTransformer.fit_transform
train
def fit_transform(self, *args, **kwargs):
    """Performs fit followed by transform.

    This method simply combines fit and transform.

    Args:
        args: positional arguments (can be anything)
        kwargs: keyword arguments (can be anything)

    Returns:
        dict: output
    """
    self.fit(*args, **kwargs)
    return self.transform(*args, **kwargs)
python
{ "resource": "" }
q7923
generate_hotp
train
def generate_hotp(secret, counter=4):
    """Generate a HOTP code.

    :param secret: A secret token for the authentication.
    :param counter: HOTP is a counter based algorithm.
    """
    # https://tools.ietf.org/html/rfc4226
    msg = struct.pack('>Q', counter)
    digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
    ob = digest[19]
    if PY2:
        ob = ord(ob)
    pos = ob & 15
    base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
    token = base % 1000000
    return token
python
{ "resource": "" }
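The RFC 4226 Appendix D test vectors give a concrete check of the dynamic-truncation logic above. This sketch re-implements the same steps for Python 3 only (no PY2/to_bytes shim) and asserts the first two published values:

import hashlib
import hmac
import struct

def hotp_sketch(secret: bytes, counter: int) -> int:
    msg = struct.pack('>Q', counter)
    digest = hmac.new(secret, msg, hashlib.sha1).digest()
    pos = digest[19] & 15                  # dynamic truncation offset
    base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
    return base % 1000000

# Secret "12345678901234567890", counters 0 and 1 (RFC 4226 Appendix D).
assert hotp_sketch(b'12345678901234567890', 0) == 755224
assert hotp_sketch(b'12345678901234567890', 1) == 287082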
q7924
generate_totp
train
def generate_totp(secret, period=30, timestamp=None):
    """Generate a TOTP code.

    A TOTP code is an extension of HOTP algorithm.

    :param secret: A secret token for the authentication.
    :param period: A period that a TOTP code is valid in seconds
    :param timestamp: Current time stamp.
    """
    if timestamp is None:
        timestamp = time.time()
    counter = int(timestamp) // period
    return generate_hotp(secret, counter)
python
{ "resource": "" }
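The only TOTP-specific work is the counter arithmetic: Unix time is divided into period-second windows, and every timestamp inside one window yields the same code. A worked check:

period = 30
for timestamp in (999_999_990, 1_000_000_019):
    print(int(timestamp) // period)  # both print 33333333 (same window)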
q7925
OtpAuth.valid_hotp
train
def valid_hotp(self, code, last=0, trials=100):
    """Validate a HOTP code.

    :param code: A number that is less than 6 characters.
    :param last: Guess HOTP code from last + 1 range.
    :param trials: Guess HOTP code ends at last + trials + 1.
    """
    if not valid_code(code):
        return False

    code = bytes(int(code))
    for i in range(last + 1, last + trials + 1):
        if compare_digest(bytes(self.hotp(counter=i)), code):
            return i
    return False
python
{ "resource": "" }
q7926
OtpAuth.valid_totp
train
def valid_totp(self, code, period=30, timestamp=None):
    """Validate a TOTP code.

    :param code: A number that is less than 6 characters.
    :param period: A period that a TOTP code is valid in seconds
    :param timestamp: Validate TOTP at this given timestamp
    """
    if not valid_code(code):
        return False

    return compare_digest(
        bytes(self.totp(period, timestamp)),
        bytes(int(code))
    )
python
{ "resource": "" }
q7927
OtpAuth.to_uri
train
def to_uri(self, type, label, issuer, counter=None):
    """Generate the otpauth protocol string.

    :param type: Algorithm type, hotp or totp.
    :param label: Label of the identifier.
    :param issuer: The company, the organization or something else.
    :param counter: Counter of the HOTP algorithm.
    """
    type = type.lower()

    if type not in ('hotp', 'totp'):
        raise ValueError('type must be hotp or totp')

    if type == 'hotp' and not counter:
        raise ValueError('HOTP type authentication needs a counter')

    # https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
    url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
           '&issuer=%(issuer)s')
    dct = dict(
        type=type, label=label, issuer=issuer,
        secret=self.encoded_secret, counter=counter
    )
    ret = url % dct
    if type == 'hotp':
        ret = '%s&counter=%s' % (ret, counter)
    return ret
python
{ "resource": "" }
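A sketch of the URI shape this template produces; the label, issuer, and base32 secret below are made-up example values:

url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
       '&issuer=%(issuer)s')
dct = dict(type='totp', label='alice@example.com',
           secret='JBSWY3DPEHPK3PXP', issuer='Example')
print(url % dct)
# otpauth://totp/alice@example.com?secret=JBSWY3DPEHPK3PXP&issuer=Example
# (for hotp, to_uri appends '&counter=<n>' to this string)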
q7928
OtpAuth.to_google
train
def to_google(self, type, label, issuer, counter=None):
    """Generate the otpauth protocol string for Google Authenticator.

    .. deprecated:: 0.2.0
        Use :func:`to_uri` instead.
    """
    warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
    return self.to_uri(type, label, issuer, counter)
python
{ "resource": "" }
q7929
ExchangeRates.install
train
def install(self, backend='money.exchange.SimpleBackend'):
    """Install an exchange rates backend using a python path string"""
    # RADAR: Python2
    if isinstance(backend, money.six.string_types):
        path, name = backend.rsplit('.', 1)
        module = importlib.import_module(path)
        backend = getattr(module, name)()
    elif isinstance(backend, type):
        backend = backend()

    if not isinstance(backend, BackendBase):
        raise TypeError("backend '{}' is not a subclass of "
                        "money.xrates.BackendBase".format(backend))

    self._backend = backend
python
{ "resource": "" }
q7930
ExchangeRates.rate
train
def rate(self, currency):
    """Return quotation between the base and another currency"""
    if not self._backend:
        raise ExchangeBackendNotInstalled()
    return self._backend.rate(currency)
python
{ "resource": "" }
q7931
_register_scatter
train
def _register_scatter():
    """
    Patch `PathCollection` and `scatter` to register their return values.

    This registration allows us to distinguish `PathCollection`s created by
    `Axes.scatter`, which should use point-like picking, from others, which
    should use path-like picking.  The former is more common, so we store the
    latter instead; this also lets us guess the type better if this module is
    imported late.
    """

    @functools.wraps(PathCollection.__init__)
    def __init__(self, *args, **kwargs):
        _nonscatter_pathcollections.add(self)
        return __init__.__wrapped__(self, *args, **kwargs)

    PathCollection.__init__ = __init__

    @functools.wraps(Axes.scatter)
    def scatter(*args, **kwargs):
        paths = scatter.__wrapped__(*args, **kwargs)
        with suppress(KeyError):
            _nonscatter_pathcollections.remove(paths)
        return paths

    Axes.scatter = scatter
python
{ "resource": "" }
q7932
_call_with_selection
train
def _call_with_selection(func):
    """Decorator that passes a `Selection` built from the non-kwonly args."""
    wrapped_kwonly_params = [
        param for param in inspect.signature(func).parameters.values()
        if param.kind == param.KEYWORD_ONLY]
    sel_sig = inspect.signature(Selection)
    default_sel_sig = sel_sig.replace(
        parameters=[param.replace(default=None)
                    if param.default is param.empty else param
                    for param in sel_sig.parameters.values()])

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        extra_kw = {param.name: kwargs.pop(param.name)
                    for param in wrapped_kwonly_params
                    if param.name in kwargs}
        ba = default_sel_sig.bind(*args, **kwargs)
        # apply_defaults
        ba.arguments = ChainMap(
            ba.arguments,
            {name: param.default
             for name, param in default_sel_sig.parameters.items()
             if param.default is not param.empty})
        sel = Selection(*ba.args, **ba.kwargs)
        return func(sel, **extra_kw)

    wrapper.__signature__ = Signature(
        list(sel_sig.parameters.values()) + wrapped_kwonly_params)
    return wrapper
python
{ "resource": "" }
q7933
_set_valid_props
train
def _set_valid_props(artist, kwargs):
    """Set valid properties for the artist, dropping the others."""
    artist.set(**{k: kwargs[k] for k in kwargs
                  if hasattr(artist, "set_" + k)})
    return artist
python
{ "resource": "" }
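A quick sketch of the filtering behavior of _set_valid_props: keys without a matching set_<name> method on the artist are silently dropped.

import matplotlib.pyplot as plt

line, = plt.plot([0, 1])
_set_valid_props(line, {"color": "red", "not_a_prop": 42})
print(line.get_color())  # 'red'; "not_a_prop" was ignored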
q7934
Money.to
train
def to(self, currency):
    """Return equivalent money object in another currency"""
    if currency == self._currency:
        return self
    rate = xrates.quotation(self._currency, currency)
    if rate is None:
        raise ExchangeRateNotFound(xrates.backend_name,
                                   self._currency, currency)
    amount = self._amount * rate
    return self.__class__(amount, currency)
python
{ "resource": "" }
q7935
_get_rounded_intersection_area
train
def _get_rounded_intersection_area(bbox_1, bbox_2):
    """Compute the intersection area between two bboxes rounded to 8 digits."""
    # The rounding allows sorting areas without floating point issues.
    bbox = bbox_1.intersection(bbox_1, bbox_2)
    return round(bbox.width * bbox.height, 8) if bbox else 0
python
{ "resource": "" }
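Bbox.intersection is a static method on matplotlib's Bbox, which is why the call above passes both boxes explicitly even though it is written as a method call on bbox_1. A small numeric check of the area computation:

from matplotlib.transforms import Bbox

bbox_1 = Bbox.from_bounds(0, 0, 2, 2)  # x, y, width, height
bbox_2 = Bbox.from_bounds(1, 1, 2, 2)
bbox = Bbox.intersection(bbox_1, bbox_2)
print(round(bbox.width * bbox.height, 8))  # 1.0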
q7936
cursor
train
def cursor(pickables=None, **kwargs):
    """
    Create a `Cursor` for a list of artists, containers, and axes.

    Parameters
    ----------
    pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
        All artists and containers in the list or on any of the axes or
        figures passed in the list are selectable by the constructed
        `Cursor`.  Defaults to all artists and containers on any of the
        figures that :mod:`~matplotlib.pyplot` is tracking.  Note that the
        latter will only work when relying on pyplot, not when figures are
        directly instantiated (e.g., when manually embedding Matplotlib in
        a GUI toolkit).

    **kwargs
        Keyword arguments are passed to the `Cursor` constructor.
    """
    if pickables is None:
        # Do not import pyplot ourselves to avoid forcing the backend.
        plt = sys.modules.get("matplotlib.pyplot")
        pickables = [
            plt.figure(num) for num in plt.get_fignums()] if plt else []
    elif (isinstance(pickables, Container)
          or not isinstance(pickables, Iterable)):
        pickables = [pickables]

    def iter_unpack_figures(pickables):
        for entry in pickables:
            if isinstance(entry, Figure):
                yield from entry.axes
            else:
                yield entry

    def iter_unpack_axes(pickables):
        for entry in pickables:
            if isinstance(entry, Axes):
                yield from _iter_axes_subartists(entry)
                containers.extend(entry.containers)
            elif isinstance(entry, Container):
                containers.append(entry)
            else:
                yield entry

    containers = []
    artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
    for container in containers:
        contained = list(filter(None, container.get_children()))
        for artist in contained:
            with suppress(ValueError):
                artists.remove(artist)
        if contained:
            artists.append(_pick_info.ContainerArtist(container))
    return Cursor(artists, **kwargs)
python
{ "resource": "" }
q7937
Cursor.selections
train
def selections(self):
    r"""The tuple of current `Selection`\s."""
    for sel in self._selections:
        if sel.annotation.axes is None:
            raise RuntimeError("Annotation unexpectedly removed; "
                               "use 'cursor.remove_selection' instead")
    return tuple(self._selections)
python
{ "resource": "" }
q7938
Cursor.add_selection
train
def add_selection(self, pi):
    """
    Create an annotation for a `Selection` and register it.

    Returns a new `Selection`, that has been registered by the `Cursor`,
    with the added annotation set in the :attr:`annotation` field and, if
    applicable, the highlighting artist in the :attr:`extras` field.

    Emits the ``"add"`` event with the new `Selection` as argument.  When
    the event is emitted, the position of the annotation is temporarily
    set to ``(nan, nan)``; if this position is not explicitly set by a
    callback, then a suitable position will be automatically computed.

    Likewise, if the text alignment is not explicitly set but the position
    is, then a suitable alignment will be automatically computed.
    """
    # pi: "pick_info", i.e. an incomplete selection.
    # Pre-fetch the figure and axes, as callbacks may actually unset them.
    figure = pi.artist.figure
    axes = pi.artist.axes
    if axes.get_renderer_cache() is None:
        figure.canvas.draw()  # Needed by draw_artist below anyways.
    renderer = pi.artist.axes.get_renderer_cache()
    ann = pi.artist.axes.annotate(
        _pick_info.get_ann_text(*pi), xy=pi.target,
        xytext=(np.nan, np.nan),
        ha=_MarkedStr("center"), va=_MarkedStr("center"),
        visible=self.visible,
        **self.annotation_kwargs)
    ann.draggable(use_blit=not self._multiple)
    extras = []
    if self._highlight:
        hl = self.add_highlight(*pi)
        if hl:
            extras.append(hl)
    sel = pi._replace(annotation=ann, extras=extras)
    self._selections.append(sel)
    for cb in self._callbacks["add"]:
        cb(sel)
    # Check that `ann.axes` is still set, as callbacks may have removed the
    # annotation.
    if ann.axes and ann.xyann == (np.nan, np.nan):
        fig_bbox = figure.get_window_extent()
        ax_bbox = axes.get_window_extent()
        overlaps = []
        for idx, annotation_position in enumerate(
                self.annotation_positions):
            ann.set(**annotation_position)
            # Work around matplotlib/matplotlib#7614: position update is
            # missing.
            ann.update_positions(renderer)
            bbox = ann.get_window_extent(renderer)
            overlaps.append(
                (_get_rounded_intersection_area(fig_bbox, bbox),
                 _get_rounded_intersection_area(ax_bbox, bbox),
                 # Avoid needlessly jumping around by breaking ties using
                 # the last used position as default.
                 idx == self._last_auto_position))
        auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
        ann.set(**self.annotation_positions[auto_position])
        self._last_auto_position = auto_position
    else:
        if isinstance(ann.get_ha(), _MarkedStr):
            ann.set_ha({-1: "right", 0: "center", 1: "left"}[
                np.sign(np.nan_to_num(ann.xyann[0]))])
        if isinstance(ann.get_va(), _MarkedStr):
            ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
                np.sign(np.nan_to_num(ann.xyann[1]))])
    if (extras
            or len(self.selections) > 1 and not self._multiple
            or not figure.canvas.supports_blit):
        # Either:
        #  - there may be more things to draw, or
        #  - annotation removal will make a full redraw necessary, or
        #  - blitting is not (yet) supported.
        figure.canvas.draw_idle()
    elif ann.axes:
        # Fast path, only needed if the annotation has not been immediately
        # removed.
        figure.draw_artist(ann)
        # Explicit argument needed on MacOSX backend.
        figure.canvas.blit(figure.bbox)
    # Removal comes after addition so that the fast blitting path works.
    if not self._multiple:
        for sel in self.selections[:-1]:
            self.remove_selection(sel)
    return sel
python
{ "resource": "" }
q7939
Cursor.add_highlight
train
def add_highlight(self, artist, *args, **kwargs):
    """
    Create, add, and return a highlighting artist.

    This method should be called with an "unpacked" `Selection`, possibly
    with some fields set to None.

    It is up to the caller to register the artist with the proper
    `Selection` (by calling ``sel.extras.append`` on the result of this
    method) in order to ensure cleanup upon deselection.
    """
    hl = _pick_info.make_highlight(
        artist, *args,
        **ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
    if hl:
        artist.axes.add_artist(hl)
        return hl
python
{ "resource": "" }
q7940
Cursor.connect
train
def connect(self, event, func=None):
    """
    Connect a callback to a `Cursor` event; return the callback.

    Two events can be connected to:

    - callbacks connected to the ``"add"`` event are called when a
      `Selection` is added, with that selection as only argument;
    - callbacks connected to the ``"remove"`` event are called when a
      `Selection` is removed, with that selection as only argument.

    This method can also be used as a decorator::

        @cursor.connect("add")
        def on_add(sel):
            ...

    Examples of callbacks::

        # Change the annotation text and alignment:
        lambda sel: sel.annotation.set(
            text=sel.artist.get_label(),  # or use e.g. sel.target.index
            ha="center", va="bottom")

        # Make label non-draggable:
        lambda sel: sel.draggable(False)
    """
    if event not in self._callbacks:
        raise ValueError("{!r} is not a valid cursor event".format(event))
    if func is None:
        return partial(self.connect, event)
    self._callbacks[event].append(func)
    return func
python
{ "resource": "" }
q7941
Cursor.disconnect
train
def disconnect(self, event, cb):
    """
    Disconnect a previously connected callback.

    If a callback is connected multiple times, only one connection is
    removed.
    """
    try:
        self._callbacks[event].remove(cb)
    except KeyError:
        raise ValueError("{!r} is not a valid cursor event".format(event))
    except ValueError:
        raise ValueError("Callback {} is not registered".format(cb))
python
{ "resource": "" }
q7942
Cursor.remove
train
def remove(self):
    """
    Remove a cursor.

    Remove all `Selection`\\s, disconnect all callbacks, and allow the
    cursor to be garbage collected.
    """
    for disconnectors in self._disconnectors:
        disconnectors()
    for sel in self.selections:
        self.remove_selection(sel)
    for s in type(self)._keep_alive.values():
        with suppress(KeyError):
            s.remove(self)
python
{ "resource": "" }
q7943
Cursor.remove_selection
train
def remove_selection(self, sel):
    """Remove a `Selection`."""
    self._selections.remove(sel)
    # <artist>.figure will be unset so we save them first.
    figures = {artist.figure for artist in [sel.annotation] + sel.extras}
    # ValueError is raised if the artist has already been removed.
    with suppress(ValueError):
        sel.annotation.remove()
    for artist in sel.extras:
        with suppress(ValueError):
            artist.remove()
    for cb in self._callbacks["remove"]:
        cb(sel)
    for figure in figures:
        figure.canvas.draw_idle()
python
{ "resource": "" }
q7944
BaseHandler.request_access_token
train
def request_access_token(self, access_code):
    "Request access token from GitHub"
    token_response = request_session.post(
        "https://github.com/login/oauth/access_token",
        data={
            "client_id": self.oauth_client_id,
            "client_secret": self.oauth_client_secret,
            "code": access_code
        },
        headers={"Accept": "application/json"},
    )
    return helper_request_access_token(token_response.json())
python
{ "resource": "" }
q7945
combine_dicts
train
def combine_dicts(recs):
    """Combine a list of recs, appending values to matching keys"""
    if not recs:
        return None
    if len(recs) == 1:
        return recs.pop()

    new_rec = {}
    for rec in recs:
        for k, v in rec.iteritems():
            if k in new_rec:
                new_rec[k] = "%s, %s" % (new_rec[k], v)
            else:
                new_rec[k] = v
    return new_rec
python
{ "resource": "" }
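The function above uses Python 2's dict.iteritems; this is a Python 3 equivalent plus a worked example of the append-on-collision behavior (field names here are made up):

def combine_dicts_py3(recs):
    if not recs:
        return None
    if len(recs) == 1:
        return recs.pop()
    new_rec = {}
    for rec in recs:
        for k, v in rec.items():
            if k in new_rec:
                new_rec[k] = "%s, %s" % (new_rec[k], v)
            else:
                new_rec[k] = v
    return new_rec

print(combine_dicts_py3([{'ip': '10.0.0.1'}, {'ip': '10.0.0.2', 'host': 'db1'}]))
# {'ip': '10.0.0.1, 10.0.0.2', 'host': 'db1'}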
q7946
combine_recs
train
def combine_recs(rec_list, key):
    """Use a common key to combine a list of recs"""
    final_recs = {}
    for rec in rec_list:
        rec_key = rec[key]
        if rec_key in final_recs:
            for k, v in rec.iteritems():
                if k in final_recs[rec_key] and final_recs[rec_key][k] != v:
                    raise Exception("Mis-match for key '%s'" % k)
                final_recs[rec_key][k] = v
        else:
            final_recs[rec_key] = rec
    return final_recs.values()
python
{ "resource": "" }
q7947
HostFromTarball._load_from_file
train
def _load_from_file(self, filename):
    """Find filename in tar, and load it"""
    if filename in self.fdata:
        return self.fdata[filename]
    else:
        filepath = find_in_tarball(self.tarloc, filename)
        return read_from_tarball(self.tarloc, filepath)
python
{ "resource": "" }
q7948
parse_extlang
train
def parse_extlang(subtags):
    """
    Parse an 'extended language' tag, which consists of 1 to 3 three-letter
    language codes.

    Extended languages are used for distinguishing dialects/sublanguages
    (depending on your view) of macrolanguages such as Arabic, Bahasa Malay,
    and Chinese.

    It's supposed to also be acceptable to just use the sublanguage as the
    primary language code, and your code should know what's a macrolanguage
    of what. For example, 'zh-yue' and 'yue' are the same language
    (Cantonese), and differ only in whether they explicitly spell out that
    Cantonese is a kind of Chinese.
    """
    index = 0
    parsed = []
    while index < len(subtags) and len(subtags[index]) == 3 and index < 3:
        parsed.append(('extlang', subtags[index]))
        index += 1
    return parsed + parse_subtags(subtags[index:], SCRIPT)
python
{ "resource": "" }
q7949
order_error
train
def order_error(subtag, got, expected):
    """
    Output an error indicating that tags were out of order.
    """
    options = SUBTAG_TYPES[expected:]
    if len(options) == 1:
        expect_str = options[0]
    elif len(options) == 2:
        expect_str = '%s or %s' % (options[0], options[1])
    else:
        expect_str = '%s, or %s' % (', '.join(options[:-1]), options[-1])
    got_str = SUBTAG_TYPES[got]
    raise LanguageTagError("This %s subtag, %r, is out of place. "
                           "Expected %s." % (got_str, subtag, expect_str))
python
{ "resource": "" }
q7950
tag_match_score
train
def tag_match_score(desired: {str, Language},
                    supported: {str, Language}) -> int:
    """
    Return a number from 0 to 100 indicating the strength of match between
    the language the user desires, D, and a supported language, S. Higher
    numbers are better. A reasonable cutoff for not messing with your users
    is to only accept scores of 75 or more.

    A score of 100 means the languages are the same, possibly after
    normalizing and filling in likely values.

    >>> tag_match_score('en', 'en')
    100
    >>> tag_match_score('en', 'en-US')
    100
    >>> tag_match_score('zh-Hant', 'zh-TW')
    100
    >>> tag_match_score('ru-Cyrl', 'ru')
    100

    >>> # Serbo-Croatian is a politically contentious idea, but in practice
    >>> # it's considered equivalent to Serbian in Latin characters.
    >>> tag_match_score('sh', 'sr-Latn')
    100

    A score of 92 to 97 indicates a regional difference.

    >>> tag_match_score('zh-HK', 'zh-MO')   # Chinese is similar in Hong Kong and Macao
    97
    >>> tag_match_score('en-AU', 'en-GB')   # Australian English is similar to British English
    96
    >>> tag_match_score('en-IN', 'en-GB')   # Indian English is also similar to British English
    96
    >>> tag_match_score('es-PR', 'es-419')  # Puerto Rican Spanish is Latin American Spanish
    96
    >>> tag_match_score('en-US', 'en-GB')   # American and British English are somewhat different
    94
    >>> tag_match_score('es-MX', 'es-ES')   # Mexican Spanish is different from Spanish Spanish
    92

    >>> # Serbian has two scripts, and people might prefer one but understand both
    >>> tag_match_score('sr-Latn', 'sr-Cyrl')
    95

    >>> # European Portuguese is different from the most common form (Brazilian Portuguese)
    >>> tag_match_score('pt', 'pt-PT')
    92

    A score of 86 to 90 indicates that people who use the desired language
    are demographically likely to understand the supported language, even
    if the languages themselves are unrelated. There are many languages
    that have a one-way connection of this kind to English or French.

    >>> tag_match_score('ta', 'en')  # Tamil to English
    86
    >>> tag_match_score('mg', 'fr')  # Malagasy to French
    86

    Sometimes it's more straightforward than that: people who use the
    desired language are demographically likely to understand the supported
    language because it's demographically relevant and highly related.

    >>> tag_match_score('af', 'nl')  # Afrikaans to Dutch
    86
    >>> tag_match_score('ms', 'id')  # Malay to Indonesian
    86
    >>> tag_match_score('nn', 'nb')  # Nynorsk to Norwegian Bokmål
    90
    >>> tag_match_score('nb', 'da')  # Norwegian Bokmål to Danish
    88

    A score of 80 to 85 indicates a particularly contentious difference in
    script, where people who understand one script can learn the other but
    probably won't be happy with it. This specifically applies to Chinese.

    >>> tag_match_score('zh-Hans', 'zh-Hant')
    85
    >>> tag_match_score('zh-CN', 'zh-HK')
    85
    >>> tag_match_score('zh-CN', 'zh-TW')
    85
    >>> tag_match_score('zh-Hant', 'zh-Hans')
    81
    >>> tag_match_score('zh-TW', 'zh-CN')
    81

    When the supported script is a different one than desired, this is
    usually a major difference with score of 60 or less.

    >>> tag_match_score('ja', 'ja-Latn-US-hepburn')
    56
    >>> # You can read the Shavian script, right?
    >>> tag_match_score('en', 'en-Shaw')
    56

    When there is no indication the supported language will be understood,
    the score will be 20 or less, to a minimum of 0.

    >>> tag_match_score('es', 'fr')  # Spanish and French are different.
    16
    >>> tag_match_score('en', 'ta')  # English speakers generally do not know Tamil.
    0

    CLDR doesn't take into account which languages are considered part of a
    common 'macrolanguage'. We have this data, so we can use it in matching.
    If two languages have no other rule that would allow them to match, but
    share a macrolanguage, they'll get a match score of 20 less than what
    they would get if the language matched.

    >>> tag_match_score('arz', 'ar')   # Egyptian Arabic to Standard Arabic
    80
    >>> tag_match_score('arz', 'ary')  # Egyptian Arabic to Moroccan Arabic
    76

    Here's an example that has script, region, and language differences, but
    a macrolanguage in common. Written Chinese is usually presumed to be
    Mandarin Chinese, but colloquial Cantonese can be written as well. When
    it is, it probably has region, script, and language differences from the
    usual mainland Chinese. But it is still part of the 'Chinese'
    macrolanguage, so there is more similarity than, say, comparing Mandarin
    to Hindi.

    >>> tag_match_score('yue', 'zh')
    36

    Comparing Swiss German ('gsw') to standardized German ('de') shows how
    these scores can be asymmetrical. Swiss German speakers will understand
    German, so the score in that direction is 92. Most German speakers find
    Swiss German unintelligible, and CLDR in fact assigns this a score of
    16. This seems a little bit extreme, but the asymmetry is certainly
    there. And if your text is tagged as 'gsw', it must be that way for a
    reason.

    >>> tag_match_score('gsw', 'de')
    92
    >>> tag_match_score('de', 'gsw')
    16
    """
    desired_ld = Language.get(desired)
    supported_ld = Language.get(supported)
    return desired_ld.match_score(supported_ld)
python
{ "resource": "" }
q7951
best_match
train
def best_match(desired_language: {str, Language}, supported_languages: list,
               min_score: int=75) -> (str, int):
    """
    You have software that supports any of the `supported_languages`. You
    want to use `desired_language`. This function lets you choose the right
    language, even if there isn't an exact match.

    Returns:

    - The best-matching language code, which will be one of the
      `supported_languages` or 'und'
    - The score of the match, from 0 to 100

    `min_score` sets the minimum match score. If all languages match with a
    lower score than that, the result will be 'und' with a score of 0.

    When there is a tie for the best matching language, the first one in the
    tie will be used.

    Setting `min_score` lower will enable more things to match, at the cost
    of possibly mis-handling data or upsetting users. Read the documentation
    for :func:`tag_match_score` to understand what the numbers mean.

    >>> best_match('fr', ['de', 'en', 'fr'])
    ('fr', 100)
    >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
    ('sr-Latn', 100)
    >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
    ('zh-Hans', 100)
    >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
    ('cmn-Hans', 100)
    >>> best_match('pt', ['pt-BR', 'pt-PT'])
    ('pt-BR', 100)
    >>> best_match('en-AU', ['en-GB', 'en-US'])
    ('en-GB', 96)
    >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
    ('es-419', 96)
    >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
    ('es-PU', 95)
    >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
    ('es-AR', 95)
    >>> best_match('zsm', ['id', 'mhp'])
    ('id', 86)
    >>> best_match('eu', ['el', 'en', 'es'])
    ('es', 90)
    >>> best_match('eu', ['el', 'en', 'es'], min_score=92)
    ('und', 0)
    """
    # Quickly return if the desired language is directly supported
    if desired_language in supported_languages:
        return desired_language, 100

    # Reduce the desired language to a standard form that could also match
    desired_language = standardize_tag(desired_language)
    if desired_language in supported_languages:
        return desired_language, 100

    match_scores = [
        (supported, tag_match_score(desired_language, supported))
        for supported in supported_languages
    ]
    match_scores = [
        (supported, score) for (supported, score) in match_scores
        if score >= min_score
    ] + [('und', 0)]

    match_scores.sort(key=lambda item: -item[1])
    return match_scores[0]
python
{ "resource": "" }
q7952
Language.make
train
def make(cls, language=None, extlangs=None, script=None, region=None,
         variants=None, extensions=None, private=None):
    """
    Create a Language object by giving any subset of its attributes.

    If this value has been created before, return the existing value.
    """
    values = (language, tuple(extlangs or ()), script, region,
              tuple(variants or ()), tuple(extensions or ()), private)
    if values in cls._INSTANCES:
        return cls._INSTANCES[values]

    instance = cls(
        language=language, extlangs=extlangs,
        script=script, region=region, variants=variants,
        extensions=extensions, private=private
    )
    cls._INSTANCES[values] = instance
    return instance
python
{ "resource": "" }
q7953
Language.get
train
def get(tag: {str, 'Language'}, normalize=True) -> 'Language':
    """
    Create a Language object from a language tag string.

    If normalize=True, non-standard or overlong tags will be replaced as
    they're interpreted. This is recommended.

    Here are several examples of language codes, which are also test cases.
    Most language codes are straightforward, but these examples will get
    pretty obscure toward the end.

    >>> Language.get('en-US')
    Language.make(language='en', region='US')

    >>> Language.get('zh-Hant')
    Language.make(language='zh', script='Hant')

    >>> Language.get('und')
    Language.make()

    This function is idempotent, in case you already have a Language object:

    >>> Language.get(Language.get('en-us'))
    Language.make(language='en', region='US')

    The non-code 'root' is sometimes used to represent the lack of any
    language information, similar to 'und'.

    >>> Language.get('root')
    Language.make()

    By default, getting a Language object will automatically convert
    deprecated tags:

    >>> Language.get('iw')
    Language.make(language='he')

    >>> Language.get('in')
    Language.make(language='id')

    One type of deprecated tag that should be replaced is for sign
    languages, which used to all be coded as regional variants of a
    fictitious global sign language called 'sgn'. Of course, there is no
    global sign language, so sign languages now have their own language
    codes.

    >>> Language.get('sgn-US')
    Language.make(language='ase')

    >>> Language.get('sgn-US', normalize=False)
    Language.make(language='sgn', region='US')

    'en-gb-oed' is a tag that's grandfathered into the standard because it
    has been used to mean "spell-check this with Oxford English Dictionary
    spelling", but that tag has the wrong shape. We interpret this as the
    new standardized tag 'en-gb-oxendict', unless asked not to normalize.

    >>> Language.get('en-gb-oed')
    Language.make(language='en', region='GB', variants=['oxendict'])

    >>> Language.get('en-gb-oed', normalize=False)
    Language.make(language='en-gb-oed')

    'zh-min-nan' is another oddly-formed tag, used to represent the
    Southern Min language, which includes Taiwanese as a regional form. It
    now has its own language code.

    >>> Language.get('zh-min-nan')
    Language.make(language='nan')

    There's not much we can do with the vague tag 'zh-min':

    >>> Language.get('zh-min')
    Language.make(language='zh-min')

    Occasionally Wiktionary will use 'extlang' tags in strange ways, such
    as using the tag 'und-ibe' for some unspecified Iberian language.

    >>> Language.get('und-ibe')
    Language.make(extlangs=['ibe'])

    Here's an example of replacing multiple deprecated tags.

    The language tag 'sh' (Serbo-Croatian) ended up being politically
    problematic, and different standards took different steps to address
    this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
    and 'bs'. Unicode further decided that it's a legacy tag that should be
    interpreted as 'sr-Latn', which the language matching rules say is
    mutually intelligible with all those languages.

    We complicate the example by adding on the region tag 'QU', an old
    provisional tag for the European Union, which is now standardized as
    'EU'.

    >>> Language.get('sh-QU')
    Language.make(language='sr', script='Latn', region='EU')
    """
    if isinstance(tag, Language):
        if not normalize:
            # shortcut: we have the tag already
            return tag

        # We might need to normalize this tag. Convert it back into a
        # string tag, to cover all the edge cases of normalization in a
        # way that we've already solved.
        tag = tag.to_tag()

    if (tag, normalize) in Language._PARSE_CACHE:
        return Language._PARSE_CACHE[tag, normalize]

    data = {}

    # if the complete tag appears as something to normalize, do the
    # normalization right away. Smash case when checking, because the
    # case normalization that comes from parse_tag() hasn't been applied
    # yet.
    tag_lower = tag.lower()
    if normalize and tag_lower in LANGUAGE_REPLACEMENTS:
        tag = LANGUAGE_REPLACEMENTS[tag_lower]

    components = parse_tag(tag)

    for typ, value in components:
        if typ == 'extlang' and normalize and 'language' in data:
            # smash extlangs when possible
            minitag = '%s-%s' % (data['language'], value)
            norm = LANGUAGE_REPLACEMENTS.get(minitag.lower())
            if norm is not None:
                data.update(
                    Language.get(norm, normalize).to_dict()
                )
            else:
                data.setdefault('extlangs', []).append(value)
        elif typ in {'extlang', 'variant', 'extension'}:
            data.setdefault(typ + 's', []).append(value)
        elif typ == 'language':
            if value == 'und':
                pass
            elif normalize:
                replacement = LANGUAGE_REPLACEMENTS.get(value.lower())
                if replacement is not None:
                    # parse the replacement if necessary -- this helps with
                    # Serbian and Moldovan
                    data.update(
                        Language.get(replacement, normalize).to_dict()
                    )
                else:
                    data['language'] = value
            else:
                data['language'] = value
        elif typ == 'region':
            if normalize:
                data['region'] = REGION_REPLACEMENTS.get(value.lower(), value)
            else:
                data['region'] = value
        elif typ == 'grandfathered':
            # If we got here, we got a grandfathered tag but we were asked
            # not to normalize it, or the CLDR data doesn't know how to
            # normalize it. The best we can do is set the entire tag as the
            # language.
            data['language'] = value
        else:
            data[typ] = value

    result = Language.make(**data)
    Language._PARSE_CACHE[tag, normalize] = result
    return result
python
{ "resource": "" }
q7954
Language.simplify_script
train
def simplify_script(self) -> 'Language':
    """
    Remove the script from some parsed language data, if the script is
    redundant with the language.

    >>> Language.make(language='en', script='Latn').simplify_script()
    Language.make(language='en')

    >>> Language.make(language='yi', script='Latn').simplify_script()
    Language.make(language='yi', script='Latn')

    >>> Language.make(language='yi', script='Hebr').simplify_script()
    Language.make(language='yi')
    """
    if self._simplified is not None:
        return self._simplified

    if self.language and self.script:
        if DEFAULT_SCRIPTS.get(self.language) == self.script:
            result = self.update_dict({'script': None})
            self._simplified = result
            return self._simplified

    self._simplified = self
    return self._simplified
python
{ "resource": "" }
q7955
Language.assume_script
train
def assume_script(self) -> 'Language':
    """
    Fill in the script if it's missing, and if it can be assumed from the
    language subtag. This is the opposite of `simplify_script`.

    >>> Language.make(language='en').assume_script()
    Language.make(language='en', script='Latn')

    >>> Language.make(language='yi').assume_script()
    Language.make(language='yi', script='Hebr')

    >>> Language.make(language='yi', script='Latn').assume_script()
    Language.make(language='yi', script='Latn')

    This fills in nothing when the script cannot be assumed -- such as when
    the language has multiple scripts, or it has no standard orthography:

    >>> Language.make(language='sr').assume_script()
    Language.make(language='sr')

    >>> Language.make(language='eee').assume_script()
    Language.make(language='eee')

    It also doesn't fill anything in when the language is unspecified.

    >>> Language.make(region='US').assume_script()
    Language.make(region='US')
    """
    if self._assumed is not None:
        return self._assumed
    if self.language and not self.script:
        try:
            self._assumed = self.update_dict(
                {'script': DEFAULT_SCRIPTS[self.language]}
            )
        except KeyError:
            self._assumed = self
    else:
        self._assumed = self
    return self._assumed
python
{ "resource": "" }
q7956
Language.prefer_macrolanguage
train
def prefer_macrolanguage(self) -> 'Language':
    """
    BCP 47 doesn't specify what to do with macrolanguages and the languages
    they contain. The Unicode CLDR, on the other hand, says that when a
    macrolanguage has a dominant standardized language, the macrolanguage
    code should be used for that language. For example, Mandarin Chinese is
    'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.

    This isn't a rule you'd want to follow in all cases -- for example, you
    may want to be able to specifically say that 'ms' (the Malay
    macrolanguage) contains both 'zsm' (Standard Malay) and 'id'
    (Indonesian). But applying this rule helps when interoperating with the
    Unicode CLDR.

    So, applying `prefer_macrolanguage` to a Language object will return a
    new object, replacing the language with the macrolanguage if it is the
    dominant language within that macrolanguage. It will leave non-dominant
    languages that have macrolanguages alone.

    >>> Language.get('arb').prefer_macrolanguage()
    Language.make(language='ar')

    >>> Language.get('cmn-Hant').prefer_macrolanguage()
    Language.make(language='zh', script='Hant')

    >>> Language.get('yue-Hant').prefer_macrolanguage()
    Language.make(language='yue', script='Hant')
    """
    if self._macrolanguage is not None:
        return self._macrolanguage
    language = self.language or 'und'
    if language in NORMALIZED_MACROLANGUAGES:
        self._macrolanguage = self.update_dict({
            'language': NORMALIZED_MACROLANGUAGES[language]
        })
    else:
        self._macrolanguage = self
    return self._macrolanguage
python
{ "resource": "" }
q7957
Language.broaden
train
def broaden(self) -> 'List[Language]':
    """
    Iterate through increasingly general versions of this parsed language
    tag.

    This isn't actually that useful for matching two arbitrary language
    tags against each other, but it is useful for matching them against a
    known standardized form, such as in the CLDR data.

    The list of broader versions to try appears in UTR 35, section 4.3,
    "Likely Subtags".

    >>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
    ...     print(langdata)
    nn-Latn-NO-x-thingy
    nn-Latn-NO
    nn-NO
    nn-Latn
    nn
    und-Latn
    und
    """
    if self._broader is not None:
        return self._broader
    self._broader = [self]
    # Track the whole tags produced so far, so each broader form appears
    # only once.
    seen = {self.to_tag()}
    for keyset in self.BROADER_KEYSETS:
        filtered = self._filter_attributes(keyset)
        tag = filtered.to_tag()
        if tag not in seen:
            self._broader.append(filtered)
            seen.add(tag)
    return self._broader
python
{ "resource": "" }
q7958
Language.maximize
train
def maximize(self) -> 'Language':
    """
    The Unicode CLDR contains a "likelySubtags" data file, which can guess
    reasonable values for fields that are missing from a language tag.

    This is particularly useful for comparing, for example, "zh-Hant" and
    "zh-TW", two common language tags that say approximately the same thing
    via rather different information. (Using traditional Han characters is
    not the same as being in Taiwan, but each implies that the other is
    likely.)

    These implications are provided in the CLDR supplemental data, and are
    based on the likelihood of people using the language to transmit
    information on the Internet. (This is why the overall default is
    English, not Chinese.)

    >>> str(Language.get('zh-Hant').maximize())
    'zh-Hant-TW'
    >>> str(Language.get('zh-TW').maximize())
    'zh-Hant-TW'
    >>> str(Language.get('ja').maximize())
    'ja-Jpan-JP'
    >>> str(Language.get('pt').maximize())
    'pt-Latn-BR'
    >>> str(Language.get('und-Arab').maximize())
    'ar-Arab-EG'
    >>> str(Language.get('und-CH').maximize())
    'de-Latn-CH'
    >>> str(Language.make().maximize())    # 'MURICA.
    'en-Latn-US'
    >>> str(Language.get('und-ibe').maximize())
    'en-ibe-Latn-US'
    """
    if self._filled is not None:
        return self._filled

    for broader in self.broaden():
        tag = broader.to_tag()
        if tag in LIKELY_SUBTAGS:
            result = Language.get(LIKELY_SUBTAGS[tag], normalize=False)
            result = result.update(self)
            self._filled = result
            return result

    raise RuntimeError(
        "Couldn't fill in likely values. This represents a problem with "
        "the LIKELY_SUBTAGS data."
    )
python
{ "resource": "" }
q7959
Language.script_name
train
def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
    """
    Describe the script part of the language tag in a natural language.
    """
    return self._get_name('script', language, min_score)
python
{ "resource": "" }
q7960
Language.region_name
train
def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
    """
    Describe the region part of the language tag in a natural language.
    """
    return self._get_name('region', language, min_score)
python
{ "resource": "" }
q7961
Language.variant_names
train
def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list:
    """
    Describe each of the variant parts of the language tag in a natural
    language.
    """
    names = []
    for variant in self.variants:
        var_names = code_to_names('variant', variant)
        names.append(self._best_name(var_names, language, min_score))
    return names
python
{ "resource": "" }
q7962
Language.describe
train
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
    """
    Return a dictionary that describes a given language tag in a specified
    natural language.

    See `language_name` and related methods for more specific versions of
    this.

    The desired `language` will in fact be matched against the available
    options using the matching technique that this module provides. We can
    illustrate many aspects of this by asking for a description of Shavian
    script (a script devised by author George Bernard Shaw), and where you
    might find it, in various languages.

    >>> from pprint import pprint
    >>> shaw = Language.make(script='Shaw').maximize()
    >>> pprint(shaw.describe('en'))
    {'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}

    >>> pprint(shaw.describe('fr'))
    {'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}

    >>> pprint(shaw.describe('es'))
    {'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}

    >>> pprint(shaw.describe('pt'))
    {'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}

    >>> pprint(shaw.describe('uk'))
    {'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}

    >>> pprint(shaw.describe('arb'))
    {'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}

    >>> pprint(shaw.describe('th'))
    {'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}

    >>> pprint(shaw.describe('zh-Hans'))
    {'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}

    >>> pprint(shaw.describe('zh-Hant'))
    {'language': '英文', 'region': '英國', 'script': '簫柏納字符'}

    >>> pprint(shaw.describe('ja'))
    {'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}

    When we don't have a localization for the language, we fall back on
    'und', which just shows the language codes.

    >>> pprint(shaw.describe('lol'))
    {'language': 'en', 'region': 'GB', 'script': 'Shaw'}

    Wait, is that a real language?

    >>> pprint(Language.get('lol').maximize().describe())
    {'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
    """
    names = {}
    if self.language:
        names['language'] = self.language_name(language, min_score)
    if self.script:
        names['script'] = self.script_name(language, min_score)
    if self.region:
        names['region'] = self.region_name(language, min_score)
    if self.variants:
        names['variants'] = self.variant_names(language, min_score)
    return names
python
{ "resource": "" }
q7963
Language.find_name
train
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
    """
    Find the subtag of a particular `tagtype` that has the given `name`.

    The default language, "und", will allow matching names in any language,
    so you can get the code 'fr' by looking up "French", "Français", or
    "francés".

    Occasionally, names are ambiguous in a way that can be resolved by
    specifying what name the language is supposed to be in. For example,
    there is a language named 'Malayo' in English, but it's different from
    the language named 'Malayo' in Spanish (which is Malay). Specifying the
    language will look up the name in a trie that is only in that language.

    In a previous version, we thought we were going to deprecate the
    `language` parameter, as there weren't significant cases of conflicts
    in names of things between languages. Well, we got more data, and
    conflicts in names are everywhere.

    Specifying the language that the name should be in is still not
    required, but it will help to make sure that names can be round-tripped.

    >>> Language.find_name('language', 'francés')
    Language.make(language='fr')

    >>> Language.find_name('region', 'United Kingdom')
    Language.make(region='GB')

    >>> Language.find_name('script', 'Arabic')
    Language.make(script='Arab')

    >>> Language.find_name('language', 'norsk bokmål')
    Language.make(language='nb')

    >>> Language.find_name('language', 'norsk')
    Language.make(language='no')

    >>> Language.find_name('language', 'norsk', 'en')
    Traceback (most recent call last):
        ...
    LookupError: Can't find any language named 'norsk'

    >>> Language.find_name('language', 'norsk', 'no')
    Language.make(language='no')

    >>> Language.find_name('language', 'malayo', 'en')
    Language.make(language='mbp')

    >>> Language.find_name('language', 'malayo', 'es')
    Language.make(language='ms')

    Some language names resolve to more than a language. For example, the
    name 'Brazilian Portuguese' resolves to a language and a region, and
    'Simplified Chinese' resolves to a language and a script. In these
    cases, a Language object with multiple subtags will be returned.

    >>> Language.find_name('language', 'Brazilian Portuguese', 'en')
    Language.make(language='pt', region='BR')

    >>> Language.find_name('language', 'Simplified Chinese', 'en')
    Language.make(language='zh', script='Hans')

    A small amount of fuzzy matching is supported: if the name can be
    shortened to match a single language name, you get that language. This
    allows, for example, "Hakka dialect" to match "Hakka".

    >>> Language.find_name('language', 'Hakka dialect')
    Language.make(language='hak')
    """
    # No matter what form of language we got, normalize it to a single
    # language subtag
    if isinstance(language, Language):
        language = language.language
    elif isinstance(language, str):
        language = get(language).language
    if language is None:
        language = 'und'

    code = name_to_code(tagtype, name, language)
    if code is None:
        raise LookupError("Can't find any %s named %r" % (tagtype, name))

    if '-' in code:
        return Language.get(code)
    else:
        data = {tagtype: code}
        return Language.make(**data)
python
{ "resource": "" }
q7964
Language.to_dict
train
def to_dict(self): """ Get a dictionary of the attributes of this Language object, which can be useful for constructing a similar object. """ if self._dict is not None: return self._dict result = {} for key in self.ATTRIBUTES: value = getattr(self, key) if value: result[key] = value self._dict = result return result
python
{ "resource": "" }
q7965
Language.update
train
def update(self, other: 'Language') -> 'Language':
        """
        Return a new Language in which each field is taken from `other`
        when `other` has it set, and from this Language otherwise.
        Neither operand is modified.
        """
        return Language.make(
            language=other.language or self.language,
            extlangs=other.extlangs or self.extlangs,
            script=other.script or self.script,
            region=other.region or self.region,
            variants=other.variants or self.variants,
            extensions=other.extensions or self.extensions,
            private=other.private or self.private
        )
python
{ "resource": "" }
q7966
Language.update_dict
train
def update_dict(self, newdata: dict) -> 'Language':
        """
        Return a new Language whose attributes come from the `newdata`
        dictionary where present, and from this Language otherwise.
        This Language itself is not modified.
        """
        return Language.make(
            language=newdata.get('language', self.language),
            extlangs=newdata.get('extlangs', self.extlangs),
            script=newdata.get('script', self.script),
            region=newdata.get('region', self.region),
            variants=newdata.get('variants', self.variants),
            extensions=newdata.get('extensions', self.extensions),
            private=newdata.get('private', self.private)
        )
python
{ "resource": "" }
q7967
Language._filter_keys
train
def _filter_keys(d: dict, keys: set) -> dict: """ Select a subset of keys from a dictionary. """ return {key: d[key] for key in keys if key in d}
python
{ "resource": "" }
q7968
Language._filter_attributes
train
def _filter_attributes(self, keyset): """ Return a copy of this object with a subset of its attributes set. """ filtered = self._filter_keys(self.to_dict(), keyset) return Language.make(**filtered)
python
{ "resource": "" }
q7969
Language._searchable_form
train
def _searchable_form(self) -> 'Language': """ Convert a parsed language tag so that the information it contains is in the best form for looking up information in the CLDR. """ if self._searchable is not None: return self._searchable self._searchable = self._filter_attributes( {'language', 'script', 'region'} ).simplify_script().prefer_macrolanguage() return self._searchable
python
{ "resource": "" }
q7970
read_cldr_names
train
def read_cldr_names(path, language, category):
    """
    Read CLDR's names for things in a particular language.
    """
    filename = data_filename('{}/{}/{}.json'.format(path, language, category))
    # Use a context manager so the file handle is closed promptly.
    with open(filename, encoding='utf-8') as file:
        fulldata = json.load(file)
    data = fulldata['main'][language]['localeDisplayNames'][category]
    return data
python
{ "resource": "" }
q7971
parse_file
train
def parse_file(file): """ Take an open file containing the IANA subtag registry, and yield a dictionary of information for each subtag it describes. """ lines = [] for line in file: line = line.rstrip('\n') if line == '%%': # This is a separator between items. Parse the data we've # collected and yield the result. yield from parse_item(lines) lines.clear() elif line.startswith(' '): # This is a continuation line. Concatenate it to the previous # line, including one of the spaces. lines[-1] += line[1:] else: lines.append(line) yield from parse_item(lines)
python
{ "resource": "" }
q7972
load_trie
train
def load_trie(filename): """ Load a BytesTrie from the marisa_trie on-disk format. """ trie = marisa_trie.BytesTrie() # marisa_trie raises warnings that make no sense. Ignore them. with warnings.catch_warnings(): warnings.simplefilter("ignore") trie.load(filename) return trie
python
{ "resource": "" }
q7973
name_to_code
train
def name_to_code(category, name, language: str='und'): """ Get a language, script, or region by its name in some language. The language here must be a string representing a language subtag only. The `Language.find` method can handle other representations of a language and normalize them to this form. The default language, "und", will allow matching names in any language, so you can get the code 'fr' by looking up "French", "Français", or "francés". A small amount of fuzzy matching is supported: if the name can be shortened or lengthened to match a single language name, you get that language. This allows, for example, "Hakka Chinese" to match "Hakka". Occasionally, names are ambiguous in a way that can be resolved by specifying what name the language is supposed to be in. For example, there is a language named 'Malayo' in English, but it's different from the language named 'Malayo' in Spanish (which is Malay). Specifying the language will look up the name in a trie that is only in that language. """ assert '/' not in language, "Language codes cannot contain slashes" assert '-' not in language, "This code should be reduced to a language subtag only" trie_name = '{}/name_to_{}'.format(language, category) if trie_name not in TRIES: TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name))) trie = TRIES[trie_name] lookup = normalize_name(name) if lookup in trie: return get_trie_value(trie, lookup) else: # Is this a language plus extra junk? Maybe it has "...isch", "... language", # or "... Chinese" attached to it, for example. prefixes = trie.prefixes(lookup) if prefixes and len(prefixes[-1]) >= 4: return get_trie_value(trie, prefixes[-1]) else: return None
python
{ "resource": "" }
q7974
code_to_names
train
def code_to_names(category, code): """ Given the code for a language, script, or region, get a dictionary of its names in various languages. """ trie_name = '{}_to_name'.format(category) if trie_name not in TRIES: TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name))) trie = TRIES[trie_name] lookup = code.lower() + '@' possible_keys = trie.keys(lookup) names = {} for key in possible_keys: target_language = key.split('@')[1] names[target_language] = get_trie_value(trie, key) return names
python
{ "resource": "" }
q7975
print_huffman_code_cwl
train
def print_huffman_code_cwl(code, p, v):
    """ Print a Huffman code table and compute its average code word length (cwl).
        code - dictionary mapping each symbol to its code word
        p    - list of symbol probabilities, aligned with the symbol list v
        v    - list of symbols """
    cwl = 0.0
    for k, _v in code.items():
        print(u"%s -> %s" % (k, _v))
        cwl += p[v.index(k)] * len(_v)
    print(u"cwl = %g" % cwl)
    return cwl, code.values()
python
{ "resource": "" }
q7976
oridam_generate_patterns
train
def oridam_generate_patterns(word_in, cm, ed=1, level=0, pos=0, candidates=None):
    """ generate spelling-correction candidates for word_in using the
        confusion map cm. ed is the edit-distance budget (1 by default);
        level and pos are internal recursion variables and should not be
        set by callers. """
    alternates = cm.get(word_in[pos], [])
    if not candidates:
        candidates = []
    assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
    if (pos > len(word_in)) or ed == 0:
        return candidates
    pfx = ''
    sfx = ''
    curr_candidates = []
    for p in range(0, pos):
        pfx = pfx + word_in[p]
    for p in range(pos + 1, len(word_in)):
        sfx = sfx + word_in[p]
    for alt in alternates:
        word_alt = pfx + alt + sfx
        if not (word_alt in candidates):
            candidates.append( word_alt )
            curr_candidates.append( word_alt )
    for n_pos in range(pos, len(word_in)):
        # the candidates collected in this round are at edit distance 1;
        # recurse on them with the remaining edit budget
        for word in curr_candidates:
            oridam_generate_patterns(word, cm, ed - 1, level + 1, n_pos, candidates)
    if level == 0:
        for n_pos in range(pos, len(word_in)):
            oridam_generate_patterns(word_in, cm, ed, level + 1, n_pos, candidates)
    return candidates
python
{ "resource": "" }
q7977
uyirmei_constructed
train
def uyirmei_constructed( mei_idx, uyir_idx):
    """ construct uyirmei letter given mei index and uyir index """
    idx,idy = mei_idx,uyir_idx
    assert ( idy >= 0 and idy < uyir_len() )
    assert ( idx >= 0 and idx < 6+mei_len() )
    return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx]
python
{ "resource": "" }
q7978
has_english
train
def has_english( word_in ): """ return True if word_in has any English letters in the string""" return not all_tamil(word_in) and len(word_in) > 0 and any([l in word_in for l in string.ascii_letters])
python
{ "resource": "" }
q7979
all_tamil
train
def all_tamil( word_in ): """ predicate checks if all letters of the input word are Tamil letters """ if isinstance(word_in,list): word = word_in else: word = get_letters( word_in ) return all( [(letter in tamil_letters) for letter in word] )
python
{ "resource": "" }
q7980
compare_words_lexicographic
train
def compare_words_lexicographic( word_a, word_b ):
    """ compare words in Tamil lexicographic order """
    # sanity check: both operands should be Tamil words; non-Tamil input
    # falls through and is compared on whatever letters are found
    if ( not all_tamil(word_a) ) or (not all_tamil(word_b)) :
        pass
    La = len(word_a)
    Lb = len(word_b)
    all_TA_letters = u"".join(tamil_letters)
    for itr in range(0,min(La,Lb)):
        pos1 = all_TA_letters.find( word_a[itr] )
        pos2 = all_TA_letters.find( word_b[itr] )
        if pos1 != pos2 :
            return cmp(pos1, pos2)
    # tie on the common prefix: the shorter word sorts first, 0 if equal length
    return cmp(La,Lb)
python
{ "resource": "" }
q7981
word_intersection
train
def word_intersection( word_a, word_b ):
    """ return a list of (position in word_a, position in word_b) tuples,
        one for every pair of matching letters between the two words """
    positions = []
    word_a_letters = get_letters( word_a )
    word_b_letters = get_letters( word_b )
    for idx,wa in enumerate(word_a_letters):
        for idy,wb in enumerate(word_b_letters):
            if ( wa == wb ):
                positions.append( (idx, idy) )
    return positions
python
{ "resource": "" }
q7982
splitMeiUyir
train
def splitMeiUyir(uyirmei_char):
    """
    This function splits an uyirmei compound character into its mei + uyir
    characters and returns them as a tuple.

    Input : It must be a unicode tamil char.

    Written By : Arulalan.T
    Date : 22.09.2014

    """
    if not isinstance(uyirmei_char, PYTHON3 and str or unicode):
        raise ValueError("Passed input letter '%s' must be unicode, \
not just string" % uyirmei_char)

    if uyirmei_char in mei_letters or uyirmei_char in uyir_letters or uyirmei_char in ayudha_letter:
        return uyirmei_char

    if uyirmei_char not in grantha_uyirmei_letters:
        if not is_normalized( uyirmei_char ):
            norm_char = unicode_normalize(uyirmei_char)
            rval = splitMeiUyir( norm_char )
            return rval
        raise ValueError("Passed input letter '%s' is not a tamil letter" % uyirmei_char)

    idx = grantha_uyirmei_letters.index(uyirmei_char)
    uyiridx = idx % 12
    meiidx = int((idx - uyiridx) / 12)

    return (grantha_mei_letters[meiidx], uyir_letters[uyiridx])
python
{ "resource": "" }
q7983
joinMeiUyir
train
def joinMeiUyir(mei_char, uyir_char):
    """
    This function joins a mei character and an uyir character, and returns
    the compound uyirmei unicode character.

    Inputs:
        mei_char : It must be a unicode tamil mei char.
        uyir_char : It must be a unicode tamil uyir char.

    Written By : Arulalan.T
    Date : 22.09.2014

    """
    if not mei_char:
        return uyir_char
    if not uyir_char:
        return mei_char
    if not isinstance(mei_char, PYTHON3 and str or unicode):
        raise ValueError(u"Passed input mei character '%s' must be unicode, not just string" % mei_char)
    if not isinstance(uyir_char, PYTHON3 and str or unicode) and uyir_char != None:
        raise ValueError(u"Passed input uyir character '%s' must be unicode, not just string" % uyir_char)
    if mei_char not in grantha_mei_letters:
        raise ValueError(u"Passed input character '%s' is not a tamil mei character" % mei_char)
    if uyir_char not in uyir_letters:
        raise ValueError(u"Passed input character '%s' is not a tamil uyir character" % uyir_char)

    if uyir_char:
        uyiridx = uyir_letters.index(uyir_char)
    else:
        return mei_char
    meiidx = grantha_mei_letters.index(mei_char)
    # calculate uyirmei index
    uyirmeiidx = meiidx * 12 + uyiridx

    return grantha_uyirmei_letters[uyirmeiidx]
python
{ "resource": "" }
q7984
make_pattern
train
def make_pattern( patt, flags=0 ):
    """ expand Tamil letter ranges like 'அ-ஔ' in patt and return a tuple of
        (compiled regular expression object, expanded pattern string) """
    patt_letters = utf8.get_letters( patt )
    patt_out = list()
    idx = 0
    patt = [None,None]
    prev = None
    LEN_PATT = len(patt_letters)
    while( idx < LEN_PATT ):
        if utf8.istamil( patt_letters[idx] ) and ( prev == '-' or ((idx+1) < LEN_PATT and patt_letters[idx+1] == u'-') ):
            if (idx+1) < LEN_PATT and patt_letters[idx+1] == u'-':
                patt[0] = patt_letters[idx]
                idx = idx + 2
                prev = "-"
            elif prev == '-':
                patt[1] = patt_letters[idx]
                patt_out.extend( expand_tamil( patt[0], patt[1]) )
                # record the range-end letter as prev *before* advancing idx,
                # so we do not index past the end of the pattern
                prev = patt_letters[idx]
                idx = idx + 1
            continue
        patt_out.extend( patt_letters[idx] )
        prev = patt_letters[idx]
        idx = idx + 1
    opattern = u"".join( patt_out )
    compile_regexp = re.compile( opattern, flags )
    return (compile_regexp,opattern)
python
{ "resource": "" }
q7985
WordXSec.compute
train
def compute( self ):
        """ build a dictionary of words and their pairwise intersections
            into the @xsections dictionary """
        wordlist = self.wordlist
        xsections = {}
        for i in range(len(wordlist)):
            word_i = wordlist[i]
            for j in range(len(wordlist)):
                word_j = wordlist[j]
                if i == j:
                    # force self-intersection to be empty
                    if not xsections.get(word_i,None):
                        xsections[word_i] = ['']
                    else:
                        xsections[word_i].extend([''])
                    continue
                # optimize for i > j: the info is calculated already, but the
                # stored tuples are (pos in word_j, pos in word_i), so they
                # must be swapped to keep positions consistent with
                # word_intersection(word_i, word_j)
                if i > j:
                    xsec_counts = [(b, a) for (a, b) in xsections[word_j][i]]
                else:
                    xsec_counts = tamil.utf8.word_intersection( word_i, word_j )
                if not xsections.get(word_i,None):
                    xsections[word_i] = [xsec_counts]
                else:
                    xsections[word_i].extend( [ xsec_counts ] )
        self.xsections = xsections
python
{ "resource": "" }
q7986
BaseStemmer.set_current
train
def set_current(self, value): ''' Set the self.current string. ''' self.current = value self.cursor = 0 self.limit = len(self.current) self.limit_backward = 0 self.bra = self.cursor self.ket = self.limit
python
{ "resource": "" }
q7987
BaseStemmer.replace_s
train
def replace_s(self, c_bra, c_ket, s): ''' to replace chars between c_bra and c_ket in self.current by the chars in s. @type c_bra int @type c_ket int @type s: string ''' adjustment = len(s) - (c_ket - c_bra) self.current = self.current[0:c_bra] + s + self.current[c_ket:] self.limit += adjustment if self.cursor >= c_ket: self.cursor += adjustment elif self.cursor > c_bra: self.cursor = c_bra return adjustment
python
{ "resource": "" }
q7988
BaseStemmer.slice_to
train
def slice_to(self, s):
        '''
        Return the text between self.bra and self.ket as a string. (In the
        original Snowball runtime this copies the slice into a supplied
        StringBuffer; here the slice is simply returned.)
        @type s: string
        '''
        result = ''
        if self.slice_check():
            result = self.current[self.bra:self.ket]
        return result
python
{ "resource": "" }
q7989
TamilTweetParser.getTamilWords
train
def getTamilWords( tweet ):
        """ return the whitespace-separated words of the tweet whose letters are all Tamil """
        tweet = TamilTweetParser.cleanupPunct( tweet )
        nonETwords = filter( lambda x: len(x) > 0, re.split(r'\s+', tweet) )
        tamilWords = filter( TamilTweetParser.isTamilPredicate, nonETwords )
        return tamilWords
python
{ "resource": "" }
q7990
validate_split_runs_file
train
def validate_split_runs_file(split_runs_file):
    """Check if structure of file is as expected and return dictionary linking names to run_IDs."""
    try:
        content = [l.strip() for l in split_runs_file.readlines()]
        if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
            return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c}
        else:
            # log before exiting; sys.exit() raises, so anything after it is unreachable
            logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
            sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
    except IndexError:
        logging.error("Format of --split_runs tab separated file not as expected")
        sys.exit("ERROR: Format of --split_runs tab separated file not as expected")
python
{ "resource": "" }
q7991
change_identifiers
train
def change_identifiers(datadf, split_dict): """Change the dataset identifiers based on the names in the dictionary.""" for rid, name in split_dict.items(): datadf.loc[datadf["runIDs"] == rid, "dataset"] = name
python
{ "resource": "" }
q7992
MCP9808.begin
train
def begin(self):
        """Start taking temperature measurements. Returns True if the device is
        initialized, False otherwise.
        """
        # Check manufacturer and device ID match expected values.
        mid = self._device.readU16BE(MCP9808_REG_MANUF_ID)
        did = self._device.readU16BE(MCP9808_REG_DEVICE_ID)
        self._logger.debug('Read manufacturer ID: {0:04X}'.format(mid))
        self._logger.debug('Read device ID: {0:04X}'.format(did))
        return mid == 0x0054 and did == 0x0400
python
{ "resource": "" }
q7993
MCP9808.readTempC
train
def readTempC(self): """Read sensor and return its value in degrees celsius.""" # Read temperature register value. t = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP) self._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF)) # Scale and convert to signed value. temp = (t & 0x0FFF) / 16.0 if t & 0x1000: temp -= 256.0 return temp
python
{ "resource": "" }
q7994
crud_url_name
train
def crud_url_name(model, action, prefix=None): """ Returns url name for given model and action. """ if prefix is None: prefix = "" app_label = model._meta.app_label model_lower = model.__name__.lower() return '%s%s_%s_%s' % (prefix, app_label, model_lower, action)
python
{ "resource": "" }
q7995
crud_url
train
def crud_url(instance_or_model, action, prefix=None, additional_kwargs=None):
    """Shortcut function returns URL for instance or model and action.

    Example::

        crud_url(author, 'update')

    Is same as:

        reverse('testapp_author_update', kwargs={'pk': author.pk})

    Example::

        crud_url(Author, 'list')

    Is same as:

        reverse('testapp_author_list')
    """
    if additional_kwargs is None:
        additional_kwargs = {}
    if isinstance(instance_or_model, Model):
        additional_kwargs['pk'] = instance_or_model.pk
        model_name = instance_or_model._meta.model
    else:
        model_name = instance_or_model
    return reverse(
        crud_url_name(model_name, action, prefix),
        kwargs=additional_kwargs
    )
python
{ "resource": "" }
q7996
format_value
train
def format_value(obj, field_name): """ Simple value formatting. If value is model instance returns link to detail view if exists. """ display_func = getattr(obj, 'get_%s_display' % field_name, None) if display_func: return display_func() value = getattr(obj, field_name) if isinstance(value, models.fields.files.FieldFile): if value: return mark_safe('<a href="%s">%s</a>' % ( value.url, os.path.basename(value.name), )) else: return '' if isinstance(value, models.Model): return format_value_instance(value) if isinstance(value, models.Manager): return mark_safe(', '.join( [format_value_instance(instance) for instance in value.all()] )) if value is None: value = "" return value
python
{ "resource": "" }
q7997
get_fields
train
def get_fields(model, fields=None):
    """
    Return the fields of the model, optionally restricted to the names
    given in the comma-separated `fields` string.
    """
    include = [f.strip() for f in fields.split(',')] if fields else None
    return utils.get_fields(
        model,
        include
    )
python
{ "resource": "" }
q7998
crud_urls
train
def crud_urls(model,
              list_view=None,
              create_view=None,
              update_view=None,
              detail_view=None,
              delete_view=None,
              url_prefix=None,
              name_prefix=None,
              list_views=None,
              **kwargs):
    """Returns a list of url patterns for model.

    :param list_view:
    :param create_view:
    :param update_view:
    :param detail_view:
    :param delete_view:
    :param url_prefix: prefix to prepend to each pattern, default is `'^'`
    :param name_prefix: prefix to prepend to each name, default is empty string
    :param list_views: (dict) additional list views
    :param **kwargs: additional detail views
    :returns: urls
    """
    if url_prefix is None:
        url_prefix = r'^'
    urls = []
    if list_view:
        urls.append(url(
            url_prefix + '$',
            list_view,
            name=utils.crud_url_name(model, utils.ACTION_LIST, name_prefix)
        ))
    if create_view:
        urls.append(url(
            url_prefix + r'new/$',
            create_view,
            name=utils.crud_url_name(model, utils.ACTION_CREATE, name_prefix)
        ))
    if detail_view:
        urls.append(url(
            url_prefix + r'(?P<pk>\d+)/$',
            detail_view,
            name=utils.crud_url_name(model, utils.ACTION_DETAIL, name_prefix)
        ))
    if update_view:
        urls.append(url(
            url_prefix + r'(?P<pk>\d+)/edit/$',
            update_view,
            name=utils.crud_url_name(model, utils.ACTION_UPDATE, name_prefix)
        ))
    if delete_view:
        urls.append(url(
            url_prefix + r'(?P<pk>\d+)/remove/$',
            delete_view,
            name=utils.crud_url_name(model, utils.ACTION_DELETE, name_prefix)
        ))
    if list_views is not None:
        for name, view in list_views.items():
            urls.append(url(
                url_prefix + r'%s/$' % name,
                view,
                name=utils.crud_url_name(model, name, name_prefix)
            ))
    for name, view in kwargs.items():
        urls.append(url(
            url_prefix + r'(?P<pk>\d+)/%s/$' % name,
            view,
            name=utils.crud_url_name(model, name, name_prefix)
        ))
    return urls
python
{ "resource": "" }
q7999
crud_for_model
train
def crud_for_model(model, urlprefix=None): """Returns list of ``url`` items to CRUD a model. """ model_lower = model.__name__.lower() if urlprefix is None: urlprefix = '' urlprefix += model_lower + '/' urls = crud_urls( model, list_view=CRUDListView.as_view(model=model), create_view=CRUDCreateView.as_view(model=model), detail_view=CRUDDetailView.as_view(model=model), update_view=CRUDUpdateView.as_view(model=model), delete_view=CRUDDeleteView.as_view(model=model), url_prefix=urlprefix, ) return urls
python
{ "resource": "" }