sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def authorization_header(oauth_params):
    """Build the OAuth ``Authorization`` HTTP header value.

    :param oauth_params: mapping of OAuth parameter names to values.
    :return: string of the form ``OAuth realm="",k1="v1",k2="v2"`` with
        each value percent-encoded.
    """
    # Local import: ``urllib.quote`` is Python-2-only; on Python 3 it lives
    # in ``urllib.parse``.
    from urllib.parse import quote
    pairs = ','.join('{0}="{1}"'.format(key, quote(str(value)))
                     for key, value in oauth_params.items())
    return 'OAuth realm="",' + pairs
def grnboost2(expression_data,
              gene_names=None,
              tf_names='all',
              client_or_address='local',
              early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
              limit=None,
              seed=None,
              verbose=False):
    """
    Launch arboreto with the [GRNBoost2] profile (stochastic GBM regressors).

    :param expression_data: a pandas DataFrame (rows=observations,
           columns=genes), a dense 2D numpy.ndarray, or a sparse
           scipy.sparse.csc_matrix.
    :param gene_names: optional list of gene names (strings). Required when a
           (dense or sparse) matrix is passed as 'expression_data'.
    :param tf_names: optional list of transcription factors. If None or 'all',
           the list of gene_names will be used.
    :param client_or_address: None/'local' (spawn a Client(LocalCluster())),
           a scheduler address string, or an existing Client instance.
    :param early_stop_window_length: early stop window length. Default 25.
    :param limit: optional number (int) of top regulatory links to return.
    :param seed: optional random seed for the regressors. Default None.
    :param verbose: print info.
    :return: a pandas DataFrame['TF', 'target', 'importance'] representing
             the inferred gene regulatory links.
    """
    # GRNBoost2 == the generic pipeline with stochastic-GBM settings.
    return diy(expression_data=expression_data,
               regressor_type='GBM',
               regressor_kwargs=SGBM_KWARGS,
               gene_names=gene_names,
               tf_names=tf_names,
               client_or_address=client_or_address,
               early_stop_window_length=early_stop_window_length,
               limit=limit,
               seed=seed,
               verbose=verbose)
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as
'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:param client_or_address: one of:
* None or 'local': a new Client(LocalCluster()) will be used to perform the computation.
* string address: a new Client(address) will be used to perform the computation.
* a Client instance: the specified Client instance will be used to perform the computation.
:param early_stop_window_length: early stop window length. Default 25.
:param limit: optional number (int) of top regulatory links to return. Default None.
:param seed: optional random seed for the regressors. Default None.
:param verbose: print info.
:return: a pandas DataFrame['TF', 'target', 'importance'] representing the inferred gene regulatory links. | entailment |
def genie3(expression_data,
           gene_names=None,
           tf_names='all',
           client_or_address='local',
           limit=None,
           seed=None,
           verbose=False):
    """
    Launch arboreto with the [GENIE3] profile (Random Forest regressors).

    :param expression_data: a pandas DataFrame (rows=observations,
           columns=genes), a dense 2D numpy.ndarray, or a sparse
           scipy.sparse.csc_matrix.
    :param gene_names: optional list of gene names (strings). Required when a
           (dense or sparse) matrix is passed as 'expression_data'.
    :param tf_names: optional list of transcription factors. If None or 'all',
           the list of gene_names will be used.
    :param client_or_address: None/'local' (spawn a Client(LocalCluster())),
           a scheduler address string, or an existing Client instance.
    :param limit: optional number (int) of top regulatory links to return.
    :param seed: optional random seed for the regressors. Default None.
    :param verbose: print info.
    :return: a pandas DataFrame['TF', 'target', 'importance'] representing
             the inferred gene regulatory links.
    """
    # GENIE3 == the generic pipeline with Random-Forest settings.
    return diy(expression_data=expression_data,
               regressor_type='RF',
               regressor_kwargs=RF_KWARGS,
               gene_names=gene_names,
               tf_names=tf_names,
               client_or_address=client_or_address,
               limit=limit,
               seed=seed,
               verbose=verbose)
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as
'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:param client_or_address: one of:
* None or 'local': a new Client(LocalCluster()) will be used to perform the computation.
* string address: a new Client(address) will be used to perform the computation.
* a Client instance: the specified Client instance will be used to perform the computation.
:param limit: optional number (int) of top regulatory links to return. Default None.
:param seed: optional random seed for the regressors. Default None.
:param verbose: print info.
:return: a pandas DataFrame['TF', 'target', 'importance'] representing the inferred gene regulatory links. | entailment |
def diy(expression_data,
        regressor_type,
        regressor_kwargs,
        gene_names=None,
        tf_names='all',
        client_or_address='local',
        early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
        limit=None,
        seed=None,
        verbose=False):
    """
    Run the arboreto GRN-inference pipeline with a caller-chosen regressor.

    :param expression_data: a pandas DataFrame (rows=observations,
           columns=genes), a dense 2D numpy.ndarray, or a sparse
           scipy.sparse.csc_matrix.
    :param regressor_type: string. One of: 'RF', 'GBM', 'ET'. Case insensitive.
    :param regressor_kwargs: a dictionary of key-value pairs that configures
           the regressor.
    :param gene_names: optional list of gene names (strings). Required when a
           (dense or sparse) matrix is passed as 'expression_data'.
    :param tf_names: optional list of transcription factors. If None or 'all',
           the list of gene_names will be used.
    :param client_or_address: None/'local' (spawn a Client(LocalCluster())),
           a scheduler address string, or an existing Client instance.
    :param early_stop_window_length: early stopping window length.
    :param limit: optional number (int) of top regulatory links to return.
    :param seed: optional random seed for the regressors. Use None for a
           random seed.
    :param verbose: print info.
    :return: a pandas DataFrame['TF', 'target', 'importance'] representing
             the inferred gene regulatory links.
    """
    def _log(message):
        # Progress messages only when the caller asked for them.
        if verbose:
            print(message)

    _log('preparing dask client')
    client, shutdown_callback = _prepare_client(client_or_address)

    try:
        _log('parsing input')
        expression_matrix, gene_names, tf_names = _prepare_input(
            expression_data, gene_names, tf_names)

        _log('creating dask graph')
        graph = create_graph(expression_matrix,
                             gene_names,
                             tf_names,
                             client=client,
                             regressor_type=regressor_type,
                             regressor_kwargs=regressor_kwargs,
                             early_stop_window_length=early_stop_window_length,
                             limit=limit,
                             seed=seed)

        if verbose:
            print('{} partitions'.format(graph.npartitions))
            print('computing dask graph')

        network = client.compute(graph, sync=True)
        return network.sort_values(by='importance', ascending=False)
    finally:
        # Always release the client/cluster, even if computation failed.
        shutdown_callback(verbose)
        _log('finished')
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param regressor_type: string. One of: 'RF', 'GBM', 'ET'. Case insensitive.
:param regressor_kwargs: a dictionary of key-value pairs that configures the regressor.
:param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as
'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:param early_stop_window_length: early stopping window length.
:param client_or_address: one of:
* None or 'local': a new Client(LocalCluster()) will be used to perform the computation.
* string address: a new Client(address) will be used to perform the computation.
* a Client instance: the specified Client instance will be used to perform the computation.
:param limit: optional number (int) of top regulatory links to return. Default None.
:param seed: optional random seed for the regressors. Default 666. Use None for random seed.
:param verbose: print info.
:return: a pandas DataFrame['TF', 'target', 'importance'] representing the inferred gene regulatory links. | entailment |
def _prepare_client(client_or_address):
    """
    Resolve the caller's client specification into a usable dask Client.

    :param client_or_address: one of: None, the string 'local', a scheduler
        address string, or an existing Client instance.
    :return: a tuple: (Client instance, shutdown callback function).
    :raises: ValueError if no valid client input was provided.
    """
    if client_or_address is None or str(client_or_address).lower() == 'local':
        # Spin up a private local cluster; the callback tears down both.
        cluster = LocalCluster(diagnostics_port=None)
        local_client = Client(cluster)

        def shutdown_local(verbose=False):
            if verbose:
                print('shutting down client and local cluster')
            local_client.close()
            cluster.close()

        return local_client, shutdown_local

    if isinstance(client_or_address, str):
        # A scheduler address: connect, and only the client is ours to close.
        remote_client = Client(client_or_address)

        def shutdown_remote(verbose=False):
            if verbose:
                print('shutting down client')
            remote_client.close()

        return remote_client, shutdown_remote

    if isinstance(client_or_address, Client):
        # Caller owns the client; never close it on their behalf.
        def shutdown_noop(verbose=False):
            if verbose:
                print('not shutting down client, client was created externally')
            return None

        return client_or_address, shutdown_noop

    raise ValueError("Invalid client specified {}".format(str(client_or_address)))
* None
* verbatim: 'local'
* string address
* a Client instance
:return: a tuple: (Client instance, shutdown callback function).
:raises: ValueError if no valid client input was provided. | entailment |
def _prepare_input(expression_data,
gene_names,
tf_names):
"""
Wrangle the inputs into the correct formats.
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings).
Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:return: a triple of:
1. a np.ndarray or scipy.sparse.csc_matrix
2. a list of gene name strings
3. a list of transcription factor name strings.
"""
if isinstance(expression_data, pd.DataFrame):
expression_matrix = expression_data.as_matrix()
gene_names = list(expression_data.columns)
else:
expression_matrix = expression_data
assert expression_matrix.shape[1] == len(gene_names)
if tf_names is None:
tf_names = gene_names
elif tf_names == 'all':
tf_names = gene_names
else:
if len(tf_names) == 0:
raise ValueError('Specified tf_names is empty')
if not set(gene_names).intersection(set(tf_names)):
raise ValueError('Intersection of gene_names and tf_names is empty.')
return expression_matrix, gene_names, tf_names | Wrangle the inputs into the correct formats.
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings).
Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:return: a triple of:
1. a np.ndarray or scipy.sparse.csc_matrix
2. a list of gene name strings
3. a list of transcription factor name strings. | entailment |
def _make_common_signature(self):
"""生成通用签名, 一般情况下,您不需要调用该方法 文档详见 http://docs.rongcloud.cn/server.html#_API_调用签名规则
:return: {'app-key':'xxx','nonce':'xxx','timestamp':'xxx','signature':'xxx'}
"""
nonce = str(random.random())
timestamp = str(int(time.time()) * 1000)
signature = hashlib.sha1((self._app_secret + nonce + timestamp).encode(
'utf-8')).hexdigest()
return {
"rc-app-key": self._app_key,
"rc-nonce": nonce,
"rc-timestamp": timestamp,
"rc-signature": signature
} | 生成通用签名, 一般情况下,您不需要调用该方法 文档详见 http://docs.rongcloud.cn/server.html#_API_调用签名规则
:return: {'app-key':'xxx','nonce':'xxx','timestamp':'xxx','signature':'xxx'} | entailment |
def _http_call(self, url, method, **kwargs):
    """Perform an HTTP request and log request/response details."""
    logging.debug("Request[{0}]: {1}".format(method, url))
    started = datetime.datetime.now()
    logging.debug("Header: {0}".format(kwargs['headers']))
    logging.debug("Params: {0}".format(kwargs['data']))
    # NOTE(review): verify=False disables TLS certificate validation —
    # confirm this is intentional for the target API host.
    response = requests.request(method, url, verify=False, **kwargs)
    elapsed = datetime.datetime.now() - started
    logging.debug("Response[{0:d}]: {1}, Duration: {2}.{3}s.".format(
        response.status_code, response.reason, elapsed.seconds,
        elapsed.microseconds))
    return response
def call_api(self,
             action,
             params=None,
             method=('API', 'POST', 'application/x-www-form-urlencoded'),
             **kwargs):
    """
    Dispatch an API call to the appropriate RongCloud host.

    :param action: URL path of the API method.
    :param params: dict of form/JSON parameters for the API.
    :param method: triple of (url type, HTTP verb, content type).
    :param kwargs: extra options forwarded to the HTTP layer
        (e.g. a float ``timeout``).
    :return: the HTTP response object.
    """
    url_type, http_verb, content_type = method
    # SMS endpoints live on a dedicated host; everything else on api_host.
    base_url = self.sms_host if url_type == 'SMS' else self.api_host
    if content_type == 'application/json':
        payload = json.dumps(params)
    else:
        payload = self._filter_params(params)
    return self._http_call(
        url=base_url + action,
        method=http_verb,
        data=payload,
        headers=self._headers(content_type),
        **kwargs)
def calculate_width_and_height(url_parts, options):
    '''Append the "<width>x<height>" segment to url_parts, if needed.'''
    width = options.get('width', 0)
    height = options.get('height', 0)
    width_given = bool(width)
    height_given = bool(height)
    flip = options.get('flip', False)
    flop = options.get('flop', False)

    # Mirroring is encoded as a negated dimension.
    if flip:
        width = -width
    if flop:
        height = -height

    if not width_given and not height_given:
        # With no explicit size, "-0" keeps the mirror flag visible.
        if flip:
            width = "-0"
        if flop:
            height = "-0"

    if width or height:
        url_parts.append('%sx%s' % (width, height))
def url_for(**options):
    '''Return the image URL for the specified options, ending with an
    MD5 hash of the source image URL.'''
    parts = get_url_parts(**options)
    digest = hashlib.md5(b(options['image_url'])).hexdigest()
    parts.append(digest)
    return "/".join(parts)
def mini(description, **kwargs):
    """Send a single notification in one line.

    Only the message itself is required; reasonable defaults are attempted
    for every other option.

    :param string description: Notification message
    """
    options = dict(kwargs, notifierFactory=GrowlNotifier)
    gntp.notifier.mini(description, **options)
def create(self, chatRoomInfo):
    """
    Create chatrooms.

    :param chatRoomInfo: iterable of (id, name) pairs for the chatrooms to
        create (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {}
    for room_id, room_name in chatRoomInfo:
        form['chatroom[{0}]'.format(room_id)] = room_name
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/create.json',
        params=form)
    return Response(response, result_desc)
@param chatRoomInfo:id:要创建的聊天室的id;name:要创建的聊天室的name。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def queryUser(self, chatroomId, count, order):
    """
    Query the users inside a chatroom.

    :param chatroomId: chatroom id to query (required).
    :param count: number of members to fetch, capped at 500 (required).
    :param order: join order; 1 = ascending join time, 2 = descending
        (required).
    :return: Response wrapping {code, total, users, errorMessage}.
    """
    result_desc = {
        "name": "ChatroomUserQueryReslut",
        "desc": " chatroomUserQuery 返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "total",
            "type": "Integer",
            "desc": "聊天室中用户数。"
        }, {
            "name": "users",
            "type": "List<ChatRoomUser>",
            "desc": "聊天室成员列表。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {
        "chatroomId": chatroomId,
        "count": count,
        "order": order,
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/user/query.json',
        params=form)
    return Response(response, result_desc)
@param chatroomId:要查询的聊天室 ID。(必传)
@param count:要获取的聊天室成员数,上限为 500 ,超过 500 时最多返回 500 个成员。(必传)
@param order:加入聊天室的先后顺序, 1 为加入时间正序, 2 为加入时间倒序。(必传)
@return code:返回码,200 为正常。
@return total:聊天室中用户数。
@return users:聊天室成员列表。
@return errorMessage:错误信息。 | entailment |
def stopDistributionMessage(self, chatroomId):
    """
    Stop message distribution in a chatroom.

    After this call the RongCloud server no longer forwards messages sent in
    the chatroom to its other members.

    :param chatroomId: chatroom id (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/message/stopDistribution.json',
        params={"chatroomId": chatroomId})
    return Response(response, result_desc)
@param chatroomId:聊天室 Id。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def addGagUser(self, userId, chatroomId, minute):
    """
    Mute (gag) a chatroom member.

    A muted user can still read chatroom messages but cannot send any.

    :param userId: user id to mute (required).
    :param chatroomId: chatroom id (required).
    :param minute: mute duration in minutes, max 43200 (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {
        "userId": userId,
        "chatroomId": chatroomId,
        "minute": minute,
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/user/gag/add.json',
        params=form)
    return Response(response, result_desc)
@param userId:用户 Id。(必传)
@param chatroomId:聊天室 Id。(必传)
@param minute:禁言时长,以分钟为单位,最大值为43200分钟。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def rollbackBlockUser(self, userId, chatroomId):
    """
    Remove a ban on a chatroom member.

    :param userId: user id to unban (required).
    :param chatroomId: chatroom id (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {
        "userId": userId,
        "chatroomId": chatroomId,
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/user/block/rollback.json',
        params=form)
    return Response(response, result_desc)
@param userId:用户 Id。(必传)
@param chatroomId:聊天室 Id。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def addPriority(self, objectName):
    """
    Mark message types as low priority for chatrooms.

    :param objectName: low-priority message type(s); at most 5 per call and
        at most 20 configured in total (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/message/priority/add.json',
        params={"objectName": objectName})
    return Response(response, result_desc)
@param objectName:低优先级的消息类型,每次最多提交 5 个,设置的消息类型最多不超过 20 个。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def setUserPushTag(self, userTag):
    """
    Set push tags for a user.

    :param userTag: user tag payload, sent as JSON.
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    response = self.call_api(
        method=('API', 'POST', 'application/json'),
        action='/user/tag/set.json',
        params=userTag)
    return Response(response, result_desc)
@param userTag:用户标签。
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def broadcastPush(self, pushMessage):
    """
    Broadcast a push message.

    When ``fromuserid`` and ``message`` are null the push is
    notification-only (not stored as a message).

    :param pushMessage: push payload, sent as JSON.
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    response = self.call_api(
        method=('API', 'POST', 'application/json'),
        action='/push.json',
        params=pushMessage)
    return Response(response, result_desc)
@param pushMessage:json数据
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def load_tf_names(path):
    """
    Read transcription factor names, one per line, from a text file.

    :param path: the path of the transcription factor list file.
    :return: a list of transcription factor names read from the file.
    """
    with open(path) as tf_file:
        return [raw_line.strip() for raw_line in tf_file]
:return: a list of transcription factor names read from the file. | entailment |
def sync(self, userId, groupInfo):
    """
    Synchronize a user's group memberships with RongCloud.

    Submit on a user's first connection so the server knows every group the
    user currently belongs to. Existing group ids are not renamed here; use
    the refresh method to rename a group.

    :param userId: the user whose groups are synchronized (required).
    :param groupInfo: iterable of (group id, group name) pairs.
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {}
    for group_id, group_name in groupInfo:
        form['group[{0}]'.format(group_id)] = group_name
    form['userId'] = userId
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/group/sync.json',
        params=form)
    return Response(response, result_desc)
@param userId:被同步群信息的用户 Id。(必传)
@param groupInfo:该用户的群信息,如群 Id 已经存在,则不会刷新对应群组名称,如果想刷新群组名称请调用刷新群组信息方法。
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def refresh(self, groupId, groupName):
    """
    Refresh (rename) a group's information.

    :param groupId: group id (required).
    :param groupName: new group name (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {
        "groupId": groupId,
        "groupName": groupName,
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/group/refresh.json',
        params=form)
    return Response(response, result_desc)
@param groupId:群组 Id。(必传)
@param groupName:群名称。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def join(self, userId, groupId, groupName):
    """
    Add user(s) to a group.

    Members receive the group's messages; a user may join at most 500 groups
    and each group holds at most 3000 members.

    :param userId: user id(s) to add, up to 1000 per call (required).
    :param groupId: group id to join (required).
    :param groupName: name of that group (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {
        "userId": userId,
        "groupId": groupId,
        "groupName": groupName,
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/group/join.json',
        params=form)
    return Response(response, result_desc)
@param userId:要加入群的用户 Id,可提交多个,最多不超过 1000 个。(必传)
@param groupId:要加入的群 Id。(必传)
@param groupName:要加入的群 Id 对应的名称。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def queryUser(self, groupId):
    """
    Query the members of a group.

    :param groupId: group id (required).
    :return: Response wrapping {code, id, users}; code 200 means success.
    """
    result_desc = {
        "name": "GroupUserQueryReslut",
        "desc": "groupUserQuery返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "id",
            "type": "String",
            "desc": "群成员用户Id。"
        }, {
            "name": "users",
            "type": "List<GroupUser>",
            "desc": "群成员列表。"
        }]
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/group/user/query.json',
        params={"groupId": groupId})
    return Response(response, result_desc)
@param groupId:群组Id。(必传)
@return code:返回码,200 为正常。
@return id:群成员用户Id。
@return users:群成员列表。 | entailment |
def dismiss(self, userId, groupId):
    """
    Dismiss (disband) a group; no member receives its messages afterwards.

    :param userId: user performing the dismissal (required).
    :param groupId: group id to dismiss (required).
    :return: Response wrapping {code, errorMessage}; code 200 means success.
    """
    result_desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    form = {
        "userId": userId,
        "groupId": groupId,
    }
    response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/group/dismiss.json',
        params=form)
    return Response(response, result_desc)
@param userId:操作解散群的用户 Id。(必传)
@param groupId:要解散的群 Id。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def parse_gntp(data, password=None):
    """Attempt to parse a message as a GNTP message.

    :param string data: Message to be parsed
    :param string password: Optional password to be used to verify the message
    """
    text = gntp.shim.u(data)
    match = GNTP_INFO_LINE_SHORT.match(text)
    if not match:
        raise errors.ParseError('INVALID_GNTP_INFO')
    messagetype = match.groupdict()['messagetype']
    # Request types carry a password; response types (-OK/-ERROR) do not.
    request_types = {
        'REGISTER': GNTPRegister,
        'NOTIFY': GNTPNotice,
        'SUBSCRIBE': GNTPSubscribe,
    }
    if messagetype in request_types:
        return request_types[messagetype](text, password=password)
    if messagetype == '-OK':
        return GNTPOK(text)
    if messagetype == '-ERROR':
        return GNTPError(text)
    raise errors.ParseError('INVALID_GNTP_MESSAGE')
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message | entailment |
def _parse_info(self, data):
    """Parse the first GNTP line for version/type/security information.

    :param string data: GNTP Message
    :return dict: Parsed GNTP Info line
    """
    match = GNTP_INFO_LINE.match(data)
    if match is None:
        raise errors.ParseError('ERROR_PARSING_INFO_LINE')
    info = match.groupdict()
    # 'NONE' is the wire encoding for "no encryption"; normalize to None.
    if info['encryptionAlgorithmID'] == 'NONE':
        info['encryptionAlgorithmID'] = None
    return info
:param string data: GNTP Message
:return dict: Parsed GNTP Info line | entailment |
def set_password(self, password, encryptAlgo='MD5'):
    """Set a password for a GNTP Message.

    :param string password: None/empty to clear the password
    :param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
    """
    if not password:
        # Clear the same key that _format_info() reads. The previous code
        # reset 'keyHashAlgorithm' (a key nothing reads), so a stale
        # 'keyHashAlgorithmID' from an earlier set_password() call kept
        # emitting hash information on the info line.
        self.info['encryptionAlgorithmID'] = None
        self.info['keyHashAlgorithmID'] = None
        return
    self.password = gntp.shim.b(password)
    self.encryptAlgo = encryptAlgo.upper()
    if not self.encryptAlgo in self.hash_algo:
        raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
    hashfunction = self.hash_algo.get(self.encryptAlgo)
    password = password.encode('utf8')
    # Salt is derived from the current time; key = H(password + H(salt)).
    seed = time.ctime().encode('utf8')
    salt = hashfunction(seed).hexdigest()
    saltHash = hashfunction(seed).digest()
    keyBasis = password + saltHash
    key = hashfunction(keyBasis).digest()
    keyHash = hashfunction(key).hexdigest()
    self.info['keyHashAlgorithmID'] = self.encryptAlgo
    self.info['keyHash'] = keyHash.upper()
    self.info['salt'] = salt.upper()
:param string password: Null to clear password
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512 | entailment |
def _decode_hex(self, value):
"""Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string
"""
result = ''
for i in range(0, len(value), 2):
tmp = int(value[i:i + 2], 16)
result += chr(tmp)
return result | Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string | entailment |
def _validate_password(self, password):
    """Validate GNTP Message against stored password"""
    self.password = password
    if password is None:
        raise errors.AuthError('Missing password')
    expected_hash = self.info.get('keyHash', None)
    if expected_hash is None and self.password is None:
        return True
    if expected_hash is None:
        raise errors.AuthError('Invalid keyHash')
    if self.password is None:
        raise errors.AuthError('Missing password')
    algorithm = self.info.get('keyHashAlgorithmID', 'MD5')
    encoded_password = self.password.encode('utf8')
    salt_hash = self._decode_hex(self.info['salt'])
    # key = H(password + H(salt)); the message carries H(key) for comparison.
    self.key = self.hash_algo[algorithm](encoded_password + salt_hash).digest()
    computed_hash = self.hash_algo[algorithm](self.key).hexdigest()
    if computed_hash.upper() != self.info['keyHash'].upper():
        raise errors.AuthError('Invalid Hash')
    return True
def validate(self):
    """Verify that every required notification header is present."""
    missing = [h for h in self._requiredHeaders
               if not self.headers.get(h, False)]
    if missing:
        raise errors.ParseError('Missing Notification Header: ' + missing[0])
def _format_info(self):
"""Generate info line for GNTP Message
:return string:
"""
info = 'GNTP/%s %s' % (
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID', None):
info += ' %s:%s' % (
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info += ' NONE'
if self.info.get('keyHashAlgorithmID', None):
info += ' %s:%s.%s' % (
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info | Generate info line for GNTP Message
:return string: | entailment |
def _parse_dict(self, data):
    """Parse a block of GNTP headers into a dictionary.

    :param string data:
    :return dict: Dictionary of parsed GNTP Headers
    """
    headers = {}
    for raw_line in data.split('\r\n'):
        parsed = GNTP_HEADER.match(raw_line)
        if parsed:
            headers[parsed.group(1).strip()] = parsed.group(2).strip()
    return headers
:param string data:
:return dict: Dictionary of parsed GNTP Headers | entailment |
def add_resource(self, data):
    """Attach a binary resource and return its growl resource URI.

    :param string data: Binary Data
    """
    payload = gntp.shim.b(data)
    # Resources are keyed by the MD5 of their content.
    identifier = hashlib.md5(payload).hexdigest()
    self.resources[identifier] = payload
    return 'x-growl-resource://%s' % identifier
:param string data: Binary Data | entailment |
def decode(self, data, password=None):
    """Decode a raw GNTP message into info and header dictionaries.

    :param string data: raw GNTP message
    :param string password: optional password used for later validation
    """
    self.password = password
    self.raw = gntp.shim.u(data)
    # GNTP blocks are separated by blank lines (CRLF CRLF); the first
    # block is the main header section
    parts = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self.headers = self._parse_dict(parts[0])
:param string data: | entailment |
def validate(self):
    """Check required registration headers and each notification block."""
    missing_msg = 'Missing Registration Header: '
    for required in self._requiredHeaders:
        if not self.headers.get(required, False):
            raise errors.ParseError(missing_msg + required)
    for notification in self.notifications:
        for required in self._requiredNotificationHeaders:
            if not notification.get(required, False):
                raise errors.ParseError('Missing Notification Header: ' + required)
def decode(self, data, password):
    """Decode an existing GNTP registration message.

    Splits the raw message into blank-line separated blocks: the first
    is the main header block; each following block is either a
    notification definition or a binary resource.
    :param string data: message to decode
    :param string password: password used to validate the message
    """
    self.raw = gntp.shim.u(data)
    parts = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self._validate_password(password)
    self.headers = self._parse_dict(parts[0])
    for i, part in enumerate(parts):
        if i == 0:
            continue  # Skip the main header block parsed above
        if part.strip() == '':
            continue
        notice = self._parse_dict(part)
        if notice.get('Notification-Name', False):
            self.notifications.append(notice)
        elif notice.get('Identifier', False):
            # Binary resource: the block body after the headers is raw data
            notice['Data'] = self._decode_binary(part, notice)
            self.resources[notice.get('Identifier')] = notice
:param string data: Message to decode | entailment |
def add_notification(self, name, enabled=True):
    """Register a new notification type on this registration message.

    :param string name: notification name
    :param boolean enabled: whether this notification is enabled by default
    """
    self.notifications.append({
        'Notification-Name': name,
        'Notification-Enabled': enabled,
    })
    self.add_header('Notifications-Count', len(self.notifications))
:param string name: Notification Name
:param boolean enabled: Enable this notification by default | entailment |
def encode(self):
    """Encode this registration message into its GNTP wire format.

    :return string: encoded GNTP registration message (byte string)
    """
    buff = _GNTPBuffer()
    buff.writeln(self._format_info())
    # Main header block
    for k, v in self.headers.items():
        buff.writeheader(k, v)
    buff.writeln()
    # One header block per registered notification
    if len(self.notifications) > 0:
        for notice in self.notifications:
            for k, v in notice.items():
                buff.writeheader(k, v)
            buff.writeln()
    # Binary resources: Identifier/Length headers followed by raw data
    for resource, data in self.resources.items():
        buff.writeheader('Identifier', resource)
        buff.writeheader('Length', len(data))
        buff.writeln()
        buff.write(data)
        buff.writeln()
        buff.writeln()
    return buff.getvalue()
:return string: Encoded GNTP Registration message. Returned as a byte string | entailment |
def getImageCode(self, appKey):
    """Fetch an image CAPTCHA used to guard SMS verification-code sending.

    :param appKey: application Id.
    :return code: status code, 200 means success.
    :return url: URL of the returned CAPTCHA image.
    :return verifyId: id identifying this image verification.
    :return errorMessage: error message.
    """
    # Response schema passed to Response() for result parsing
    # (string values are runtime data and intentionally left as-is)
    desc = {
        "name": "SMSImageCodeReslut",
        "desc": " getImageCode 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "url",
            "type": "String",
            "desc": "返回的图片验证码 URL 地址。"
        }, {
            "name": "verifyId",
            "type": "String",
            "desc": "返回图片验证标识 Id。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    r = self.call_api(
        method=('SMS', 'GET', ''),
        action='/getImgCode.json',
        params={"appKey": appKey})
    return Response(r, desc)
@param appKey:应用Id
@return code:返回码,200 为正常。
@return url:返回的图片验证码 URL 地址。
@return verifyId:返回图片验证标识 Id。
@return errorMessage:错误信息。 | entailment |
def sendCode(self,
             mobile,
             templateId,
             region,
             verifyId=None,
             verifyCode=None):
    """Send an SMS verification code.

    :param mobile: target phone number; at most one code per minute and
        three codes per hour may be sent to the same number. (required)
    :param templateId: SMS template Id, obtained from the developer
        console (SMS service -> settings -> templates). (required)
    :param region: country calling code of the phone number; currently
        only China (86) is supported.
    :param verifyId: image verification id; required when image
        verification is enabled, otherwise optional. Obtained from the
        getImageCode result.
    :param verifyCode: image CAPTCHA text; required when image
        verification is enabled, otherwise optional.
    :return code: status code, 200 means success.
    :return sessionId: unique id of the SMS verification code.
    :return errorMessage: error message.
    """
    # Response schema passed to Response() for result parsing
    desc = {
        "name": "SMSSendCodeReslut",
        "desc": " SMSSendCodeReslut 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "sessionId",
            "type": "String",
            "desc": "短信验证码唯一标识。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    r = self.call_api(
        method=('SMS', 'POST', 'application/x-www-form-urlencoded'),
        action='/sendCode.json',
        params={
            "mobile": mobile,
            "templateId": templateId,
            "region": region,
            "verifyId": verifyId,
            "verifyCode": verifyCode
        })
    return Response(r, desc)
@param mobile:接收短信验证码的目标手机号,每分钟同一手机号只能发送一次短信验证码,同一手机号 1 小时内最多发送 3 次。(必传)
@param templateId:短信模板 Id,在开发者后台->短信服务->服务设置->短信模版中获取。(必传)
@param region:手机号码所属国家区号,目前只支持中图区号 86)
@param verifyId:图片验证标识 Id ,开启图片验证功能后此参数必传,否则可以不传。在获取图片验证码方法返回值中获取。
@param verifyCode:图片验证码,开启图片验证功能后此参数必传,否则可以不传。
@return code:返回码,200 为正常。
@return sessionId:短信验证码唯一标识。
@return errorMessage:错误信息。 | entailment |
def verifyCode(self, sessionId, code):
    """Verify an SMS verification code entered by the user.

    :param sessionId: unique id of the SMS verification code, obtained
        from the sendCode result. (required)
    :param code: the SMS verification code content. (required)
    :return code: status code, 200 means success.
    :return success: True if verification succeeded, False otherwise.
    :return errorMessage: error message.
    """
    # Response schema passed to Response() for result parsing
    desc = {
        "name": "SMSVerifyCodeResult",
        "desc": " VerifyCode 返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "success",
            "type": "Boolean",
            "desc": "true 验证成功,false 验证失败。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    r = self.call_api(
        method=('SMS', 'POST', 'application/x-www-form-urlencoded'),
        action='/verifyCode.json',
        params={"sessionId": sessionId,
                "code": code})
    return Response(r, desc)
@param sessionId:短信验证码唯一标识,在发送短信验证码方法,返回值中获取。(必传)
@param code:短信验证码内容。(必传)
@return code:返回码,200 为正常。
@return success:true 验证成功,false 验证失败。
@return errorMessage:错误信息。 | entailment |
def mini(description, applicationName='PythonMini', noteType="Message",
         title="Mini Message", applicationIcon=None, hostname='localhost',
         password=None, port=23053, sticky=False, priority=None,
         callback=None, notificationIcon=None, identifier=None,
         notifierFactory=GrowlNotifier):
    """Send a single notification with one call.

    Registers the application and sends one notification, using
    reasonable defaults for everything except the message text.
    :param string description: notification message (only required argument)
    .. warning::
        For now, only URL callbacks are supported. In the future, the
        callback argument will also support a function
    """
    try:
        growl = notifierFactory(
            applicationName=applicationName,
            notifications=[noteType],
            defaultNotifications=[noteType],
            applicationIcon=applicationIcon,
            hostname=hostname,
            password=password,
            port=port,
        )
        # Registration must succeed before a notification can be shown
        result = growl.register()
        if result is not True:
            return result
        return growl.notify(
            noteType=noteType,
            title=title,
            description=description,
            icon=notificationIcon,
            sticky=sticky,
            priority=priority,
            callback=callback,
            identifier=identifier,
        )
    except Exception:
        # We want the "mini" function to be simple and swallow Exceptions
        # in order to be less invasive
        logger.exception("Growl error")
Simple notification function in one line. Has only one required parameter
and attempts to use reasonable defaults for everything else
:param string description: Notification message
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function | entailment |
def register(self):
    """Send a GNTP registration message to the server.

    .. warning::
        Growl ignores notifications from applications that have not
        registered at least once, so call this before notify().
    """
    logger.info('Sending registration to %s:%s', self.hostname, self.port)
    register = gntp.core.GNTPRegister()
    register.add_header('Application-Name', self.applicationName)
    for notification in self.notifications:
        # Only notifications listed in defaultNotifications start enabled
        enabled = notification in self.defaultNotifications
        register.add_notification(notification, enabled)
    if self.applicationIcon:
        if self._checkIcon(self.applicationIcon):
            # Icon is a URL the server can fetch itself
            register.add_header('Application-Icon', self.applicationIcon)
        else:
            # Raw image data: embed it as a binary resource
            resource = register.add_resource(self.applicationIcon)
            register.add_header('Application-Icon', resource)
    if self.password:
        register.set_password(self.password, self.passwordHash)
    self.add_origin_info(register)
    self.register_hook(register)
    return self._send('register', register)
.. warning::
Before sending notifications to Growl, you need to have
sent a registration message at least once | entailment |
def notify(self, noteType, title, description, icon=None, sticky=False,
           priority=None, callback=None, identifier=None, custom=None):
    """Send a GNTP notification.

    .. warning::
        Must have registered with growl beforehand or messages will be ignored
    :param string noteType: One of the notification names registered earlier
    :param string title: Notification title (usually displayed on the notification)
    :param string description: The main content of the notification
    :param string icon: Icon URL path
    :param boolean sticky: Sticky notification
    :param integer priority: Message priority level from -2 to 2
    :param string callback: URL callback
    :param string identifier: Notification coalescing id
    :param dict custom: Custom attributes. Key names should be prefixed with X-
        according to the spec but this is not enforced by this class
    .. warning::
        For now, only URL callbacks are supported. In the future, the
        callback argument will also support a function
    """
    # Default of None instead of a mutable {} default argument, which
    # would be shared across calls; behavior is otherwise unchanged.
    if custom is None:
        custom = {}
    logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
    assert noteType in self.notifications
    notice = gntp.core.GNTPNotice()
    notice.add_header('Application-Name', self.applicationName)
    notice.add_header('Notification-Name', noteType)
    notice.add_header('Notification-Title', title)
    if self.password:
        notice.set_password(self.password, self.passwordHash)
    if sticky:
        notice.add_header('Notification-Sticky', sticky)
    if priority:
        notice.add_header('Notification-Priority', priority)
    if icon:
        if self._checkIcon(icon):
            # Icon is a URL the server can fetch itself
            notice.add_header('Notification-Icon', icon)
        else:
            # Raw image data: embed it as a binary resource
            resource = notice.add_resource(icon)
            notice.add_header('Notification-Icon', resource)
    if description:
        notice.add_header('Notification-Text', description)
    if callback:
        notice.add_header('Notification-Callback-Target', callback)
    if identifier:
        notice.add_header('Notification-Coalescing-ID', identifier)
    for key in custom:
        notice.add_header(key, custom[key])
    self.add_origin_info(notice)
    self.notify_hook(notice)
    return self._send('notify', notice)
.. warning::
Must have registered with growl beforehand or messages will be ignored
:param string noteType: One of the notification names registered earlier
:param string title: Notification title (usually displayed on the notification)
:param string description: The main content of the notification
:param string icon: Icon URL path
:param boolean sticky: Sticky notification
:param integer priority: Message priority level from -2 to 2
:param string callback: URL callback
:param dict custom: Custom attributes. Key names should be prefixed with X-
according to the spec but this is not enforced by this class
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function | entailment |
def subscribe(self, id, name, port):
    """Ask a remote machine to forward its notifications to us."""
    request = gntp.core.GNTPSubscribe()
    for header, value in (
            ('Subscriber-ID', id),
            ('Subscriber-Name', name),
            ('Subscriber-Port', port)):
        request.add_header(header, value)
    if self.password:
        request.set_password(self.password, self.passwordHash)
    self.add_origin_info(request)
    self.subscribe_hook(request)
    return self._send('subscribe', request)
def add_origin_info(self, packet):
    """Attach optional Origin-* headers describing this sender."""
    for header, value in (
            ('Origin-Machine-Name', platform.node()),
            ('Origin-Software-Name', 'gntp.py'),
            ('Origin-Software-Version', __version__),
            ('Origin-Platform-Name', platform.system()),
            ('Origin-Platform-Version', platform.platform())):
        packet.add_header(header, value)
def _send(self, messagetype, packet):
    """Send a GNTP packet and return True, or the server's error.

    :param string messagetype: label used for logging only
    :param packet: GNTP message object (validated and encoded here)
    :return: True on an OK response, otherwise the response's error
    :raises errors.NetworkError: on any socket failure
    """
    packet.validate()
    data = packet.encode()
    logger.debug('To : %s:%s <%s>\n%s', self.hostname, self.port, packet.__class__, data)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(self.socketTimeout)
    try:
        s.connect((self.hostname, self.port))
        # sendall instead of send: send() may transmit only part of the
        # payload, silently truncating the message.
        s.sendall(data)
        # Read until the blank line terminating the GNTP response
        recv_data = s.recv(1024)
        while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
            recv_data += s.recv(1024)
    except socket.error:
        # Python2.5 and Python3 compatible exception access
        exc = sys.exc_info()[1]
        raise errors.NetworkError(exc)
    finally:
        # Close in finally so the socket is not leaked when connect/recv
        # raises or when response parsing below fails.
        s.close()
    response = gntp.core.parse_gntp(recv_data)
    logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)
    if type(response) == gntp.core.GNTPOK:
        return True
    logger.error('Invalid response: %s', response.error())
    return response.error()
def add(self, word):
    """Add a sensitive word (blocklist entry).

    Messages containing a registered sensitive word are not delivered
    to app users; at most 50 words may be registered by default.
    :param word: sensitive word, at most 32 characters. (required)
    :return code: status code, 200 means success.
    :return errorMessage: error message.
    """
    # Response schema passed to Response() for result parsing
    desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    r = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/wordfilter/add.json',
        params={"word": word})
    return Response(r, desc)
@param word:敏感词,最长不超过 32 个字符。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。 | entailment |
def getList(self):
    """Query the list of registered sensitive words.

    :return code: status code, 200 means success.
    :return word: sensitive word content.
    :return errorMessage: error message.
    """
    # Response schema passed to Response() for result parsing
    desc = {
        "name": "ListWordfilterReslut",
        "desc": "listWordfilter返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "word",
            "type": "String",
            "desc": "敏感词内容。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }
    r = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/wordfilter/list.json',
        params={})
    return Response(r, desc)
@return code:返回码,200 为正常。
@return word:敏感词内容。
@return errorMessage:错误信息。 | entailment |
def local_path(force_download=False):
    """Download the IEDB allele database if needed; return the local XML path.

    :param force_download: re-download even if a cached copy exists
    """
    return cache.fetch(
        url=ALLELE_XML_URL,
        filename=ALLELE_XML_FILENAME,
        decompress=ALLELE_XML_DECOMPRESS,
        force=force_download)
def delete():
    """Remove the locally cached allele XML file."""
    xml_path = cache.local_path(
        filename=ALLELE_XML_FILENAME,
        url=ALLELE_XML_URL,
        decompress=ALLELE_XML_DECOMPRESS)
    os.remove(xml_path)
def load_alleles():
    """Parse the IEDB MhcAlleleName XML file into Allele namedtuples.

    Each entry carries the allele's display name, MHC class, locus,
    source organism and any synonyms. Entries missing either a name or
    an MHC class are skipped.
    :return list: Allele namedtuple objects
    """
    result = []
    path = local_path()
    etree = xml.etree.ElementTree.parse(path)
    for allele in etree.iterfind("MhcAlleleName"):
        name_element = allele.find("DisplayedRestriction")
        mhc_class_element = allele.find("Class")
        # need at least a name and an HLA class
        if name_element is None or mhc_class_element is None:
            continue
        name = name_element.text
        synonyms = set([])
        for synonym_element in allele.iterfind("Synonyms"):
            # Each Synonyms element holds a comma-separated list of names
            for synonym in synonym_element.text.split(","):
                synonyms.add(synonym.strip())
        mhc_class = mhc_class_element.text
        # "Organsim" (sic) — presumably matches a misspelled tag in the
        # IEDB XML itself; TODO confirm against the source data before
        # "fixing" the spelling.
        organism_element = allele.find("Organsim")
        if organism_element is None:
            organism = None
        else:
            organism = organism_element.text
        locus_element = allele.find("Locus")
        if locus_element is None:
            locus = None
        else:
            locus = locus_element.text
        allele_object = Allele(
            name=name,
            mhc_class=mhc_class,
            locus=locus,
            organism=organism,
            synonyms=synonyms)
        result.append(allele_object)
    return result
namedtuple objects containing information about that each allele's HLA
class and source organism. | entailment |
def load_alleles_dict():
    """Map every allele name (and each synonym) to its Allele namedtuple.

    Later alleles overwrite earlier ones when names collide.
    """
    result = {}
    for allele in load_alleles():
        result[allele.name] = allele
        for synonym in allele.synonyms:
            result[synonym] = allele
    return result
containing information about that alleles class, locus, species, &c. | entailment |
def read_pmbec_coefficients(
        key_type='row',
        verbose=True,
        filename=join(MATRIX_DIR, 'pmbec.mat')):
    """Read the PMBEC amino-acid coefficient matrix into a dictionary.

    Parameters
    ------------
    filename : str
        Location of PMBEC coefficient matrix
    key_type : str
        'row' : every key is a single amino acid,
        which maps to a dictionary for that row
        'pair' : every key is a tuple of amino acids
        'pair_string' : every key is a string of two amino acid characters
    verbose : bool
        Print rows of matrix as we read them
    """
    d = {}
    # Choose how parsed coefficients are stored before reading the file
    if key_type == 'row':
        def add_pair(row_letter, col_letter, value):
            if row_letter not in d:
                d[row_letter] = {}
            d[row_letter][col_letter] = value
    elif key_type == 'pair':
        def add_pair(row_letter, col_letter, value):
            d[(row_letter, col_letter)] = value
    else:
        assert key_type == 'pair_string', \
            "Invalid dictionary key type: %s" % key_type
        def add_pair(row_letter, col_letter, value):
            d["%s%s" % (row_letter, col_letter)] = value
    with open(filename, 'r') as f:
        lines = [line for line in f.read().split('\n') if len(line) > 0]
        # First line holds the 20 single-letter column labels
        header = lines[0]
        if verbose:
            print(header)
        residues = [
            x for x in header.split()
            if len(x) == 1 and x != ' ' and x != '\t'
        ]
        assert len(residues) == 20
        if verbose:
            print(residues)
        for line in lines[1:]:
            # Each data row: residue letter followed by 20 coefficients
            cols = [
                x
                for x in line.split(' ')
                if len(x) > 0 and x != ' ' and x != '\t'
            ]
            assert len(cols) == 21, "Expected 20 values + letter, got %s" % cols
            row_letter = cols[0]
            for i, col in enumerate(cols[1:]):
                col_letter = residues[i]
                assert col_letter != ' ' and col_letter != '\t'
                value = float(col)
                add_pair(row_letter, col_letter, value)
    return d
------------
filename : str
Location of PMBEC coefficient matrix
key_type : str
'row' : every key is a single amino acid,
which maps to a dictionary for that row
'pair' : every key is a tuple of amino acids
'pair_string' : every key is a string of two amino acid characters
verbose : bool
Print rows of matrix as we read them | entailment |
async def async_run_command(self, command, retry=False):
    """Run a command over SSH and return its stdout as a list of lines.

    Connects first if there is no live connection. On a failed channel
    open, reconnects once and retries; returns [] on persistent failure
    or timeout.
    :param command: shell command to execute
    :param retry: internal flag marking the single retry attempt
    """
    if not self.is_connected:
        await self.async_connect()
    try:
        result = await asyncio.wait_for(self._client.run(
            "%s && %s" % (_PATH_EXPORT_COMMAND, command)), 9)
    except asyncssh.misc.ChannelOpenError:
        if not retry:
            await self.async_connect()
            # The original returned the bare coroutine here (missing
            # await), handing callers an unawaited coroutine instead of
            # the command output.
            return await self.async_run_command(command, retry=True)
        self._connected = False
        _LOGGER.error("No connection to host")
        return []
    except asyncio.TimeoutError:
        # asyncio.wait_for raises asyncio.TimeoutError, which is not the
        # builtin TimeoutError before Python 3.11.
        del self._client
        self._connected = False
        _LOGGER.error("Host timeout.")
        return []
    self._connected = True
    return result.stdout.split('\n')
Connect to the SSH server if not currently connected, otherwise
use the existing connection. | entailment |
async def async_connect(self):
    """Open a new SSH connection and mark the client as connected."""
    connect_args = {
        'username': self._username or None,
        'password': self._password or None,
        'port': self._port,
        'client_keys': [self._ssh_key] if self._ssh_key else None,
        'known_hosts': None,
    }
    self._client = await asyncssh.connect(self._host, **connect_args)
    self._connected = True
async def async_run_command(self, command, first_try=True):
    """Run a command over Telnet and return decoded output lines.

    Reconnects via async_connect() at the start of every call, sends
    the command and reads until the shell prompt.
    :param command: shell command to execute
    :param first_try: internal flag; one retry is attempted on a broken
        connection before giving up
    """
    await self.async_connect()
    try:
        with (await self._io_lock):
            self._writer.write('{}\n'.format(
                "%s && %s" % (
                    _PATH_EXPORT_COMMAND, command)).encode('ascii'))
            # Read up to the prompt; drop the echoed command (first
            # line) and the prompt itself (last line)
            data = ((await asyncio.wait_for(self._reader.readuntil(
                self._prompt_string), 9)).split(b'\n')[1:-1])
    except (BrokenPipeError, LimitOverrunError):
        if first_try:
            return await self.async_run_command(command, False)
        else:
            _LOGGER.warning("connection is lost to host.")
            return[]
    except TimeoutError:
        _LOGGER.error("Host timeout.")
        return []
    finally:
        # NOTE(review): the writer is closed after every command, so each
        # call sets up a fresh connection — confirm this is intended
        # rather than keeping the Telnet session alive.
        self._writer.close()
    return [line.decode('utf-8') for line in data]
Connect to the Telnet server if not currently connected, otherwise
use the existing connection. | entailment |
async def async_connect(self):
    """Connect and log in to the ASUS-WRT Telnet server.

    Captures the shell prompt string after login so later commands can
    read output up to the prompt.
    """
    self._reader, self._writer = await asyncio.open_connection(
        self._host, self._port)
    with (await self._io_lock):
        try:
            await asyncio.wait_for(self._reader.readuntil(b'login: '), 9)
        except asyncio.streams.IncompleteReadError:
            _LOGGER.error(
                "Unable to read from router on %s:%s" % (
                    self._host, self._port))
            return
        except TimeoutError:
            # NOTE(review): no return here, so after a timeout the login
            # sequence below still runs — confirm this is intended.
            _LOGGER.error("Host timeout.")
        self._writer.write((self._username + '\n').encode('ascii'))
        await self._reader.readuntil(b'Password: ')
        self._writer.write((self._password + '\n').encode('ascii'))
        # The prompt is the last line printed after login (ends with '#')
        self._prompt_string = (await self._reader.readuntil(
            b'#')).split(b'\n')[-1]
        self._connected = True
async def _parse_lines(lines, regex):
"""Parse the lines using the given regular expression.
If a line can't be parsed it is logged and skipped in the output.
"""
results = []
if inspect.iscoroutinefunction(lines):
lines = await lines
for line in lines:
if line:
match = regex.search(line)
if not match:
_LOGGER.debug("Could not parse row: %s", line)
continue
results.append(match.groupdict())
return results | Parse the lines using the given regular expression.
If a line can't be parsed it is logged and skipped in the output. | entailment |
async def async_get_connected_devices(self):
    """Merge device info from wl, arp, neigh and (non-AP) dhcp leases.

    Later sources update earlier entries; some of these commands are
    unavailable on certain router models. When require_ip is set, only
    devices with a known IP address are returned.
    """
    devices = {}
    devices.update(await self.async_get_wl())
    devices.update(await self.async_get_arp())
    devices.update(await self.async_get_neigh(devices))
    if not self.mode == 'ap':
        devices.update(await self.async_get_leases(devices))
    return {
        key: device for key, device in devices.items()
        if not self.require_ip or device.ip is not None
    }
Calls various commands on the router and returns the superset of all
responses. Some commands will not work on some routers. | entailment |
async def async_get_bytes_total(self, use_cache=True):
    """Retrieve total transferred bytes (rx, tx) from the router.

    :param use_cache: serve the cached tuple while it is still fresh
    """
    now = datetime.utcnow()
    if use_cache and self._trans_cache_timer and self._cache_time > \
            (now - self._trans_cache_timer).total_seconds():
        return self._transfer_rates_cache
    # NOTE(review): neither _trans_cache_timer nor _transfer_rates_cache
    # is updated after this fresh read, so the cache branch above only
    # fires if those fields are populated elsewhere — confirm intended.
    rx = await self.async_get_rx()
    tx = await self.async_get_tx()
    return rx, tx
async def async_get_current_transfer_rates(self, use_cache=True):
    """Compute current (rx, tx) transfer rates in bytes per second.

    Returns the previously computed tuple when sampled again within 30
    seconds; otherwise derives the rate from the byte-counter delta
    since the last check.
    :param use_cache: forwarded to async_get_bytes_total
    """
    now = datetime.utcnow()
    data = await self.async_get_bytes_total(use_cache)
    if self._rx_latest is None or self._tx_latest is None:
        # First sample: no delta available yet, just remember counters
        self._latest_transfer_check = now
        self._rx_latest = data[0]
        self._tx_latest = data[1]
        return self._latest_transfer_data
    time_diff = now - self._latest_transfer_check
    if time_diff.total_seconds() < 30:
        return self._latest_transfer_data
    if data[0] < self._rx_latest:
        # Counter decreased (reset/wrap): use the absolute value
        rx = data[0]
    else:
        rx = data[0] - self._rx_latest
    if data[1] < self._tx_latest:
        tx = data[1]
    else:
        tx = data[1] - self._tx_latest
    self._latest_transfer_check = now
    self._rx_latest = data[0]
    self._tx_latest = data[1]
    self._latest_transfer_data = (
        math.ceil(rx / time_diff.total_seconds()) if rx > 0 else 0,
        math.ceil(tx / time_diff.total_seconds()) if tx > 0 else 0)
    return self._latest_transfer_data
async def async_current_transfer_human_readable(
        self, use_cache=True):
    """Return the current (rx, tx) rates formatted as human-readable '.../s'."""
    rx_rate, tx_rate = await self.async_get_current_transfer_rates(use_cache)
    return "%s/s" % convert_size(rx_rate), "%s/s" % convert_size(tx_rate)
def load_dataframe(
        mhc_class=None,  # 1, 2, or None for neither
        hla=None,
        exclude_hla=None,
        human_only=False,
        peptide_length=None,
        assay_method=None,
        assay_group=None,
        only_standard_amino_acids=True,
        reduced_alphabet=None,  # 20 letter AA strings -> simpler alphabet
        nrows=None):
    """
    Load IEDB T-cell data without aggregating multiple entries for same epitope
    Parameters
    ----------
    mhc_class: {None, 1, 2}
        Restrict to MHC Class I or Class II (or None for neither)
    hla: regex pattern, optional
        Restrict results to specific HLA type used in assay
    exclude_hla: regex pattern, optional
        Exclude certain HLA types
    human_only: bool
        Restrict to human samples (default False)
    peptide_length: int, optional
        Restrict epitopes to amino acid strings of given length
    assay_method string, optional
        Only collect results with assay methods containing the given string
    assay_group: string, optional
        Only collect results with assay groups containing the given string
    only_standard_amino_acids : bool, optional
        Drop sequences which use non-standard amino acids, anything outside
        the core 20, such as X or U (default = True)
    reduced_alphabet: dictionary, optional
        Remap amino acid letters to some other alphabet
        (NOTE: accepted but currently unused in this function)
    nrows: int, optional
        Don't load the full IEDB dataset but instead read only the first nrows
    """
    path = local_path()
    df = pd.read_csv(
        path,
        header=[0, 1],
        skipinitialspace=True,
        nrows=nrows,
        low_memory=False,
        error_bad_lines=False,
        encoding="latin-1")
    # (removed a leftover debug `print(df.head())` here)
    # Sometimes the IEDB seems to put in an extra comma in the
    # header line, which creates an unnamed column of NaNs.
    # To deal with this, drop any columns which are all NaN
    df = df.dropna(axis=1, how="all")
    n = len(df)
    epitope_column_key = ("Epitope", "Description")
    mhc_allele_column_key = ("MHC", "Allele Name")
    assay_group_column_key = ("Assay", "Assay Group")
    assay_method_column_key = ("Assay", "Method/Technique")
    epitopes = df[epitope_column_key].str.upper()
    null_epitope_seq = epitopes.isnull()
    n_null = null_epitope_seq.sum()
    if n_null > 0:
        logging.info("Dropping %d null sequences", n_null)
    mask = ~null_epitope_seq
    if only_standard_amino_acids:
        # if have rare or unknown amino acids, drop the sequence
        bad_epitope_seq = \
            epitopes.str.contains(bad_amino_acids, na=False).astype("bool")
        n_bad = bad_epitope_seq.sum()
        if n_bad > 0:
            logging.info("Dropping %d bad sequences", n_bad)
        mask &= ~bad_epitope_seq
    if human_only:
        # NOTE(review): the columns use a two-level header, so this
        # single-level key selects a top-level column group — confirm
        # 'Host Organism Name' resolves as intended here.
        organism = df['Host Organism Name']
        mask &= organism.str.startswith('Homo sapiens', na=False).astype('bool')
    # Match known alleles such as "HLA-A*02:01",
    # broader groupings such as "HLA-A2"
    # and unknown alleles of the MHC-1 listed either as
    # "HLA-Class I,allele undetermined"
    # or
    # "Class I,allele undetermined"
    mhc = df[mhc_allele_column_key]
    if mhc_class is not None:
        # since MHC classes can be specified as either strings ("I") or integers
        # standard them to be strings
        if mhc_class == 1:
            mhc_class = "I"
        elif mhc_class == 2:
            mhc_class = "II"
        if mhc_class not in {"I", "II"}:
            raise ValueError("Invalid MHC class: %s" % mhc_class)
        allele_dict = load_alleles_dict()
        mhc_class_mask = [False] * len(df)
        for i, allele_name in enumerate(mhc):
            allele_object = allele_dict.get(allele_name)
            if allele_object and allele_object.mhc_class == mhc_class:
                mhc_class_mask[i] = True
        mask &= np.array(mhc_class_mask)
    if hla:
        mask &= df[mhc_allele_column_key].str.contains(hla, na=False)
    if exclude_hla:
        mask &= ~(df[mhc_allele_column_key].str.contains(exclude_hla, na=False))
    if assay_group:
        mask &= df[assay_group_column_key].str.contains(assay_group)
    if assay_method:
        mask &= df[assay_method_column_key].str.contains(assay_method)
    if peptide_length:
        assert peptide_length > 0
        mask &= df[epitope_column_key].str.len() == peptide_length
    df = df[mask]
    logging.info("Returning %d / %d entries after filtering", len(df), n)
    return df
Parameters
----------
mhc_class: {None, 1, 2}
Restrict to MHC Class I or Class II (or None for neither)
hla: regex pattern, optional
Restrict results to specific HLA type used in assay
exclude_hla: regex pattern, optional
Exclude certain HLA types
human_only: bool
Restrict to human samples (default False)
peptide_length: int, optional
Restrict epitopes to amino acid strings of given length
assay_method string, optional
Only collect results with assay methods containing the given string
assay_group: string, optional
Only collect results with assay groups containing the given string
only_standard_amino_acids : bool, optional
Drop sequences which use non-standard amino acids, anything outside
the core 20, such as X or U (default = True)
reduced_alphabet: dictionary, optional
Remap amino acid letters to some other alphabet
nrows: int, optional
Don't load the full IEDB dataset but instead read only the first nrows | entailment |
def parse_blosum_table(table, coeff_type=int, key_type='row'):
    """Parse a pairwise amino-acid coefficient table (e.g. BLOSUM50).

    Parameters
    ----------
    table : str
        Whitespace-delimited matrix text; lines starting with '#' are
        comments. The first non-comment line lists the column labels.
    coeff_type : callable
        Conversion applied to each coefficient string (default int).
    key_type : {'row', 'pair', 'pair_string'}
        'row': nested dicts d[row][col]; 'pair': d[(row, col)];
        'pair_string': d[row + col].

    Returns
    -------
    dict of coefficients keyed according to *key_type*.

    Raises
    ------
    ValueError for an unknown key_type or a malformed table. (These were
    previously asserts, which disappear under ``python -O``.)
    """
    if key_type not in ('row', 'pair', 'pair_string'):
        raise ValueError("Unknown key type: %s" % key_type)
    lines = table.split("\n")
    # drop comments
    lines = [line for line in lines if not line.startswith("#")]
    # drop CR endline characters
    lines = [line.replace("\r", "") for line in lines]
    # skip empty lines
    lines = [line for line in lines if line]
    labels = lines[0].split()
    if len(labels) < 20:
        raise ValueError(
            "Expected 20+ amino acids but first line '%s' has %d fields" % (
                lines[0],
                len(labels)))
    coeffs = {}
    for line in lines[1:]:
        fields = line.split()
        if len(fields) < 21:
            raise ValueError(
                "Expected AA and 20+ coefficients but '%s' has %d fields" % (
                    line, len(fields)))
        x = fields[0]
        for i, coeff_str in enumerate(fields[1:]):
            y = labels[i]
            coeff = coeff_type(coeff_str)
            if key_type == 'pair':
                coeffs[(x, y)] = coeff
            elif key_type == 'pair_string':
                coeffs[x + y] = coeff
            else:
                coeffs.setdefault(x, {})[y] = coeff
    return coeffs
def _prepare_memoization_key(args, kwargs):
"""
Make a tuple of arguments which can be used as a key
for a memoized function's lookup_table. If some object can't be hashed
then used its __repr__ instead.
"""
key_list = []
for arg in args:
try:
hash(arg)
key_list.append(arg)
except:
key_list.append(repr(arg))
for (k, v) in kwargs.items():
try:
hash(k)
hash(v)
key_list.append((k, v))
except:
key_list.append((repr(k), repr(v)))
return tuple(key_list) | Make a tuple of arguments which can be used as a key
for a memoized function's lookup_table. If some object can't be hashed
then used its __repr__ instead. | entailment |
def load_dataframe(
        mhc_class=None,  # 1, 2, or None for neither
        hla=None,
        exclude_hla=None,
        human_only=False,
        peptide_length=None,
        assay_method=None,
        assay_group=None,
        only_standard_amino_acids=True,
        reduced_alphabet=None,  # 20 letter AA strings -> simpler alphabet
        warn_bad_lines=True,
        nrows=None):
    """
    Load IEDB MHC data without aggregating multiple entries for the same epitope
    Parameters
    ----------
    mhc_class : {None, 1, 2}
        Restrict to MHC Class I or Class II (or None for neither)
    hla : regex pattern, optional
        Restrict results to specific HLA type used in assay
    exclude_hla : regex pattern, optional
        Exclude certain HLA types
    human_only : bool
        Restrict to human samples (default False)
    peptide_length: int, optional
        Restrict epitopes to amino acid strings of given length
    assay_method : string, optional
        Limit to assay methods which contain the given string
    assay_group : string, optional
        Limit to assay groups which contain the given string
    only_standard_amino_acids : bool, optional
        Drop sequences which use non-standard amino acids, anything outside
        the core 20, such as X or U (default = True)
    reduced_alphabet : dictionary, optional
        Remap amino acid letters to some other alphabet
        (NOTE: accepted but currently unused in this function)
    warn_bad_lines : bool, optional
        The full MHC ligand dataset seems to contain several dozen lines with
        too many fields. This currently results in a lot of warning messages
        from Pandas, which you can turn off with this option (default = True)
    nrows : int, optional
        Don't load the full IEDB dataset but instead read only the first nrows
    """
    df = pd.read_csv(
        local_path(),
        header=[0, 1],
        skipinitialspace=True,
        nrows=nrows,
        low_memory=False,
        error_bad_lines=False,
        encoding="latin-1",
        warn_bad_lines=warn_bad_lines)
    # Sometimes the IEDB seems to put in an extra comma in the
    # header line, which creates an unnamed column of NaNs.
    # To deal with this, drop any columns which are all NaN
    df = df.dropna(axis=1, how="all")
    n = len(df)
    epitope_column_key = ("Epitope", "Description")
    mhc_allele_column_key = ("MHC", "Allele Name")
    # Upper-case epitope strings in place so downstream filters agree
    epitopes = df[epitope_column_key] = df[epitope_column_key].str.upper()
    null_epitope_seq = epitopes.isnull()
    n_null = null_epitope_seq.sum()
    if n_null > 0:
        logging.info("Dropping %d null sequences", n_null)
    mask = ~null_epitope_seq
    if only_standard_amino_acids:
        # if have rare or unknown amino acids, drop the sequence
        bad_epitope_seq = \
            epitopes.str.contains(bad_amino_acids, na=False).astype("bool")
        n_bad = bad_epitope_seq.sum()
        if n_bad > 0:
            logging.info("Dropping %d bad sequences", n_bad)
        mask &= ~bad_epitope_seq
    if human_only:
        mask &= df[mhc_allele_column_key].str.startswith("HLA").astype("bool")
    if mhc_class == 1:
        mask &= df["MHC"]["MHC allele class"] == "I"
    elif mhc_class == 2:
        mask &= df["MHC"]["MHC allele class"] == "II"
    if hla:
        mask &= df[mhc_allele_column_key].str.contains(hla, na=False)
    if exclude_hla:
        mask &= ~(df[mhc_allele_column_key].str.contains(exclude_hla, na=False))
    if assay_group:
        mask &= df["Assay"]["Assay Group"].str.contains(assay_group)
    if assay_method:
        mask &= df["Assay"]["Method/Technique"].str.contains(assay_method)
    if peptide_length:
        assert peptide_length > 0
        mask &= df[epitope_column_key].str.len() == peptide_length
    # .copy() so later mutations don't warn about a view of the original
    df = df[mask].copy()
    logging.info("Returning %d / %d entries after filtering", len(df), n)
    return df
Parameters
----------
mhc_class : {None, 1, 2}
Restrict to MHC Class I or Class II (or None for neither)
hla : regex pattern, optional
Restrict results to specific HLA type used in assay
exclude_hla : regex pattern, optional
Exclude certain HLA types
human_only : bool
Restrict to human samples (default False)
peptide_length: int, optional
Restrict epitopes to amino acid strings of given length
assay_method : string, optional
Limit to assay methods which contain the given string
assay_group : string, optional
Limit to assay groups which contain the given string
only_standard_amino_acids : bool, optional
Drop sequences which use non-standard amino acids, anything outside
the core 20, such as X or U (default = True)
reduced_alphabet : dictionary, optional
Remap amino acid letters to some other alphabet
warn_bad_lines : bool, optional
The full MHC ligand dataset seems to contain several dozen lines with
too many fields. This currently results in a lot of warning messages
from Pandas, which you can turn off with this option (default = True)
nrows : int, optional
Don't load the full IEDB dataset but instead read only the first nrows | entailment |
def get_fieldsets(self, request, obj=None):
    """
    Add ownership info fields in fieldset with proper separation.

    Author: Himanshu Shankar (https://himanshus.com)
    """
    fieldsets = list(super(CreateUpdateAdmin, self).get_fieldsets(
        request=request, obj=obj))

    # Gather every field name already present in the fieldsets.
    existing = set()
    for fs in fieldsets:
        existing.update(fs[1]['fields'])

    extra = set()
    for field_name, opts in self.ownership_info['fields'].items():
        on_model = hasattr(self.model, field_name)
        not_present = field_name not in existing
        not_excluded = (not self.exclude
                        or field_name not in self.exclude)
        if on_model and not_present and not_excluded:
            # Hide read-only ownership fields on the add form (they
            # would be empty); always show them on the edit form.
            if obj or not opts.get('readonly', True):
                extra.add(field_name)

    # Append a dedicated ownership section only when there is
    # something to show.
    if extra:
        fieldsets.append((self.ownership_info['label'],
                          {'fields': tuple(extra)}))
    return tuple(fieldsets)
Author: Himanshu Shankar (https://himanshus.com) | entailment |
def get_readonly_fields(self, request, obj=None):
    """
    Makes `created_by`, `create_date` & `update_date` readonly when
    editing.

    Author: Himanshu Shankar (https://himanshus.com)
    """
    fields = list(super(CreateUpdateAdmin, self).get_readonly_fields(
        request=request, obj=obj))

    for field_name, opts in self.ownership_info['fields'].items():
        # Append only fields that exist on the model, are flagged
        # read-only, are not already listed, and are not excluded
        # (otherwise form.Meta.exclude would carry the field twice).
        if (hasattr(self.model, field_name)
                and opts.get('readonly')
                and field_name not in fields
                and (not self.exclude
                     or field_name not in self.exclude)):
            fields.append(field_name)
    return tuple(fields)
editing.
Author: Himanshu Shankar (https://himanshus.com) | entailment |
def get_authorization(self, request):
    """Extract the raw JWT authorization value from a request.

    The request body is checked first (under ``self.key``); when it is
    empty or absent there, the ``self.header_key`` entry of the request
    headers is used instead.

    Parameters
    ----------
    request: HttpRequest
        This is the raw request that user has sent.

    Returns
    -------
    auth: bytes
        Request's 'JWT_AUTH_KEY:' content from body or header, as a
        bytestring. Hides some test client ickyness where the header
        can be unicode.
    """
    from django.utils.six import text_type
    from rest_framework import HTTP_HEADER_ENCODING

    value = request.data.get(self.key, b'')
    if not value:
        value = request.META.get(self.header_key, b'')
    if isinstance(value, text_type):
        # Work around django test client oddness: headers may arrive
        # as unicode; normalise to bytes.
        value = value.encode(HTTP_HEADER_ENCODING)
    return value
looks for specified key in header and then looks
for the same in body part.
Parameters
----------
request: HttpRequest
This is the raw request that user has sent.
Returns
-------
auth: str
Return request's 'JWT_AUTH_KEY:' content from body or
Header, as a bytestring.
Hide some test client ickyness where the header can be unicode. | entailment |
def get_jwt_value(self, request):
    """
    This function has been overloaded and it returns the proper JWT
    auth string.

    Parameters
    ----------
    request: HttpRequest
        This is the request that is received by DJango in the view.

    Returns
    -------
    str
        This returns the extracted JWT auth token string.
    """
    from django.utils.encoding import smart_text
    from django.utils.translation import ugettext as _
    from rest_framework import exceptions

    # ``auth`` is e.g. [b'Bearer', b'<token>'] after splitting on spaces.
    auth = self.get_authorization(request).split()
    auth_header_prefix = self.prefix.lower() or ''
    if not auth:
        # No auth in body/header: fall back to the configured cookie.
        if self.cookie:
            return request.COOKIES.get(self.cookie)
        return None
    # NOTE(review): auth_header_prefix is always a str here because of
    # the ``or ''`` fallback above, so the ``is None`` test can never be
    # true — only the length check can fire. Confirm intent.
    if auth_header_prefix is None or len(auth_header_prefix) < 1:
        # No prefix configured: prepend an empty token so that auth[0]
        # compares equal to the empty prefix and auth[1] is the JWT.
        auth.append('')
        auth.reverse()
    if smart_text(auth[0].lower()) != auth_header_prefix:
        # Prefix mismatch: this authenticator does not apply.
        return None
    if len(auth) == 1:
        msg = _('Invalid Authorization header. No credentials provided.')
        raise exceptions.AuthenticationFailed(msg)
    elif len(auth) > 2:
        msg = _('Invalid Authorization header. Credentials string '
                'should not contain spaces.')
        raise exceptions.AuthenticationFailed(msg)
    return auth[1]
auth string.
Parameters
----------
request: HttpRequest
This is the request that is received by DJango in the view.
Returns
-------
str
This returns the extracted JWT auth token string. | entailment |
def main(args=None):
    """Call the CLI interface and wait for the result.

    Parameters
    ----------
    args : list of str, optional
        Command-line arguments to parse. Defaults to ``sys.argv[1:]``
        when ``None`` (the usual console-script invocation).

    Exits the interpreter with status 0 on success, 1 on any error.
    """
    retcode = 0
    try:
        ci = CliInterface()
        # Bug fix: forward ``args`` so callers can inject arguments
        # programmatically; argparse falls back to sys.argv when None.
        parsed = ci.parser.parse_args(args)
        result = parsed.func(parsed)
        if result is not None:
            print(result)
    except Exception:
        retcode = 1
        traceback.print_exc()
    sys.exit(retcode)
def write_toc(self, args):
    """Write the table of contents."""
    # FIXME: Can this logic be moved into the create_parser function?
    # Resolve the list marker: an ordered marker wins, then an explicit
    # unordered marker, then the parser's default unordered marker.
    ordered = args.ordered_list_marker is not None
    if ordered:
        list_marker = args.ordered_list_marker
    elif args.unordered_list_marker is not None:
        list_marker = args.unordered_list_marker
    else:
        list_marker = md_parser[
            args.parser]['list']['unordered']['default_marker']

    toc_struct = build_multiple_tocs(
        filenames=args.filename,
        ordered=ordered,
        no_links=args.no_links,
        no_indentation=args.no_indentation,
        no_list_coherence=args.no_list_coherence,
        keep_header_levels=int(args.header_levels),
        parser=args.parser,
        list_marker=list_marker)

    if args.in_place:
        # Rewrite each input file between the TOC markers.
        write_strings_on_files_between_markers(
            filenames=args.filename,
            strings=toc_struct,
            marker=args.toc_marker)
    else:
        # Dump every TOC to stdout; entries already end with a newline.
        for toc in toc_struct:
            print(toc, end='')
def create_parser(self):
    """Create the CLI parser.

    Builds an :mod:`argparse` parser with one required subcommand per
    supported markdown parser family ('github' and its aliases, and
    'redcarpet'), each with mutually exclusive ordered/unordered list
    marker options, plus common options for indentation, links, TOC
    marker and in-place editing.
    """
    parser = argparse.ArgumentParser(
        description=PROGRAM_DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(PROGRAM_EPILOG))

    parser.add_argument(
        'filename',
        metavar='FILE_NAME',
        nargs='*',
        help='the I/O file name')

    # One subcommand per markdown parser; a choice is mandatory.
    subparsers = parser.add_subparsers(
        dest='parser', title='markdown parser')
    subparsers.required = True

    # github + cmark + gitlab + commonmarker.
    github = subparsers.add_parser(
        'github',
        aliases=['cmark', 'gitlab', 'commonmarker'],
        description='Use Commonmark rules to generate an output. If no \
                     option is selected, the default output will be an \
                     unordered list with the respective default values \
                     as listed below')

    # Ordered and unordered list markers are mutually exclusive.
    megroup = github.add_mutually_exclusive_group()

    megroup.add_argument(
        '-u',
        '--unordered-list-marker',
        choices=md_parser['github']['list']['unordered']['bullet_markers'],
        nargs='?',
        const=md_parser['github']['list']['unordered']['default_marker'],
        help='set the marker and enables unordered list. Defaults to ' +
        md_parser['github']['list']['unordered']['default_marker'])

    megroup.add_argument(
        '-o',
        '--ordered-list-marker',
        choices=md_parser['github']['list']['ordered']['closing_markers'],
        nargs='?',
        const=md_parser['github']['list']['ordered']
        ['default_closing_marker'],
        help='set the marker and enables ordered lists. Defaults to ' +
        md_parser['github']['list']['ordered']['default_closing_marker'])

    github.add_argument(
        '-l',
        '--header-levels',
        choices=[
            str(i)
            for i in range(1, md_parser['github']['header']['max_levels'] +
                           1)
        ],
        nargs='?',
        const=str(md_parser['github']['header']['default_keep_levels']),
        help='set the maximum level of headers to be considered as part \
              of the TOC. Defaults to ' + str(
            md_parser['github']['header']['default_keep_levels']))

    github.set_defaults(
        header_levels=md_parser['github']['header']['default_keep_levels'])

    # Redcarpet.
    redcarpet = subparsers.add_parser(
        'redcarpet',
        description='Use Redcarpet rules to generate an output. If no \
                     option is selected, the default output will be an \
                     unordered list with the respective default values \
                     as listed below. Gitlab rules are the same as \
                     Redcarpet except that conflicts are avoided with \
                     duplicate headers.')

    megroup = redcarpet.add_mutually_exclusive_group()

    megroup.add_argument(
        '-u',
        '--unordered-list-marker',
        choices=md_parser['redcarpet']['list']['unordered']
        ['bullet_markers'],
        nargs='?',
        const=md_parser['redcarpet']['list']['unordered']
        ['default_marker'],
        help='set the marker and enables unordered list. Defaults to ' +
        md_parser['redcarpet']['list']['unordered']['default_marker'])

    megroup.add_argument(
        '-o',
        '--ordered-list-marker',
        choices=md_parser['redcarpet']['list']['ordered']
        ['closing_markers'],
        nargs='?',
        const=md_parser['redcarpet']['list']['ordered']
        ['default_closing_marker'],
        help='set the marker and enables ordered lists. Defaults to ' +
        md_parser['redcarpet']['list']['ordered']['default_closing_marker']
    )

    redcarpet.add_argument(
        '-l',
        '--header-levels',
        choices=[
            str(i) for i in range(
                1, md_parser['redcarpet']['header']['max_levels'] + 1)
        ],
        nargs='?',
        const=str(md_parser['redcarpet']['header']['default_keep_levels']),
        help='set the maximum level of headers to be considered as part \
              of the TOC. Defaults to ' + str(
            md_parser['redcarpet']['header']['default_keep_levels']))

    redcarpet.set_defaults(header_levels=md_parser['redcarpet']['header']
                           ['default_keep_levels'])

    # Coherence checking and indentation suppression cannot be combined.
    c_or_i = parser.add_mutually_exclusive_group()

    c_or_i.add_argument(
        '-c',
        '--no-list-coherence',
        action='store_true',
        help='avoids checking for TOC list coherence')

    c_or_i.add_argument(
        '-i',
        '--no-indentation',
        action='store_true',
        help='avoids adding indentations to the TOC')

    parser.add_argument(
        '-l',
        '--no-links',
        action='store_true',
        help='avoids adding links to the TOC')

    parser.add_argument(
        '-m',
        '--toc-marker',
        metavar='TOC_MARKER',
        help='set the string to be used as the marker for positioning the \
              table of contents. Defaults to ' +
        common_defaults['toc_marker'])

    parser.add_argument(
        '-p',
        '--in-place',
        action='store_true',
        help='overwrite the input file')

    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version=VERSION_NAME + ' ' + VERSION_NUMBER)

    parser.set_defaults(toc_marker=common_defaults['toc_marker'])
    parser.set_defaults(func=CliToApi().write_toc)

    return parser
def patch(func=None, obj=None, name=None, avoid_doublewrap=True):
    """
    Decorator for monkeypatching functions on modules and classes.

    Example::

        # This replaces FooClass.bar with our method
        @monkeybiz.patch(FooClass)
        def bar(original_bar, *args, **kwargs):
            print "Patched!"
            return original_bar(*args, **kwargs)

        # This replaces FooClass.bar and foomodule.bar with our method
        @monkeybiz.patch([FooClass, foomodule])
        def bar(original_bar, *args, **kwargs):
            #...

    The first argument to ``monkeybiz.patch`` can be either a module, a class,
    or a list of modules and/or classes. The decorator also takes optional
    ``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is
    omitted, the name of the function being patched will be the name of the
    function being decorated. If ``avoid_doublewrap`` is True (the default),
    then functions and methods can only be patched once using this function.

    Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its
    original.
    """
    # Called as @patch(target) or @patch([target, ...]): the positional
    # argument is actually the patch target, not the function.
    if obj is None:
        if isinstance(func, (type, ModuleType)):
            obj = func
            func = None
        elif isinstance(func, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in func]):
            obj = func
            func = None
    if func is None:
        # Decorator-factory form: return a partial awaiting the function.
        return functools.partial(patch, obj=obj, name=name, avoid_doublewrap=avoid_doublewrap)
    if name is None:
        name = func.__name__
    # Multiple targets: patch each one and return the list of results.
    if isinstance(obj, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in obj]):
        return [patch(func=func, obj=o, name=name, avoid_doublewrap=avoid_doublewrap) for o in obj]
    if not isinstance(obj, (ModuleType, type)):
        raise ValueError(
            "Argument passed to @patch decorator must be a "
            "class or module, or a list of classes and modules")
    try:
        call = getattr(obj, name)
    except AttributeError:
        raise TypeError("%(func_repr)s does not exist" % {
            'func_repr': '.'.join(
                filter(None, [
                    getattr(obj, '__module__', None),
                    obj.__name__,
                    func.__name__],
                )),
            })
    # optionally avoid multiple identical wrappings
    # (already wrapped with this exact function: do nothing, returns None)
    if avoid_doublewrap and getattr(call, 'wrapper', None) is func:
        return
    # get underlying function (if it's an unbound method)
    try:
        original_callable = six.get_method_function(call)
    except AttributeError:
        original_callable = call

    @six.wraps(func)
    def wrapper(*args, **kwargs):
        # The patch function receives the original callable first.
        return func(original_callable, *args, **kwargs)

    # set attributes, for future unwrapping and to avoid double-wrapping
    wrapper.original = call
    wrapper.wrapper = func

    if six.PY2 and inspect.isclass(obj):
        # rewrap staticmethod and classmethod specifically (iff obj is a class)
        if hasattr(call, 'im_self'):
            if call.im_self:
                wrapper = classmethod(wrapper)
            else:
                wrapper = staticmethod(wrapper)

    # finally, install the func closure as requested
    setattr(obj, name, wrapper)
    return getattr(obj, name)
Example::
# This replaces FooClass.bar with our method
@monkeybiz.patch(FooClass)
def bar(original_bar, *args, **kwargs):
print "Patched!"
return original_bar(*args, **kwargs)
# This replaces FooClass.bar and foomodule.bar with our method
@monkeybiz.patch([FooClass, foomodule])
def bar(original_bar, *args, **kwargs):
#...
The first argument to ``monkeybiz.patch`` can be either a module, a class,
or a list of modules and/or classes. The decorator also takes optional
``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is
omitted, the name of the function being patched will be the name of the
function being decorated. If ``avoid_doublewrap`` is True (the default),
then functions and methods can only be patched once using this function.
Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its
original. | entailment |
def unpatch(obj, name):
    """
    Undo the effects of patch(func, obj, name)
    """
    wrapper = getattr(obj, name)
    # The wrapper installed by ``patch`` keeps the callable it replaced
    # in its ``original`` attribute; reinstall that callable.
    setattr(obj, name, wrapper.original)
def validate_email(email):
    """
    Validates an email address

    Source: Himanshu Shankar (https://github.com/iamhssingh)

    Parameters
    ----------
    email: str

    Returns
    -------
    bool
    """
    from django.core.exceptions import ValidationError
    from django.core.validators import validate_email as _django_validate

    try:
        _django_validate(email)
    except ValidationError:
        return False
    return True
Source: Himanshu Shankar (https://github.com/iamhssingh)
Parameters
----------
email: str
Returns
-------
bool | entailment |
def get_mobile_number(mobile):
    """
    Returns a mobile number after removing blanks

    Strips spaces and common separator characters (``.,()-``) in a
    single pass using ``str.translate``.

    Author: Himanshu Shankar (https://himanshus.com)

    Parameters
    ----------
    mobile: str

    Returns
    -------
    str
    """
    # One C-level pass instead of six chained str.replace() calls.
    return mobile.translate(str.maketrans('', '', ' .,()-'))
Author: Himanshu Shankar (https://himanshus.com)
Parameters
----------
mobile: str
Returns
-------
str | entailment |
def paginate_data(searched_data, request_data):
    """
    Paginates the searched_data as per the request_data

    Source: Himanshu Shankar (https://github.com/iamhssingh)

    Parameters
    ----------
    searched_data: Serializer.data
        It is the data received from queryset. It uses
        show_serializer
    request_data: Serializer.data
        It is the request data. It uses serializer_class.

    Returns
    -------
    data: dict
    """
    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

    total_objects = len(searched_data.data)

    if int(request_data.data['paginator']) <= 0:
        # Pagination disabled: everything goes on a single page.
        return {'objects': searched_data.data, 'previous': -1, 'next': -1,
                'total_pages': 1, 'current': 1,
                'total_objects': total_objects}

    paginator = Paginator(searched_data.data,
                          request_data.data['paginator'])
    try:
        page = paginator.page(request_data.data['page'])
    except PageNotAnInteger:
        # Non-numeric page request: serve the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page request: serve the last page.
        page = paginator.page(paginator.num_pages)

    return {
        'total_pages': paginator.num_pages,
        'current': page.number,
        'total_objects': total_objects,
        'next': page.next_page_number() if page.has_next() else -1,
        'previous': page.previous_page_number() if page.number > 1 else -1,
        'objects': page.object_list,
    }
Source: Himanshu Shankar (https://github.com/iamhssingh)
Parameters
----------
searched_data: Serializer.data
It is the data received from queryset. It uses
show_serializer
request_data: Serializer.data
It is the request data. It uses serializer_class.
Returns
-------
data: dict | entailment |
def send_message(message: str, subject: str, recip: list, recip_email: list,
                 html_message: str = None):
    """
    Sends message to specified value.

    Source: Himanshu Shankar (https://github.com/iamhssingh)

    Parameters
    ----------
    message: str
        Message that is to be sent to user.
    subject: str
        Subject that is to be sent to user, in case prop is an email.
    recip: list
        Recipient to whom message is being sent.
    recip_email: list
        Recipient to whom EMail is being sent. This will be deprecated once
        SMS feature is brought in.
    html_message: str
        HTML variant of message, if any.

    Returns
    -------
    sent: dict
    """
    import smtplib

    from django.conf import settings
    from django.core.mail import send_mail

    from sendsms import api

    sent = {'success': False, 'message': None}

    if not getattr(settings, 'EMAIL_HOST', None):
        raise ValueError('EMAIL_HOST must be defined in django '
                         'setting for sending mail.')
    # NOTE(review): an exact duplicate of this EMAIL_FROM check was
    # removed; it may have been meant to validate another setting.
    if not getattr(settings, 'EMAIL_FROM', None):
        raise ValueError('EMAIL_FROM must be defined in django setting '
                         'for sending mail. Who is sending email?')

    # Bug fix: normalise single-string arguments to lists *before* any
    # indexing or iteration below. This used to happen after the
    # validation loops, which broke the string back-support entirely
    # (a bare string was indexed/iterated character by character).
    if isinstance(recip, str):
        # For backsupport
        recip = [recip]
    if isinstance(recip_email, str):
        # For backsupport
        recip_email = [recip_email]

    # Check if there is any recipient
    if not len(recip) > 0:
        raise ValueError('No recipient to send message.')
    # Check if the value of recipient is valid (min length: a@b.c)
    elif len(recip[0]) < 5:
        raise ValueError('Invalid recipient.')

    # Check if all recipient in list are of same type
    is_email = validate_email(recip[0])
    for ind in range(len(recip)):
        if validate_email(recip[ind]) is not is_email:
            raise ValueError('All recipient should be of same type.')
        elif not is_email:
            # Normalise mobile numbers in place.
            recip[ind] = get_mobile_number(recip[ind])

    # Check if fallback email is indeed an email
    for rcp in recip_email:
        if not validate_email(rcp):
            raise ValueError('Invalid email provided: {}'.format(rcp))

    if is_email:
        try:
            send_mail(subject=subject, message=message,
                      html_message=html_message,
                      from_email=settings.EMAIL_FROM, recipient_list=recip)
        except smtplib.SMTPException as ex:
            sent['message'] = 'Message sending failed!' + str(ex.args)
            sent['success'] = False
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True
    else:
        try:
            api.send_sms(body=message, to=recip, from_phone=None)
            # Django SendSMS doesn't provide an output of success/failure.
            # Send mail either ways, just to ensure delivery.
            send_message(message=message, subject=subject, recip=recip_email,
                         recip_email=recip_email,
                         html_message=html_message)
        except Exception as ex:
            sent['message'] = 'Message sending Failed!' + str(ex.args)
            sent['success'] = False
            send_message(message=message, subject=subject,
                         recip=recip_email,
                         recip_email=recip_email,
                         html_message=html_message)
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True
    return sent
Source: Himanshu Shankar (https://github.com/iamhssingh)
Parameters
----------
message: str
Message that is to be sent to user.
subject: str
Subject that is to be sent to user, in case prop is an email.
recip: list
Recipient to whom message is being sent.
recip_email: list
Recipient to whom EMail is being sent. This will be deprecated once
SMS feature is brought in.
html_message: str
HTML variant of message, if any.
Returns
-------
sent: dict | entailment |
def has_object_permission(self, request, view, obj):
    """
    Checks if user is superuser or it has permission over object

    Parameters
    ----------
    request
    view
    obj

    Returns
    -------
    """
    # Superusers short-circuit; otherwise defer to the parent check.
    allowed = request.user.is_superuser
    if not allowed:
        allowed = super(IAWPOrSuperuser, self).has_object_permission(
            request=request, view=view, obj=obj)
    return allowed
Parameters
----------
request
view
obj
Returns
------- | entailment |
def write_string_on_file_between_markers(filename: str, string: str,
                                         marker: str):
    r"""Write the table of contents on a single file.

    :parameter filename: the file that needs to be read or modified.
    :parameter string: the string that will be written on the file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: str
    :type string: str
    :type marker: str
    :returns: None
    :rtype: None
    :raises: StdinIsNotAFileToBeWritten or an fpyutils exception
         or a built-in exception.
    """
    if filename == '-':
        # Standard input cannot be rewritten in place.
        raise StdinIsNotAFileToBeWritten

    # The string is always framed by a marker line above and below it.
    final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
    # Maps occurrence index (1-based) -> line number of the first two
    # marker occurrences in the file.
    marker_line_positions = fpyutils.get_line_matches(
        filename, marker, 2, loose_matching=True)

    if 1 in marker_line_positions:
        if 2 in marker_line_positions:
            # Both markers present: drop everything between them
            # (inclusive) before re-inserting the fresh content.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[2], filename)
        else:
            # Only the opening marker present: drop just that line.
            fpyutils.remove_line_interval(filename, marker_line_positions[1],
                                          marker_line_positions[1], filename)
        # Re-insert the framed string where the first marker used to be.
        fpyutils.insert_string_at_line(
            filename,
            final_string,
            marker_line_positions[1],
            filename,
            append=False)
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception. | entailment |
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    r"""Write the table of contents on multiple files.

    :parameter filenames: the files that needs to be read or modified.
    :parameter strings: the strings that will be written on the file. Each
         string is associated with one file.
    :parameter marker: a marker that will identify the start
         and the end of the string.
    :type filenames: list
    :type string: list
    :type marker: str
    :returns: None
    :rtype: None
    :raises: an fpyutils exception or a built-in exception.
    """
    assert len(filenames) == len(strings)
    for fname in filenames:
        assert isinstance(fname, str)
    for string in strings:
        assert isinstance(string, str)

    # Pair each file with its string and delegate the actual rewrite.
    for fname, string in zip(filenames, strings):
        write_string_on_file_between_markers(fname, string, marker)
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception. | entailment |
def build_toc(filename: str,
              ordered: bool = False,
              no_links: bool = False,
              no_indentation: bool = False,
              no_list_coherence: bool = False,
              keep_header_levels: int = 3,
              parser: str = 'github',
              list_marker: str = '-') -> str:
    r"""Build the table of contents of a single file.

    :parameter filename: the file that needs to be read. Use ``'-'`` to
         read from standard input.
    :parameter ordered: decides whether to build an ordered list or not.
         Defaults to ``False``.
    :parameter no_links: disables the use of links.
         Defaults to ``False``.
    :parameter no_indentation: disables indentation in the list.
         Defaults to ``False``.
    :parameter no_list_coherence: disables the TOC list coherence check.
         Defaults to ``False``.
    :parameter keep_header_levels: the maximum level of headers to be
         considered as such when building the table of contents.
         Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
         Defaults to ``github``.
    :parameter list_marker: the marker used for the TOC list items.
         Defaults to ``-``.
    :type filename: str
    :type ordered: bool
    :type no_links: bool
    :type no_indentation: bool
    :type no_list_coherence: bool
    :type keep_header_levels: int
    :type parser: str
    :type list_marker: str
    :returns: toc, the corresponding table of contents of the file.
    :rtype: str
    :raises: a built-in exception.
    """
    toc = str()
    header_type_counter = dict()
    header_type_curr = 0
    header_type_prev = 0
    header_duplicate_counter = dict()
    no_of_indentation_spaces_prev = 0
    # Bug fix: ``list_marker_log`` used to be computed twice (the first
    # copy lacked the ``else`` branch); compute it exactly once here.
    if ordered:
        list_marker_log = build_list_marker_log(parser, list_marker)
    else:
        list_marker_log = list()
    if filename == '-':
        f = sys.stdin
    else:
        f = open(filename, 'r')
    line = f.readline()
    is_within_code_fence = False
    code_fence = None
    is_document_end = False
    if not no_indentation and not no_list_coherence:
        # if indentation and list coherence.
        indentation_list = build_indentation_list(parser)
    while line:
        # Document ending detection.
        #
        # This changes the state of is_within_code_fence if the
        # file has no closing fence markers. This serves no practial
        # purpose since the code would run correctly anyway. It is
        # however more sematically correct.
        #
        # See the unit tests (examples 95 and 96 of the github parser)
        # and the is_closing_code_fence function.
        if filename != '-':
            # stdin is not seekable.
            file_pointer_pos = f.tell()
            if f.readline() == str():
                is_document_end = True
            f.seek(file_pointer_pos)

        # Code fence detection.
        if is_within_code_fence:
            is_within_code_fence = not is_closing_code_fence(
                line, code_fence, is_document_end, parser)
            line = f.readline()
        else:
            code_fence = is_opening_code_fence(line, parser)
            if code_fence is not None:
                # Update the status of the next line.
                is_within_code_fence = True
                line = f.readline()

        if not is_within_code_fence or code_fence is None:
            # Header detection and gathering.
            header = get_md_header(line, header_duplicate_counter,
                                   keep_header_levels, parser, no_links)
            if header is not None:
                header_type_curr = header['type']

                # Take care of the ordered TOC.
                if ordered:
                    increase_index_ordered_list(header_type_counter,
                                                header_type_prev,
                                                header_type_curr, parser)
                    index = header_type_counter[header_type_curr]
                else:
                    index = 1

                # Take care of list indentations.
                if no_indentation:
                    no_of_indentation_spaces_curr = 0
                    # TOC list coherence checks are not necessary
                    # without indentation.
                else:
                    if not no_list_coherence:
                        if not toc_renders_as_coherent_list(
                                header_type_curr, indentation_list, parser):
                            raise TocDoesNotRenderAsCoherentList

                    no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
                        header_type_curr, header_type_prev,
                        no_of_indentation_spaces_prev, parser, ordered,
                        list_marker, list_marker_log, index)

                # Build a single TOC line.
                toc_line_no_indent = build_toc_line_without_indentation(
                    header, ordered, no_links, index, parser, list_marker)

                # Save the TOC line with the indentation.
                toc += build_toc_line(toc_line_no_indent,
                                      no_of_indentation_spaces_curr) + '\n'

                header_type_prev = header_type_curr
                no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
            # endif
            line = f.readline()
        # endif
    # endwhile

    # Bug fix: close only files we opened ourselves; closing sys.stdin
    # would be a surprising side effect for the caller.
    if filename != '-':
        f.close()

    return toc
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception. | entailment |
def build_multiple_tocs(filenames: list,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> list:
r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception.
"""
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(filenames) == 0:
filenames.append('-')
file_id = 0
toc_struct = list()
while file_id < len(filenames):
toc_struct.append(
build_toc(filenames[file_id], ordered, no_links, no_indentation,
no_list_coherence, keep_header_levels, parser,
list_marker))
file_id += 1
return toc_struct | r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception. | entailment |
def increase_index_ordered_list(header_type_count: dict,
header_type_prev: int,
header_type_curr: int,
parser: str = 'github'):
r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception.
"""
# header_type_prev might be 0 while header_type_curr can't.
assert header_type_prev >= 0
assert header_type_curr >= 1
# Base cases for a new table of contents or a new index type.
if header_type_prev == 0:
header_type_prev = header_type_curr
if (header_type_curr not in header_type_count
or header_type_prev < header_type_curr):
header_type_count[header_type_curr] = 0
header_type_count[header_type_curr] += 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_count[header_type_curr] > md_parser['github']['list'][
'ordered']['max_marker_number']:
raise GithubOverflowOrderedListMarker | r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception. | entailment |
def build_list_marker_log(parser: str = 'github',
list_marker: str = '.') -> list:
r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
list_marker_log = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
list_marker_log = [
str(md_parser['github']['list']['ordered']['min_marker_number']) +
list_marker
for i in range(0, md_parser['github']['header']['max_levels'])
]
elif parser == 'redcarpet':
pass
return list_marker_log | r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only. | entailment |
def compute_toc_line_indentation_spaces(
header_type_curr: int = 1,
header_type_prev: int = 0,
no_of_indentation_spaces_prev: int = 0,
parser: str = 'github',
ordered: bool = False,
list_marker: str = '-',
list_marker_log: list = build_list_marker_log('github', '.'),
index: int = 1) -> int:
r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
as list ids or otherwise a dash character, otherwise.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces.
"""
assert header_type_curr >= 1
assert header_type_prev >= 0
assert no_of_indentation_spaces_prev >= 0
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if ordered:
assert len(
list_marker_log) == md_parser['github']['header']['max_levels']
for e in list_marker_log:
assert isinstance(e, str)
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_prev == 0:
# Base case for the first toc line.
no_of_indentation_spaces_curr = 0
elif header_type_curr == header_type_prev:
# Base case for same indentation.
no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
else:
if ordered:
list_marker_prev = str(list_marker_log[header_type_curr - 1])
else:
# list_marker for unordered lists will always be 1 character.
list_marker_prev = list_marker
# Generic cases.
if header_type_curr > header_type_prev:
# More indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev + len(list_marker_prev) +
len(' '))
elif header_type_curr < header_type_prev:
# Less indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev -
(len(list_marker_prev) + len(' ')))
# Reset older nested list indices. If this is not performed then
# future nested ordered lists will rely on incorrect data to
# compute indentations.
if ordered:
for i in range((header_type_curr - 1) + 1,
md_parser['github']['header']['max_levels']):
list_marker_log[i] = str(
md_parser['github']['list']['ordered']
['min_marker_number']) + list_marker
# Update the data structure.
if ordered:
list_marker_log[header_type_curr - 1] = str(index) + list_marker
elif parser == 'redcarpet':
no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
return no_of_indentation_spaces_curr | r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
as list ids or otherwise a dash character, otherwise.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces. | entailment |
def build_toc_line_without_indentation(header: dict,
ordered: bool = False,
no_links: bool = False,
index: int = 1,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception.
"""
assert 'type' in header
assert 'text_original' in header
assert 'text_anchor_link' in header
assert isinstance(header['type'], int)
assert isinstance(header['text_original'], str)
assert isinstance(header['text_anchor_link'], str)
assert header['type'] >= 1
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
toc_line_no_indent = str()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
list_marker = str(index) + list_marker
# FIXME: is this always correct?
if no_links:
line = header['text_original']
else:
line = '[' + header['text_original'] + ']' + '(#' + header[
'text_anchor_link'] + ')'
toc_line_no_indent = list_marker + ' ' + line
return toc_line_no_indent | r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception. | entailment |
def build_toc_line(toc_line_no_indent: str,
no_of_indentation_spaces: int = 0) -> str:
r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception.
"""
assert no_of_indentation_spaces >= 0
indentation = no_of_indentation_spaces * ' '
toc_line = indentation + toc_line_no_indent
return toc_line | r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception. | entailment |
def build_anchor_link(header_text_trimmed: str,
header_duplicate_counter: str,
parser: str = 'github') -> str:
r"""Apply the specified slug rule to build the anchor link.
:parameter header_text_trimmed: the text that needs to be transformed
in a link.
:parameter header_duplicate_counter: a data structure that keeps track of
possible duplicate header links in order to avoid them. This is
meaningful only for certain values of parser.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_trimmed: str
:type header_duplicate_counter: dict
:type parser: str
:returns: None if the specified parser is not recognized, or the anchor
link, otherwise.
:rtype: str
:raises: a built-in exception.
.. note::
The licenses of each markdown parser algorithm are reported on
the 'Markdown spec' documentation page.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
header_text_trimmed = header_text_trimmed.lower()
# Remove punctuation: Keep spaces, hypens and "word characters"
# only.
header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
header_text_trimmed = header_text_trimmed.replace(' ', '-')
# Check for duplicates.
ht = header_text_trimmed
# Set the initial value if we are examining the first occurrency.
# The state of header_duplicate_counter is available to the caller
# functions.
if header_text_trimmed not in header_duplicate_counter:
header_duplicate_counter[header_text_trimmed] = 0
if header_duplicate_counter[header_text_trimmed] > 0:
header_text_trimmed = header_text_trimmed + '-' + str(
header_duplicate_counter[header_text_trimmed])
header_duplicate_counter[ht] += 1
return header_text_trimmed
elif parser == 'redcarpet':
# To ensure full compatibility what follows is a direct translation
# of the rndr_header_anchor C function used in redcarpet.
STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
header_text_trimmed_len = len(header_text_trimmed)
inserted = 0
stripped = 0
header_text_trimmed_middle_stage = ''
for i in range(0, header_text_trimmed_len):
if header_text_trimmed[i] == '<':
while i < header_text_trimmed_len and header_text_trimmed[
i] != '>':
i += 1
elif header_text_trimmed[i] == '&':
while i < header_text_trimmed_len and header_text_trimmed[
i] != ';':
i += 1
# str.find() == -1 if character is not found in str.
# https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
elif not curses.ascii.isascii(
header_text_trimmed[i]) or STRIPPED.find(
header_text_trimmed[i]) != -1:
if inserted and not stripped:
header_text_trimmed_middle_stage += '-'
stripped = 1
else:
header_text_trimmed_middle_stage += header_text_trimmed[
i].lower()
stripped = 0
inserted += 1
if stripped > 0 and inserted > 0:
header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
0:-1]
if inserted == 0 and header_text_trimmed_len > 0:
hash = 5381
for i in range(0, header_text_trimmed_len):
# Get the unicode representation with ord.
# Unicode should be equal to ASCII in ASCII's range of
# characters.
hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
# This is equivalent to %x in C. In Python we don't have
# the length problem so %x is equal to %lx in this case.
# Apparently there is no %l in Python...
header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
return header_text_trimmed_middle_stage | r"""Apply the specified slug rule to build the anchor link.
:parameter header_text_trimmed: the text that needs to be transformed
in a link.
:parameter header_duplicate_counter: a data structure that keeps track of
possible duplicate header links in order to avoid them. This is
meaningful only for certain values of parser.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_trimmed: str
:type header_duplicate_counter: dict
:type parser: str
:returns: None if the specified parser is not recognized, or the anchor
link, otherwise.
:rtype: str
:raises: a built-in exception.
.. note::
The licenses of each markdown parser algorithm are reported on
the 'Markdown spec' documentation page. | entailment |
def get_atx_heading(line: str,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False):
r"""Given a line extract the link label and its type.
:parameter line: the line to be examined.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:parameter no_links: disables the use of links.
:type line: str
:type keep_header_levels: int
:type parser: str
:type np_links: bool
:returns: None if the line does not contain header elements according to
the rules of the selected markdown parser, or a tuple containing the
header type and the trimmed header text, according to the selected
parser rules, otherwise.
:rtype: typing.Optional[tuple]
:raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
built-in exception.
"""
assert keep_header_levels >= 1
if len(line) == 0:
return None
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if line[0] == '\u005c':
return None
i = 0
while i < len(line) and line[i] == ' ' and i <= md_parser['github'][
'header']['max_space_indentation']:
i += 1
if i > md_parser['github']['header']['max_space_indentation']:
return None
offset = i
while i < len(line) and line[i] == '#' and i <= md_parser['github'][
'header']['max_levels'] + offset:
i += 1
if i - offset > md_parser['github']['header'][
'max_levels'] or i - offset > keep_header_levels or i - offset == 0:
return None
current_headers = i - offset
# Include special cases for line endings which should not be
# discarded as non-ATX headers.
if i < len(line) and (line[i] != ' ' and line[i] != '\u000a'
and line[i] != '\u000d'):
return None
i += 1
# Exclude leading whitespaces after the ATX header identifier.
while i < len(line) and line[i] == ' ':
i += 1
# An algorithm to find the start and the end of the closing sequence.
# The closing sequence includes all the significant part of the
# string. This algorithm has a complexity of O(n) with n being the
# length of the line.
cs_start = i
cs_end = cs_start
line_prime = line[::-1]
hash_char_rounds = 0
go_on = True
i = 0
i_prev = i
while i < len(line) - cs_start - 1 and go_on:
if ((line_prime[i] != ' ' and line_prime[i] != '#')
or hash_char_rounds > 1):
if i > i_prev:
cs_end = len(line_prime) - i_prev
else:
cs_end = len(line_prime) - i
go_on = False
while go_on and line_prime[i] == ' ':
i += 1
i_prev = i
while go_on and line_prime[i] == '#':
i += 1
if i > i_prev:
hash_char_rounds += 1
# Instead of changing the whole algorithm to check for line
# endings, this seems cleaner.
find_newline = line.find('\u000a')
find_carriage_return = line.find('\u000d')
if find_newline != -1:
cs_end = min(cs_end, find_newline)
if find_carriage_return != -1:
cs_end = min(cs_end, find_carriage_return)
final_line = line[cs_start:cs_end]
if not no_links:
if len(final_line) > 0 and final_line[-1] == '\u005c':
final_line += ' '
if len(
final_line.strip('\u0020').strip('\u0009').strip('\u000a').
strip('\u000b').strip('\u000c').strip('\u000d')) == 0:
raise GithubEmptyLinkLabel
if len(final_line
) > md_parser['github']['link']['max_chars_label']:
raise GithubOverflowCharsLinkLabel
# Escape square brackets if not already escaped.
i = 0
while i < len(final_line):
if (final_line[i] == '[' or final_line[i] == ']'):
j = i - 1
consecutive_escape_characters = 0
while j >= 0 and final_line[j] == '\u005c':
consecutive_escape_characters += 1
j -= 1
if ((consecutive_escape_characters > 0
and consecutive_escape_characters % 2 == 0)
or consecutive_escape_characters == 0):
tmp = '\u005c'
else:
tmp = str()
final_line = final_line[0:i] + tmp + final_line[i:len(
final_line)]
i += 1 + len(tmp)
else:
i += 1
elif parser == 'redcarpet':
if line[0] != '#':
return None
i = 0
while (i < len(line)
and i < md_parser['redcarpet']['header']['max_levels']
and line[i] == '#'):
i += 1
current_headers = i
if i < len(line) and line[i] != ' ':
return None
while i < len(line) and line[i] == ' ':
i += 1
end = i
while end < len(line) and line[end] != '\n':
end += 1
while end > 0 and line[end - 1] == '#':
end -= 1
while end > 0 and line[end - 1] == ' ':
end -= 1
if end > i:
final_line = line
if not no_links and len(final_line) > 0 and final_line[-1] == '\\':
final_line += ' '
end += 1
final_line = final_line[i:end]
else:
return None
# TODO: escape or remove '[', ']', '(', ')' in inline links for redcarpet,
# TODO: check link label rules for redcarpet.
return current_headers, final_line | r"""Given a line extract the link label and its type.
:parameter line: the line to be examined.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:parameter no_links: disables the use of links.
:type line: str
:type keep_header_levels: int
:type parser: str
:type np_links: bool
:returns: None if the line does not contain header elements according to
the rules of the selected markdown parser, or a tuple containing the
header type and the trimmed header text, according to the selected
parser rules, otherwise.
:rtype: typing.Optional[tuple]
:raises: GithubEmptyLinkLabel or GithubOverflowCharsLinkLabel or a
built-in exception. | entailment |
def get_md_header(header_text_line: str,
header_duplicate_counter: dict,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False) -> dict:
r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions.
"""
result = get_atx_heading(header_text_line, keep_header_levels, parser,
no_links)
if result is None:
return result
else:
header_type, header_text_trimmed = result
header = {
'type':
header_type,
'text_original':
header_text_trimmed,
'text_anchor_link':
build_anchor_link(header_text_trimmed, header_duplicate_counter,
parser)
}
return header | r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions. | entailment |
def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
return len(line) - len(line.lstrip(
' ')) <= md_parser['github']['code fence']['min_marker_characters']
elif parser == 'redcarpet':
# TODO.
return False | r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception. | entailment |
def is_opening_code_fence(line: str, parser: str = 'github'):
r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return None
line = line.lstrip(' ').rstrip('\n')
if not line.startswith(
(markers[0] * marker_min_length, markers[1] * marker_min_length)):
return None
if line == len(line) * line[0]:
info_string = str()
else:
info_string = line.lstrip(line[0])
# Backticks or tildes in info string are explicitly forbidden.
if markers[0] in info_string or markers[1] in info_string:
return None
# Solves example 107. See:
# https://github.github.com/gfm/#example-107
if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
return None
return line.rstrip(info_string)
elif parser == 'redcarpet':
# TODO.
return None | r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception. | entailment |
def is_closing_code_fence(line: str,
fence: str,
is_document_end: bool = False,
parser: str = 'github') -> bool:
r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return False
# Remove opening fence indentation after it is known to be valid.
fence = fence.lstrip(' ')
# Check if fence uses valid characters.
if not fence.startswith((markers[0], markers[1])):
return False
if len(fence) < marker_min_length:
return False
# Additional security.
fence = fence.rstrip('\n').rstrip(' ')
# Check that all fence characters are equal.
if fence != len(fence) * fence[0]:
return False
# We might be inside a code block if this is not closed
# by the end of the document, according to example 95 and 96.
# This means that the end of the document corresponds to
# a closing code fence.
# Of course we first have to check that fence is a valid opening
# code fence marker.
# See:
# https://github.github.com/gfm/#example-95
# https://github.github.com/gfm/#example-96
if is_document_end:
return True
# Check if line uses the same character as fence.
line = line.lstrip(' ')
if not line.startswith(fence):
return False
line = line.rstrip('\n').rstrip(' ')
# Solves example 93 and 94. See:
# https://github.github.com/gfm/#example-93
# https://github.github.com/gfm/#example-94
if len(line) < len(fence):
return False
# Closing fence must not have alien characters.
if line != len(line) * line[0]:
return False
return True
elif parser == 'redcarpet':
# TODO.
return False | r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception. | entailment |
def build_indentation_list(parser: str = 'github'):
r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception.
"""
indentation_list = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
for i in range(0, md_parser[parser]['header']['max_levels']):
indentation_list.append(False)
return indentation_list | r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception. | entailment |
def toc_renders_as_coherent_list(
header_type_curr: int = 1,
indentation_list: list = build_indentation_list('github'),
parser: str = 'github') -> bool:
r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception.
"""
assert header_type_curr >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert len(
indentation_list) == md_parser[parser]['header']['max_levels']
for e in indentation_list:
assert isinstance(e, bool)
renders_as_list = True
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
# Update with current information.
indentation_list[header_type_curr - 1] = True
# Reset next cells to False, as a detection mechanism.
for i in range(header_type_curr,
md_parser['github']['header']['max_levels']):
indentation_list[i] = False
# Check for previous False cells. If there is a "hole" in the list
# it means that the TOC will have "wrong" indentation spaces, thus
# either not rendering as an HTML list or not as the user intended.
i = header_type_curr - 1
while i >= 0 and indentation_list[i]:
i -= 1
if i >= 0:
renders_as_list = False
return renders_as_list | r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.