| signature (string, 8–3.44k chars) | body (string, 0–1.41M chars) | docstring (string, 1–122k chars) | id (string, 5–17 chars) |
|---|---|---|---|
def process_block(self, block):
|
ret = []<EOL>output = None<EOL>input_lines = None<EOL>lineno = self.IP.execution_count<EOL>input_prompt = self.promptin%lineno<EOL>output_prompt = self.promptout%lineno<EOL>image_file = None<EOL>image_directive = None<EOL>for token, data in block:<EOL><INDENT>if token==COMMENT:<EOL><INDENT>out_data = self.process_comment(data)<EOL><DEDENT>elif token==INPUT:<EOL><INDENT>(out_data, input_lines, output, is_doctest, image_file,<EOL>image_directive) =self.process_input(data, input_prompt, lineno)<EOL><DEDENT>elif token==OUTPUT:<EOL><INDENT>out_data =self.process_output(data, output_prompt,<EOL>input_lines, output, is_doctest,<EOL>image_file)<EOL><DEDENT>if out_data:<EOL><INDENT>ret.extend(out_data)<EOL><DEDENT><DEDENT>if image_file is not None:<EOL><INDENT>self.save_image(image_file)<EOL><DEDENT>return ret, image_directive<EOL>
|
Process a block from the block_parser and return a list of processed lines
|
f5151:c0:m8
|
def process_pure_python(self, content):
|
output = []<EOL>savefig = False <EOL>multiline = False <EOL>fmtin = self.promptin<EOL>for lineno, line in enumerate(content):<EOL><INDENT>line_stripped = line.strip()<EOL>if not len(line):<EOL><INDENT>output.append(line) <EOL>continue<EOL><DEDENT>if line_stripped.startswith('<STR_LIT:@>'):<EOL><INDENT>output.extend([line])<EOL>if '<STR_LIT>' in line:<EOL><INDENT>savefig = True <EOL><DEDENT>continue<EOL><DEDENT>if line_stripped.startswith('<STR_LIT:#>'):<EOL><INDENT>output.extend([line])<EOL>continue<EOL><DEDENT>if not multiline: <EOL><INDENT>if line_stripped.endswith('<STR_LIT:\\>'): <EOL><INDENT>multiline = True<EOL>cont_len = len(str(lineno)) + <NUM_LIT:2><EOL>line_to_process = line.strip('<STR_LIT:\\>')<EOL>output.extend(["<STR_LIT>" % (fmtin%lineno,line)])<EOL>continue<EOL><DEDENT>else: <EOL><INDENT>line_to_process = line.strip('<STR_LIT:\\>')<EOL><DEDENT><DEDENT>else: <EOL><INDENT>line_to_process += line.strip('<STR_LIT:\\>')<EOL>if line_stripped.endswith('<STR_LIT:\\>'): <EOL><INDENT>continuation = '<STR_LIT:.>' * cont_len<EOL>output.extend([('<STR_LIT>'+line_stripped) % continuation])<EOL>continue<EOL><DEDENT><DEDENT>self.process_input_line(str(line_to_process.strip()),<EOL>store_history=False)<EOL>out_line = self.cout.getvalue()<EOL>self.clear_cout()<EOL>if savefig:<EOL><INDENT>self.ensure_pyplot()<EOL>self.process_input_line('<STR_LIT>', store_history=False)<EOL>self.clear_cout()<EOL>savefig = False<EOL><DEDENT>if not multiline:<EOL><INDENT>in_line = "<STR_LIT>" % (fmtin%lineno,line)<EOL>output.extend([in_line])<EOL><DEDENT>else:<EOL><INDENT>output.extend([('<STR_LIT>'+line_stripped) % continuation])<EOL>multiline = False<EOL><DEDENT>if len(out_line):<EOL><INDENT>output.extend([out_line])<EOL><DEDENT>output.extend(['<STR_LIT>'])<EOL><DEDENT>return output<EOL>
|
content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were IPython code
|
f5151:c0:m10
|
def process_pure_python2(self, content):
|
output = []<EOL>savefig = False <EOL>multiline = False <EOL>multiline_start = None<EOL>fmtin = self.promptin<EOL>ct = <NUM_LIT:0><EOL>content = [line for line in content if len(line.strip()) > <NUM_LIT:0>]<EOL>for lineno, line in enumerate(content):<EOL><INDENT>line_stripped = line.strip()<EOL>if not len(line):<EOL><INDENT>output.append(line)<EOL>continue<EOL><DEDENT>if line_stripped.startswith('<STR_LIT:@>'):<EOL><INDENT>output.extend([line])<EOL>if '<STR_LIT>' in line:<EOL><INDENT>savefig = True <EOL><DEDENT>continue<EOL><DEDENT>if line_stripped.startswith('<STR_LIT:#>'):<EOL><INDENT>output.extend([line])<EOL>continue<EOL><DEDENT>continuation = '<STR_LIT>'% '<STR_LIT>'.join(['<STR_LIT:.>']*(len(str(ct))+<NUM_LIT:2>))<EOL>if not multiline:<EOL><INDENT>modified = "<STR_LIT>" % (fmtin % ct, line_stripped)<EOL>output.append(modified)<EOL>ct += <NUM_LIT:1><EOL>try:<EOL><INDENT>ast.parse(line_stripped)<EOL>output.append('<STR_LIT>')<EOL><DEDENT>except Exception:<EOL><INDENT>multiline = True<EOL>multiline_start = lineno<EOL><DEDENT><DEDENT>else:<EOL><INDENT>modified = '<STR_LIT>' % (continuation, line)<EOL>output.append(modified)<EOL>try:<EOL><INDENT>ast.parse('<STR_LIT:\n>'.join(content[multiline_start:lineno+<NUM_LIT:1>]))<EOL>if (lineno < len(content) - <NUM_LIT:1> and<EOL>_count_indent(content[multiline_start]) <<EOL>_count_indent(content[lineno + <NUM_LIT:1>])):<EOL><INDENT>continue<EOL><DEDENT>output.extend([continuation, '<STR_LIT>'])<EOL>multiline = False<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>continue<EOL><DEDENT>return output<EOL>
|
content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were IPython code
|
f5151:c0:m11
|
def setup(app):
|
<EOL>pass<EOL>
|
Set up as a Sphinx extension.
|
f5152:m0
|
def __init__(self, weights_file: str=None, min_face_size: int=<NUM_LIT:20>, steps_threshold: list=None,<EOL>scale_factor: float=<NUM_LIT>):
|
if steps_threshold is None:<EOL><INDENT>steps_threshold = [<NUM_LIT>, <NUM_LIT>, <NUM_LIT>]<EOL><DEDENT>if weights_file is None:<EOL><INDENT>weights_file = pkg_resources.resource_stream('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>self.__min_face_size = min_face_size<EOL>self.__steps_threshold = steps_threshold<EOL>self.__scale_factor = scale_factor<EOL>config = tf.ConfigProto(log_device_placement=False)<EOL>config.gpu_options.allow_growth = True<EOL>self.__graph = tf.Graph()<EOL>with self.__graph.as_default():<EOL><INDENT>self.__session = tf.Session(config=config, graph=self.__graph)<EOL>weights = np.load(weights_file).item()<EOL>self.__pnet = PNet(self.__session, False)<EOL>self.__pnet.set_weights(weights['<STR_LIT>'])<EOL>self.__rnet = RNet(self.__session, False)<EOL>self.__rnet.set_weights(weights['<STR_LIT>'])<EOL>self.__onet = ONet(self.__session, False)<EOL>self.__onet.set_weights(weights['<STR_LIT>'])<EOL><DEDENT>weights_file.close()<EOL>
|
Initializes the MTCNN.
:param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load
the ones bundled with the package.
:param min_face_size: minimum size of the face to detect
:param steps_threshold: step's thresholds values
:param scale_factor: scale factor
|
f5158:c4:m0
|
@staticmethod<EOL><INDENT>def __scale_image(image, scale: float):<DEDENT>
|
height, width, _ = image.shape<EOL>width_scaled = int(np.ceil(width * scale))<EOL>height_scaled = int(np.ceil(height * scale))<EOL>im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)<EOL>im_data_normalized = (im_data - <NUM_LIT>) * <NUM_LIT><EOL>return im_data_normalized<EOL>
|
Scales the image to a given scale.
:param image:
:param scale:
:return:
|
f5158:c4:m4
|
@staticmethod<EOL><INDENT>def __nms(boxes, threshold, method):<DEDENT>
|
if boxes.size == <NUM_LIT:0>:<EOL><INDENT>return np.empty((<NUM_LIT:0>, <NUM_LIT:3>))<EOL><DEDENT>x1 = boxes[:, <NUM_LIT:0>]<EOL>y1 = boxes[:, <NUM_LIT:1>]<EOL>x2 = boxes[:, <NUM_LIT:2>]<EOL>y2 = boxes[:, <NUM_LIT:3>]<EOL>s = boxes[:, <NUM_LIT:4>]<EOL>area = (x2 - x1 + <NUM_LIT:1>) * (y2 - y1 + <NUM_LIT:1>)<EOL>sorted_s = np.argsort(s)<EOL>pick = np.zeros_like(s, dtype=np.int16)<EOL>counter = <NUM_LIT:0><EOL>while sorted_s.size > <NUM_LIT:0>:<EOL><INDENT>i = sorted_s[-<NUM_LIT:1>]<EOL>pick[counter] = i<EOL>counter += <NUM_LIT:1><EOL>idx = sorted_s[<NUM_LIT:0>:-<NUM_LIT:1>]<EOL>xx1 = np.maximum(x1[i], x1[idx])<EOL>yy1 = np.maximum(y1[i], y1[idx])<EOL>xx2 = np.minimum(x2[i], x2[idx])<EOL>yy2 = np.minimum(y2[i], y2[idx])<EOL>w = np.maximum(<NUM_LIT:0.0>, xx2 - xx1 + <NUM_LIT:1>)<EOL>h = np.maximum(<NUM_LIT:0.0>, yy2 - yy1 + <NUM_LIT:1>)<EOL>inter = w * h<EOL>if method == '<STR_LIT>':<EOL><INDENT>o = inter / np.minimum(area[i], area[idx])<EOL><DEDENT>else:<EOL><INDENT>o = inter / (area[i] + area[idx] - inter)<EOL><DEDENT>sorted_s = sorted_s[np.where(o <= threshold)]<EOL><DEDENT>pick = pick[<NUM_LIT:0>:counter]<EOL>return pick<EOL>
|
Non Maximum Suppression.
:param boxes: np array with bounding boxes.
:param threshold:
:param method: NMS method to apply. Available values ('Min', 'Union')
:return:
|
f5158:c4:m6
|
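The NMS docstring above describes greedy suppression with two overlap measures ('Min' and 'Union'). A minimal standalone NumPy sketch of the same idea, independent of the class above (names are illustrative):

```python
import numpy as np

def nms_sketch(boxes, threshold, method="Union"):
    """Greedy NMS over boxes given as [x1, y1, x2, y2, score] rows."""
    if boxes.size == 0:
        return np.empty((0,), dtype=int)
    x1, y1, x2, y2, scores = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(scores)          # ascending; best remaining box is last
    keep = []
    while order.size > 0:
        i = order[-1]                   # highest-scoring remaining box
        keep.append(i)
        rest = order[:-1]
        # intersection of box i with every remaining box
        w = np.maximum(0.0, np.minimum(x2[i], x2[rest]) - np.maximum(x1[i], x1[rest]) + 1)
        h = np.maximum(0.0, np.minimum(y2[i], y2[rest]) - np.maximum(y1[i], y1[rest]) + 1)
        inter = w * h
        if method == "Min":             # overlap relative to the smaller box
            overlap = inter / np.minimum(areas[i], areas[rest])
        else:                           # "Union": standard intersection-over-union
            overlap = inter / (areas[i] + areas[rest] - inter)
        order = rest[overlap <= threshold]
    return np.array(keep, dtype=int)
```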
def detect_faces(self, img) -> list:
|
if img is None or not hasattr(img, "<STR_LIT>"):<EOL><INDENT>raise InvalidImage("<STR_LIT>")<EOL><DEDENT>height, width, _ = img.shape<EOL>stage_status = StageStatus(width=width, height=height)<EOL>m = <NUM_LIT:12> / self.__min_face_size<EOL>min_layer = np.amin([height, width]) * m<EOL>scales = self.__compute_scale_pyramid(m, min_layer)<EOL>stages = [self.__stage1, self.__stage2, self.__stage3]<EOL>result = [scales, stage_status]<EOL>for stage in stages:<EOL><INDENT>result = stage(img, result[<NUM_LIT:0>], result[<NUM_LIT:1>])<EOL><DEDENT>[total_boxes, points] = result<EOL>bounding_boxes = []<EOL>for bounding_box, keypoints in zip(total_boxes, points.T):<EOL><INDENT>bounding_boxes.append({<EOL>'<STR_LIT>': [int(bounding_box[<NUM_LIT:0>]), int(bounding_box[<NUM_LIT:1>]),<EOL>int(bounding_box[<NUM_LIT:2>]-bounding_box[<NUM_LIT:0>]), int(bounding_box[<NUM_LIT:3>]-bounding_box[<NUM_LIT:1>])],<EOL>'<STR_LIT>': bounding_box[-<NUM_LIT:1>],<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': (int(keypoints[<NUM_LIT:0>]), int(keypoints[<NUM_LIT:5>])),<EOL>'<STR_LIT>': (int(keypoints[<NUM_LIT:1>]), int(keypoints[<NUM_LIT:6>])),<EOL>'<STR_LIT>': (int(keypoints[<NUM_LIT:2>]), int(keypoints[<NUM_LIT:7>])),<EOL>'<STR_LIT>': (int(keypoints[<NUM_LIT:3>]), int(keypoints[<NUM_LIT:8>])),<EOL>'<STR_LIT>': (int(keypoints[<NUM_LIT:4>]), int(keypoints[<NUM_LIT:9>])),<EOL>}<EOL>}<EOL>)<EOL><DEDENT>return bounding_boxes<EOL>
|
Detects bounding boxes from the specified image.
:param img: image to process
:return: list containing all the bounding boxes detected with their keypoints.
|
f5158:c4:m10
|
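Combined with the constructor earlier in this table, typical usage looks roughly like the following. This is only a sketch: the dict keys are elided as string literals in the body above, so the 'box', 'confidence', and 'keypoints' names (and the filename) below are assumptions based on common MTCNN packages.

```python
import cv2
from mtcnn import MTCNN  # assumed package/module name

detector = MTCNN()  # loads the bundled P/R/O-net weights by default
image = cv2.cvtColor(cv2.imread("photo.jpg"), cv2.COLOR_BGR2RGB)  # placeholder file

for face in detector.detect_faces(image):
    x, y, w, h = face["box"]          # assumed key names
    score = face["confidence"]
    keypoints = face["keypoints"]     # eyes, nose, mouth corners
    print(x, y, w, h, score, keypoints)
```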
def __stage1(self, image, scales: list, stage_status: StageStatus):
|
total_boxes = np.empty((<NUM_LIT:0>, <NUM_LIT:9>))<EOL>status = stage_status<EOL>for scale in scales:<EOL><INDENT>scaled_image = self.__scale_image(image, scale)<EOL>img_x = np.expand_dims(scaled_image, <NUM_LIT:0>)<EOL>img_y = np.transpose(img_x, (<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:3>))<EOL>out = self.__pnet.feed(img_y)<EOL>out0 = np.transpose(out[<NUM_LIT:0>], (<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:3>))<EOL>out1 = np.transpose(out[<NUM_LIT:1>], (<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:3>))<EOL>boxes, _ = self.__generate_bounding_box(out1[<NUM_LIT:0>, :, :, <NUM_LIT:1>].copy(),<EOL>out0[<NUM_LIT:0>, :, :, :].copy(), scale, self.__steps_threshold[<NUM_LIT:0>])<EOL>pick = self.__nms(boxes.copy(), <NUM_LIT:0.5>, '<STR_LIT>')<EOL>if boxes.size > <NUM_LIT:0> and pick.size > <NUM_LIT:0>:<EOL><INDENT>boxes = boxes[pick, :]<EOL>total_boxes = np.append(total_boxes, boxes, axis=<NUM_LIT:0>)<EOL><DEDENT><DEDENT>numboxes = total_boxes.shape[<NUM_LIT:0>]<EOL>if numboxes > <NUM_LIT:0>:<EOL><INDENT>pick = self.__nms(total_boxes.copy(), <NUM_LIT>, '<STR_LIT>')<EOL>total_boxes = total_boxes[pick, :]<EOL>regw = total_boxes[:, <NUM_LIT:2>] - total_boxes[:, <NUM_LIT:0>]<EOL>regh = total_boxes[:, <NUM_LIT:3>] - total_boxes[:, <NUM_LIT:1>]<EOL>qq1 = total_boxes[:, <NUM_LIT:0>] + total_boxes[:, <NUM_LIT:5>] * regw<EOL>qq2 = total_boxes[:, <NUM_LIT:1>] + total_boxes[:, <NUM_LIT:6>] * regh<EOL>qq3 = total_boxes[:, <NUM_LIT:2>] + total_boxes[:, <NUM_LIT:7>] * regw<EOL>qq4 = total_boxes[:, <NUM_LIT:3>] + total_boxes[:, <NUM_LIT:8>] * regh<EOL>total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, <NUM_LIT:4>]]))<EOL>total_boxes = self.__rerec(total_boxes.copy())<EOL>total_boxes[:, <NUM_LIT:0>:<NUM_LIT:4>] = np.fix(total_boxes[:, <NUM_LIT:0>:<NUM_LIT:4>]).astype(np.int32)<EOL>status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),<EOL>width=stage_status.width, height=stage_status.height)<EOL><DEDENT>return total_boxes, status<EOL>
|
First stage of the MTCNN.
:param image:
:param scales:
:param stage_status:
:return:
|
f5158:c4:m11
|
def __stage2(self, img, total_boxes, stage_status:StageStatus):
|
num_boxes = total_boxes.shape[<NUM_LIT:0>]<EOL>if num_boxes == <NUM_LIT:0>:<EOL><INDENT>return total_boxes, stage_status<EOL><DEDENT>tempimg = np.zeros(shape=(<NUM_LIT>, <NUM_LIT>, <NUM_LIT:3>, num_boxes))<EOL>for k in range(<NUM_LIT:0>, num_boxes):<EOL><INDENT>tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), <NUM_LIT:3>))<EOL>tmp[stage_status.dy[k] - <NUM_LIT:1>:stage_status.edy[k], stage_status.dx[k] - <NUM_LIT:1>:stage_status.edx[k], :] =img[stage_status.y[k] - <NUM_LIT:1>:stage_status.ey[k], stage_status.x[k] - <NUM_LIT:1>:stage_status.ex[k], :]<EOL>if tmp.shape[<NUM_LIT:0>] > <NUM_LIT:0> and tmp.shape[<NUM_LIT:1>] > <NUM_LIT:0> or tmp.shape[<NUM_LIT:0>] == <NUM_LIT:0> and tmp.shape[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>tempimg[:, :, :, k] = cv2.resize(tmp, (<NUM_LIT>, <NUM_LIT>), interpolation=cv2.INTER_AREA)<EOL><DEDENT>else:<EOL><INDENT>return np.empty(shape=(<NUM_LIT:0>,)), stage_status<EOL><DEDENT><DEDENT>tempimg = (tempimg - <NUM_LIT>) * <NUM_LIT><EOL>tempimg1 = np.transpose(tempimg, (<NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:2>))<EOL>out = self.__rnet.feed(tempimg1)<EOL>out0 = np.transpose(out[<NUM_LIT:0>])<EOL>out1 = np.transpose(out[<NUM_LIT:1>])<EOL>score = out1[<NUM_LIT:1>, :]<EOL>ipass = np.where(score > self.__steps_threshold[<NUM_LIT:1>])<EOL>total_boxes = np.hstack([total_boxes[ipass[<NUM_LIT:0>], <NUM_LIT:0>:<NUM_LIT:4>].copy(), np.expand_dims(score[ipass].copy(), <NUM_LIT:1>)])<EOL>mv = out0[:, ipass[<NUM_LIT:0>]]<EOL>if total_boxes.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>pick = self.__nms(total_boxes, <NUM_LIT>, '<STR_LIT>')<EOL>total_boxes = total_boxes[pick, :]<EOL>total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))<EOL>total_boxes = self.__rerec(total_boxes.copy())<EOL><DEDENT>return total_boxes, stage_status<EOL>
|
Second stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:
|
f5158:c4:m12
|
def __stage3(self, img, total_boxes, stage_status: StageStatus):
|
num_boxes = total_boxes.shape[<NUM_LIT:0>]<EOL>if num_boxes == <NUM_LIT:0>:<EOL><INDENT>return total_boxes, np.empty(shape=(<NUM_LIT:0>,))<EOL><DEDENT>total_boxes = np.fix(total_boxes).astype(np.int32)<EOL>status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),<EOL>width=stage_status.width, height=stage_status.height)<EOL>tempimg = np.zeros((<NUM_LIT>, <NUM_LIT>, <NUM_LIT:3>, num_boxes))<EOL>for k in range(<NUM_LIT:0>, num_boxes):<EOL><INDENT>tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), <NUM_LIT:3>))<EOL>tmp[status.dy[k] - <NUM_LIT:1>:status.edy[k], status.dx[k] - <NUM_LIT:1>:status.edx[k], :] =img[status.y[k] - <NUM_LIT:1>:status.ey[k], status.x[k] - <NUM_LIT:1>:status.ex[k], :]<EOL>if tmp.shape[<NUM_LIT:0>] > <NUM_LIT:0> and tmp.shape[<NUM_LIT:1>] > <NUM_LIT:0> or tmp.shape[<NUM_LIT:0>] == <NUM_LIT:0> and tmp.shape[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>tempimg[:, :, :, k] = cv2.resize(tmp, (<NUM_LIT>, <NUM_LIT>), interpolation=cv2.INTER_AREA)<EOL><DEDENT>else:<EOL><INDENT>return np.empty(shape=(<NUM_LIT:0>,)), np.empty(shape=(<NUM_LIT:0>,))<EOL><DEDENT><DEDENT>tempimg = (tempimg - <NUM_LIT>) * <NUM_LIT><EOL>tempimg1 = np.transpose(tempimg, (<NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:2>))<EOL>out = self.__onet.feed(tempimg1)<EOL>out0 = np.transpose(out[<NUM_LIT:0>])<EOL>out1 = np.transpose(out[<NUM_LIT:1>])<EOL>out2 = np.transpose(out[<NUM_LIT:2>])<EOL>score = out2[<NUM_LIT:1>, :]<EOL>points = out1<EOL>ipass = np.where(score > self.__steps_threshold[<NUM_LIT:2>])<EOL>points = points[:, ipass[<NUM_LIT:0>]]<EOL>total_boxes = np.hstack([total_boxes[ipass[<NUM_LIT:0>], <NUM_LIT:0>:<NUM_LIT:4>].copy(), np.expand_dims(score[ipass].copy(), <NUM_LIT:1>)])<EOL>mv = out0[:, ipass[<NUM_LIT:0>]]<EOL>w = total_boxes[:, <NUM_LIT:2>] - total_boxes[:, <NUM_LIT:0>] + <NUM_LIT:1><EOL>h = total_boxes[:, <NUM_LIT:3>] - total_boxes[:, <NUM_LIT:1>] + <NUM_LIT:1><EOL>points[<NUM_LIT:0>:<NUM_LIT:5>, :] = np.tile(w, (<NUM_LIT:5>, <NUM_LIT:1>)) * points[<NUM_LIT:0>:<NUM_LIT:5>, :] + np.tile(total_boxes[:, <NUM_LIT:0>], (<NUM_LIT:5>, <NUM_LIT:1>)) - <NUM_LIT:1><EOL>points[<NUM_LIT:5>:<NUM_LIT:10>, :] = np.tile(h, (<NUM_LIT:5>, <NUM_LIT:1>)) * points[<NUM_LIT:5>:<NUM_LIT:10>, :] + np.tile(total_boxes[:, <NUM_LIT:1>], (<NUM_LIT:5>, <NUM_LIT:1>)) - <NUM_LIT:1><EOL>if total_boxes.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))<EOL>pick = self.__nms(total_boxes.copy(), <NUM_LIT>, '<STR_LIT>')<EOL>total_boxes = total_boxes[pick, :]<EOL>points = points[:, pick]<EOL><DEDENT>return total_boxes, points<EOL>
|
Third stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:
|
f5158:c4:m13
|
def __init__(self, session, trainable: bool=True):
|
self._session = session<EOL>self.__trainable = trainable<EOL>self.__layers = {}<EOL>self.__last_layer_name = None<EOL>with tf.variable_scope(self.__class__.__name__.lower()):<EOL><INDENT>self._config()<EOL><DEDENT>
|
Initializes the network.
:param trainable: flag to determine if this network should be trainable or not.
|
f5159:c0:m0
|
def _config(self):
|
raise NotImplementedError("<STR_LIT>")<EOL>
|
Configures the network layers.
It is usually done using the LayerFactory() class.
|
f5159:c0:m1
|
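A sketch of what a concrete subclass's `_config` might look like using the `LayerFactory` methods documented later in this table (`new_feed`, `new_conv`, `new_prelu`, `new_max_pool`, `new_softmax`). The import paths, the `LayerFactory(self)` constructor, and all layer names and sizes are illustrative assumptions, not the actual PNet/RNet/ONet definitions.

```python
from mtcnn.network import Network              # assumed module path
from mtcnn.layer_factory import LayerFactory   # assumed module path


class TinyNet(Network):
    """Toy network showing the _config() + LayerFactory pattern."""

    def _config(self):
        factory = LayerFactory(self)  # assumed constructor: the factory wraps this network

        factory.new_feed("input", layer_shape=(None, 12, 12, 3))
        factory.new_conv("conv1", kernel_size=(3, 3), channels_output=10, stride_size=(1, 1))
        factory.new_prelu("prelu1")
        factory.new_max_pool("pool1", kernel_size=(2, 2), stride_size=(2, 2))
        factory.new_conv("conv2", kernel_size=(1, 1), channels_output=2,
                         stride_size=(1, 1), relu=False)
        factory.new_softmax("prob", axis=3)
```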
def add_layer(self, name: str, layer_output):
|
self.__layers[name] = layer_output<EOL>self.__last_layer_name = name<EOL>
|
Adds a layer to the network.
:param name: name of the layer to add
:param layer_output: output layer.
|
f5159:c0:m2
|
def get_layer(self, name: str=None):
|
if name is None:<EOL><INDENT>name = self.__last_layer_name<EOL><DEDENT>return self.__layers[name]<EOL>
|
Retrieves the layer by its name.
:param name: name of the layer to retrieve. If name is None, it will retrieve the last layer added to the
network.
:return: layer output
|
f5159:c0:m3
|
def is_trainable(self):
|
return self.__trainable<EOL>
|
Getter for the trainable flag.
|
f5159:c0:m4
|
def set_weights(self, weights_values: dict, ignore_missing=False):
|
network_name = self.__class__.__name__.lower()<EOL>with tf.variable_scope(network_name):<EOL><INDENT>for layer_name in weights_values:<EOL><INDENT>with tf.variable_scope(layer_name, reuse=True):<EOL><INDENT>for param_name, data in weights_values[layer_name].items():<EOL><INDENT>try:<EOL><INDENT>var = tf.get_variable(param_name)<EOL>self._session.run(var.assign(data))<EOL><DEDENT>except ValueError:<EOL><INDENT>if not ignore_missing:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>
|
Sets the weights values of the network.
:param weights_values: dictionary with weights for each layer
:param ignore_missing: if True, skip weights whose variables are not found in the graph instead of raising
|
f5159:c0:m5
|
def feed(self, image):
|
network_name = self.__class__.__name__.lower()<EOL>with tf.variable_scope(network_name):<EOL><INDENT>return self._feed(image)<EOL><DEDENT>
|
Feeds the network with an image
:param image: image (perhaps loaded with CV2)
:return: network result
|
f5159:c0:m6
|
def __make_var(self, name: str, shape: list):
|
return tf.get_variable(name, shape, trainable=self.__network.is_trainable())<EOL>
|
Creates a tensorflow variable with the given name and shape.
:param name: name to set for the variable.
:param shape: list defining the shape of the variable.
:return: created TF variable.
|
f5160:c0:m4
|
def new_feed(self, name: str, layer_shape: tuple):
|
feed_data = tf.placeholder(tf.float32, layer_shape, '<STR_LIT:input>')<EOL>self.__network.add_layer(name, layer_output=feed_data)<EOL>
|
Creates a feed layer. This is usually the first layer in the network.
:param name: name of the layer
:param layer_shape: shape of the input placeholder (tuple)
:return:
|
f5160:c0:m5
|
def new_conv(self, name: str, kernel_size: tuple, channels_output: int,<EOL>stride_size: tuple, padding: str='<STR_LIT>',<EOL>group: int=<NUM_LIT:1>, biased: bool=True, relu: bool=True, input_layer_name: str=None):
|
<EOL>self.__validate_padding(padding)<EOL>input_layer = self.__network.get_layer(input_layer_name)<EOL>channels_input = int(input_layer.get_shape()[-<NUM_LIT:1>])<EOL>self.__validate_grouping(channels_input, channels_output, group)<EOL>convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [<NUM_LIT:1>, stride_size[<NUM_LIT:1>], stride_size[<NUM_LIT:0>], <NUM_LIT:1>],<EOL>padding=padding)<EOL>with tf.variable_scope(name) as scope:<EOL><INDENT>kernel = self.__make_var('<STR_LIT>', shape=[kernel_size[<NUM_LIT:1>], kernel_size[<NUM_LIT:0>], channels_input // group, channels_output])<EOL>output = convolve(input_layer, kernel)<EOL>if biased:<EOL><INDENT>biases = self.__make_var('<STR_LIT>', [channels_output])<EOL>output = tf.nn.bias_add(output, biases)<EOL><DEDENT>if relu:<EOL><INDENT>output = tf.nn.relu(output, name=scope.name)<EOL><DEDENT><DEDENT>self.__network.add_layer(name, layer_output=output)<EOL>
|
Creates a convolution layer for the network.
:param name: name for the layer
:param kernel_size: tuple containing the size of the kernel (Width, Height)
:param channels_output: number of output channels; also used as the size of the bias.
:param stride_size: tuple containing the size of the stride (Width, Height)
:param padding: Type of padding. Available values are: ('SAME', 'VALID')
:param group: number of groups for the (grouped) convolution; the input channels are divided evenly among them.
:param biased: boolean flag to set if biased or not.
:param relu: boolean flag to set if ReLu should be applied at the end of the layer or not.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
|
f5160:c0:m6
|
def new_prelu(self, name: str, input_layer_name: str=None):
|
input_layer = self.__network.get_layer(input_layer_name)<EOL>with tf.variable_scope(name):<EOL><INDENT>channels_input = int(input_layer.get_shape()[-<NUM_LIT:1>])<EOL>alpha = self.__make_var('<STR_LIT>', shape=[channels_input])<EOL>output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer))<EOL><DEDENT>self.__network.add_layer(name, layer_output=output)<EOL>
|
Creates a new prelu layer with the given name and input.
:param name: name for this layer.
:param input_layer_name: name of the layer that serves as input for this one.
|
f5160:c0:m7
|
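The PReLU body above uses the identity relu(x) + alpha * (-relu(-x)), i.e. x for positive inputs and alpha * x otherwise. A tiny NumPy sketch of the same function:

```python
import numpy as np

def prelu_sketch(x, alpha):
    """PReLU: x for x > 0, alpha * x otherwise.

    Same identity as above: relu(x) + alpha * (-relu(-x)) == max(x, 0) + alpha * min(x, 0).
    """
    return np.maximum(x, 0.0) + alpha * np.minimum(x, 0.0)

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(prelu_sketch(x, alpha=0.25))  # [-0.5, -0.125, 0.0, 1.5]
```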
def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='<STR_LIT>',<EOL>input_layer_name: str=None):
|
self.__validate_padding(padding)<EOL>input_layer = self.__network.get_layer(input_layer_name)<EOL>output = tf.nn.max_pool(input_layer,<EOL>ksize=[<NUM_LIT:1>, kernel_size[<NUM_LIT:1>], kernel_size[<NUM_LIT:0>], <NUM_LIT:1>],<EOL>strides=[<NUM_LIT:1>, stride_size[<NUM_LIT:1>], stride_size[<NUM_LIT:0>], <NUM_LIT:1>],<EOL>padding=padding,<EOL>name=name)<EOL>self.__network.add_layer(name, layer_output=output)<EOL>
|
Creates a new max pooling layer.
:param name: name for the layer.
:param kernel_size: tuple containing the size of the kernel (Width, Height)
:param stride_size: tuple containing the size of the stride (Width, Height)
:param padding: Type of padding. Available values are: ('SAME', 'VALID')
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
|
f5160:c0:m8
|
def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None):
|
with tf.variable_scope(name):<EOL><INDENT>input_layer = self.__network.get_layer(input_layer_name)<EOL>vectorized_input, dimension = self.vectorize_input(input_layer)<EOL>weights = self.__make_var('<STR_LIT>', shape=[dimension, output_count])<EOL>biases = self.__make_var('<STR_LIT>', shape=[output_count])<EOL>operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b<EOL>fc = operation(vectorized_input, weights, biases, name=name)<EOL><DEDENT>self.__network.add_layer(name, layer_output=fc)<EOL>
|
Creates a new fully connected layer.
:param name: name for the layer.
:param output_count: number of outputs of the fully connected layer.
:param relu: boolean flag to set if ReLu should be applied at the end of this layer.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
|
f5160:c0:m9
|
def new_softmax(self, name, axis, input_layer_name: str=None):
|
input_layer = self.__network.get_layer(input_layer_name)<EOL>if LooseVersion(tf.__version__) < LooseVersion("<STR_LIT>"):<EOL><INDENT>max_axis = tf.reduce_max(input_layer, axis, keep_dims=True)<EOL>target_exp = tf.exp(input_layer - max_axis)<EOL>normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)<EOL><DEDENT>else:<EOL><INDENT>max_axis = tf.reduce_max(input_layer, axis, keepdims=True)<EOL>target_exp = tf.exp(input_layer - max_axis)<EOL>normalize = tf.reduce_sum(target_exp, axis, keepdims=True)<EOL><DEDENT>softmax = tf.div(target_exp, normalize, name)<EOL>self.__network.add_layer(name, layer_output=softmax)<EOL>
|
Creates a new softmax layer
:param name: name to set for the layer
:param axis: axis along which the softmax is computed.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
the network.
|
f5160:c0:m10
|
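Apart from the keep_dims/keepdims rename across TensorFlow versions, the softmax body above is the standard numerically stable formulation: subtract the per-axis maximum before exponentiating so exp() cannot overflow. A NumPy sketch of the same computation:

```python
import numpy as np

def stable_softmax(x, axis=-1):
    """Softmax with the max subtracted first so exp() cannot overflow."""
    x_max = np.max(x, axis=axis, keepdims=True)
    e = np.exp(x - x_max)
    return e / np.sum(e, axis=axis, keepdims=True)

print(stable_softmax(np.array([1000.0, 1001.0, 1002.0])))  # ~[0.09, 0.24, 0.67]
```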
def dameraulevenshtein(seq1, seq2):
|
<EOL>oneago = None<EOL>thisrow = list(range_(<NUM_LIT:1>, len(seq2) + <NUM_LIT:1>)) + [<NUM_LIT:0>]<EOL>for x in range_(len(seq1)):<EOL><INDENT>twoago, oneago, thisrow = oneago, thisrow, [<NUM_LIT:0>] * len(seq2) + [x + <NUM_LIT:1>]<EOL>for y in range_(len(seq2)):<EOL><INDENT>delcost = oneago[y] + <NUM_LIT:1><EOL>addcost = thisrow[y - <NUM_LIT:1>] + <NUM_LIT:1><EOL>subcost = oneago[y - <NUM_LIT:1>] + (seq1[x] != seq2[y])<EOL>thisrow[y] = min(delcost, addcost, subcost)<EOL>if (x > <NUM_LIT:0> and y > <NUM_LIT:0> and seq1[x] == seq2[y - <NUM_LIT:1>] and<EOL>seq1[x - <NUM_LIT:1>] == seq2[y] and seq1[x] != seq2[y]):<EOL><INDENT>thisrow[y] = min(thisrow[y], twoago[y - <NUM_LIT:2>] + <NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>return thisrow[len(seq2) - <NUM_LIT:1>]<EOL>
|
Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2
|
f5162:m0
|
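For reference, a cleaner un-templated sketch of the same restricted Damerau-Levenshtein (optimal string alignment) algorithm, written with a full DP matrix for clarity rather than the O(M)-space rolling rows used above; it reproduces the doctest values:

```python
def osa_distance(seq1, seq2):
    """Optimal string alignment (restricted Damerau-Levenshtein) distance."""
    n, m = len(seq1), len(seq2)
    # d[i][j] = distance between seq1[:i] and seq2[:j]
    d = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        d[i][0] = i
    for j in range(m + 1):
        d[0][j] = j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = 0 if seq1[i - 1] == seq2[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution
            if (i > 1 and j > 1 and seq1[i - 1] == seq2[j - 2]
                    and seq1[i - 2] == seq2[j - 1]):
                d[i][j] = min(d[i][j], d[i - 2][j - 2] + 1)  # transposition
    return d[n][m]

assert osa_distance('ba', 'abc') == 2
assert osa_distance('fee', 'deed') == 2
assert osa_distance('abcd', ['b', 'a', 'c', 'd', 'e']) == 2
```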
def evaluation_metrics(predicted, actual, bow=True):
|
if bow:<EOL><INDENT>p = set(predicted)<EOL>a = set(actual)<EOL>true_positive = <NUM_LIT:0><EOL>for token in p:<EOL><INDENT>if token in a:<EOL><INDENT>true_positive += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>from collections import defaultdict<EOL>act = defaultdict(lambda: <NUM_LIT:0>)<EOL>for token in actual:<EOL><INDENT>act[token] += <NUM_LIT:1><EOL><DEDENT>true_positive = <NUM_LIT:0><EOL>for token in predicted:<EOL><INDENT>if act[token] > <NUM_LIT:0>:<EOL><INDENT>true_positive += <NUM_LIT:1><EOL>act[token] -= <NUM_LIT:1><EOL><DEDENT><DEDENT>p = predicted<EOL>a = actual<EOL><DEDENT>try:<EOL><INDENT>precision = true_positive / len(p)<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>precision = <NUM_LIT:0.0><EOL><DEDENT>try:<EOL><INDENT>recall = true_positive / len(a)<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>recall = <NUM_LIT:0.0><EOL><DEDENT>try:<EOL><INDENT>f1 = <NUM_LIT> * (precision * recall) / (precision + recall)<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>f1 = <NUM_LIT:0.0><EOL><DEDENT>return (precision, recall, f1)<EOL>
|
Input:
predicted, actual = lists of the predicted and actual tokens
bow: if true use bag of words assumption
Returns:
precision, recall, F1
|
f5162:m1
|
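Under the bag-of-words setting described above, precision is true positives over predicted tokens, recall is true positives over actual tokens, and F1 is their harmonic mean. A tiny worked example:

```python
predicted = ["the", "cat", "sat", "mat"]
actual = ["the", "cat", "sat", "on", "the", "mat"]

p, a = set(predicted), set(actual)                   # bag-of-words: unique tokens
true_positive = len(p & a)                           # 4: the, cat, sat, mat
precision = true_positive / len(p)                   # 4 / 4 = 1.0
recall = true_positive / len(a)                      # 4 / 5 = 0.8
f1 = 2 * precision * recall / (precision + recall)   # ~0.889
print(precision, recall, f1)
```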
def get_and_union_features(features):
|
if not features:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if isinstance(features, (list, tuple)):<EOL><INDENT>if isinstance(features[<NUM_LIT:0>], tuple):<EOL><INDENT>return FeatureUnion(features)<EOL><DEDENT>elif isinstance(features[<NUM_LIT:0>], string_):<EOL><INDENT>return FeatureUnion([(feature, get_feature(feature)) for feature in features])<EOL><DEDENT>else:<EOL><INDENT>return make_union(*features)<EOL><DEDENT><DEDENT>elif isinstance(features, string_):<EOL><INDENT>return get_feature(features)<EOL><DEDENT>else:<EOL><INDENT>return features<EOL><DEDENT>
|
Get and combine features in a :class:`FeatureUnion`.
Args:
features (str or List[str], ``Features`` or List[``Features``], or List[Tuple[str, ``Features``]]):
One or more features to be used to transform blocks into a matrix of
numeric values. If more than one, a :class:`FeatureUnion` is
automatically constructed. Example inputs::
features = 'weninger'
features = ['weninger', 'kohlschuetter']
features = WeningerFeatures()
features = [WeningerFeatures(), KohlschuetterFeatures()]
features = [('weninger', WeningerFeatures()), ('kohlschuetter', KohlschuetterFeatures())]
Returns:
:class:`FeatureUnion` or ``Features``
|
f5162:m2
|
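The docstring lists the accepted input shapes; the branches ultimately build a scikit-learn FeatureUnion. A minimal sketch of the two union-building paths using stand-in transformers (the dragnet feature classes and the get_feature registry are not shown here):

```python
from sklearn.pipeline import FeatureUnion, make_union
from sklearn.preprocessing import MinMaxScaler, StandardScaler  # stand-in transformers

# List of (name, transformer) tuples -> FeatureUnion built as-is
union = FeatureUnion([("scale", StandardScaler()), ("minmax", MinMaxScaler())])

# Bare list of transformer instances -> names generated automatically
union_auto = make_union(StandardScaler(), MinMaxScaler())

print(union.transformer_list[0][0])       # "scale"
print(union_auto.transformer_list[0][0])  # auto-generated, e.g. "standardscaler"
```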
def load_pickled_model(filename, dirname=None):
|
if dirname is None:<EOL><INDENT>pkg_filename = pkgutil.get_loader('<STR_LIT>').get_filename('<STR_LIT>')<EOL>pkg_dirname = os.path.dirname(pkg_filename)<EOL>dirname = os.path.join(pkg_dirname, '<STR_LIT>', model_path)<EOL><DEDENT>filepath = os.path.join(dirname, filename)<EOL>return joblib.load(filepath)<EOL>
|
Load a pickled ``Extractor`` model from disk.
Args:
filename (str): Name of pickled model file under ``dirname``.
dirname (str): Name of directory on disk containing the pickled model.
If None, dragnet's default pickled model directory is used:
/path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION]
Returns:
:class:`dragnet.extractor.Extractor`
|
f5162:m3
|
def str_cast(maybe_bytes, encoding='<STR_LIT:utf-8>'):
|
if isinstance(maybe_bytes, bytes_):<EOL><INDENT>return maybe_bytes.decode(encoding)<EOL><DEDENT>else:<EOL><INDENT>return maybe_bytes<EOL><DEDENT>
|
Converts any bytes-like input to a string-like output, with respect to
python version
Parameters
----------
maybe_bytes : if this is a bytes-like object, it will be converted to a string
encoding : str, default='utf-8'
encoding to be used when decoding bytes
|
f5163:m0
|
def bytes_cast(maybe_str, encoding='<STR_LIT:utf-8>'):
|
if isinstance(maybe_str, unicode_):<EOL><INDENT>return maybe_str.encode(encoding)<EOL><DEDENT>else:<EOL><INDENT>return maybe_str<EOL><DEDENT>
|
Converts any string-like input to a bytes-like output, with respect to
python version
Parameters
----------
maybe_str : if this is a string-like object, it will be converted to bytes
encoding : str, default='utf-8'
encoding to be used when encoding string
|
f5163:m1
|
def str_list_cast(list_, **kwargs):
|
return [str_cast(elem, **kwargs) for elem in list_]<EOL>
|
Converts any bytes-like items in input list to string-like values, with
respect to python version
Parameters
----------
list_ : list
any bytes-like objects contained in the list will be converted to
strings
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when decoding bytes
|
f5163:m2
|
def bytes_list_cast(list_, **kwargs):
|
return [bytes_cast(elem, **kwargs) for elem in list_]<EOL>
|
Converts any string-like items in input list to bytes-like values, with
respect to python version
Parameters
----------
list_ : list
any string-like objects contained in the list will be converted to bytes
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when encoding string
|
f5163:m3
|
def str_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs):
|
new_keys = str_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys()<EOL>new_vals = str_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values()<EOL>new_dict = dict(zip_(new_keys, new_vals))<EOL>return new_dict<EOL>
|
Converts any bytes-like items in input dict to string-like values, with
respect to python version
Parameters
----------
dict_ : dict
any bytes-like objects contained in the dict will be converted to a
string
include_keys : bool, default=True
if True, cast keys to a string, else ignore
include_vals : bool, default=True
if True, cast values to a string, else ignore
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when decoding bytes
|
f5163:m4
|
def bytes_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs):
|
new_keys = bytes_list_cast(dict_.keys(), **kwargs) if include_keys else dict_.keys()<EOL>new_vals = bytes_list_cast(dict_.values(), **kwargs) if include_vals else dict_.values()<EOL>new_dict = dict(zip_(new_keys, new_vals))<EOL>return new_dict<EOL>
|
Converts any string-like items in input dict to bytes-like values, with
respect to python version
Parameters
----------
dict_ : dict
any string-like objects contained in the dict will be converted to bytes
include_keys : bool, default=True
if True, cast keys to bytes, else ignore
include_vals : bool, default=True
if True, cast values to bytes, else ignore
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when encoding string
|
f5163:m5
|
def str_block_cast(block,<EOL>include_text=True,<EOL>include_link_tokens=True,<EOL>include_css=True,<EOL>include_features=True,<EOL>**kwargs):
|
if include_text:<EOL><INDENT>block.text = str_cast(block.text, **kwargs)<EOL><DEDENT>if include_link_tokens:<EOL><INDENT>block.link_tokens = str_list_cast(block.link_tokens, **kwargs)<EOL><DEDENT>if include_css:<EOL><INDENT>block.css = str_dict_cast(block.css, **kwargs)<EOL><DEDENT>if include_features:<EOL><INDENT>block.features = str_dict_cast(block.features, **kwargs)<EOL><DEDENT>return block<EOL>
|
Converts any bytes-like items in input Block object to string-like values,
with respect to python version
Parameters
----------
block : blocks.Block
any bytes-like objects contained in the block object will be converted
to a string
include_text : bool, default=True
if True, cast text to a string, else ignore
include_link_tokens : bool, default=True
if True, cast link_tokens to a string, else ignore
include_css : bool, default=True
if True, cast css to a string, else ignore
include_features : bool, default=True
if True, cast features to a string, else ignore
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when decoding bytes
|
f5163:m6
|
def bytes_block_cast(block,<EOL>include_text=True,<EOL>include_link_tokens=True,<EOL>include_css=True,<EOL>include_features=True,<EOL>**kwargs):
|
if include_text:<EOL><INDENT>block.text = bytes_cast(block.text, **kwargs)<EOL><DEDENT>if include_link_tokens:<EOL><INDENT>block.link_tokens = bytes_list_cast(block.link_tokens, **kwargs)<EOL><DEDENT>if include_css:<EOL><INDENT>block.css = bytes_dict_cast(block.css, **kwargs)<EOL><DEDENT>if include_features:<EOL><INDENT>block.features = bytes_dict_cast(block.features, **kwargs)<EOL><DEDENT>return block<EOL>
|
Converts any string-like items in input Block object to bytes-like values,
with respect to python version
Parameters
----------
block : blocks.Block
any string-like objects contained in the block object will be converted
to bytes
include_text : bool, default=True
if True, cast text to bytes, else ignore
include_link_tokens : bool, default=True
if True, cast link_tokens to bytes, else ignore
include_css : bool, default=True
if True, cast css to bytes, else ignore
include_features : bool, default=True
if True, cast features to bytes, else ignore
kwargs:
encoding: str, default: 'utf-8'
encoding to be used when encoding string
|
f5163:m7
|
def str_block_list_cast(blocks, **kwargs):
|
return [str_block_cast(block, **kwargs) for block in blocks]<EOL>
|
Converts any bytes-like items in an input list of Blocks to string-like values,
with respect to python version
Parameters
----------
blocks : list[blocks.Block]
any bytes-like objects contained in the block object will be converted
to a string
kwargs:
include_text : bool, default=True
if True, cast text to a string, else ignore
include_link_tokens : bool, default=True
if True, cast link_tokens to a string, else ignore
include_css : bool, default=True
if True, cast css to a string, else ignore
include_features : bool, default=True
if True, cast features to a string, else ignore
encoding: str, default: 'utf-8'
encoding to be used when decoding bytes
|
f5163:m8
|
def bytes_block_list_cast(blocks, **kwargs):
|
return [bytes_block_cast(block, **kwargs) for block in blocks]<EOL>
|
Converts any string-like items in an input list of Blocks to bytes-like values,
with respect to python version
Parameters
----------
blocks : list[blocks.Block]
any string-like objects contained in the block object will be converted
to bytes
kwargs:
include_text : bool, default=True
if True, cast text to bytes, else ignore
include_link_tokens : bool, default=True
if True, cast link_tokens to bytes, else ignore
include_css : bool, default=True
if True, cast css to bytes, else ignore
include_features : bool, default=True
if True, cast features to bytes, else ignore
encoding: str, default: 'utf-8'
encoding to be used when decoding bytes
|
f5163:m9
|
def evaluate_model_predictions(y_true, y_pred, weights=None):
|
if isinstance(y_pred[<NUM_LIT:0>], np.ndarray):<EOL><INDENT>y_pred = np.concatenate(y_pred)<EOL><DEDENT>if isinstance(y_true[<NUM_LIT:0>], np.ndarray):<EOL><INDENT>y_true = np.concatenate(y_true)<EOL><DEDENT>if (weights is not None) and (isinstance(weights[<NUM_LIT:0>], np.ndarray)):<EOL><INDENT>weights = np.concatenate(weights)<EOL><DEDENT>accuracy = accuracy_score(<EOL>y_true, y_pred, normalize=True, sample_weight=weights)<EOL>precision = precision_score(<EOL>y_true, y_pred, average='<STR_LIT>', pos_label=<NUM_LIT:1>, sample_weight=weights)<EOL>recall = recall_score(<EOL>y_true, y_pred, average='<STR_LIT>', pos_label=<NUM_LIT:1>, sample_weight=weights)<EOL>f1 = f1_score(<EOL>y_true, y_pred, average='<STR_LIT>', pos_label=<NUM_LIT:1>, sample_weight=weights)<EOL>return {'<STR_LIT>': accuracy, '<STR_LIT>': precision, '<STR_LIT>': recall, '<STR_LIT>': f1}<EOL>
|
Evaluate the performance of an extractor model's binary classification
predictions, typically at the block level, of whether a block is content
or not.
Args:
y_true (``np.ndarray``)
y_pred (``np.ndarray``)
weights (``np.ndarray``)
Returns:
Dict[str, float]
|
f5164:m0
|
def evaluate_extracted_tokens(gold_content, extr_content):
|
if isinstance(gold_content, string_):<EOL><INDENT>gold_content = simple_tokenizer(gold_content)<EOL><DEDENT>if isinstance(extr_content, string_):<EOL><INDENT>extr_content = simple_tokenizer(extr_content)<EOL><DEDENT>gold_set = set(gold_content)<EOL>extr_set = set(extr_content)<EOL>jaccard = len(gold_set & extr_set) / len(gold_set | extr_set)<EOL>levenshtein = dameraulevenshtein(gold_content, extr_content)<EOL>return {'<STR_LIT>': jaccard, '<STR_LIT>': levenshtein}<EOL>
|
Evaluate the similarity between gold-standard and extracted content,
typically for a single HTML document, as another way of evaluating the
performance of an extractor model.
Args:
gold_content (str or Sequence[str]): Gold-standard content, either as a
string or as an already-tokenized list of tokens.
extr_content (str or Sequence[str]): Extracted content, either as a
string or as an already-tokenized list of tokens.
Returns:
Dict[str, float]
|
f5164:m1
|
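The Jaccard score reported here is the size of the intersection of the two token sets divided by the size of their union, for example:

```python
gold = ["the", "quick", "brown", "fox"]
extracted = ["the", "quick", "red", "fox", "jumps"]

gold_set, extr_set = set(gold), set(extracted)
jaccard = len(gold_set & extr_set) / len(gold_set | extr_set)
print(jaccard)  # 3 shared tokens / 6 distinct tokens = 0.5
```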
def train_model(extractor, data_dir, output_dir=None):
|
<EOL>output_dir, fname_prefix = _set_up_output_dir_and_fname_prefix(output_dir, extractor)<EOL>logging.info('<STR_LIT>')<EOL>data = prepare_all_data(data_dir)<EOL>training_data, test_data = train_test_split(<EOL>data, test_size=<NUM_LIT>, random_state=<NUM_LIT>)<EOL>train_html, train_labels, train_weights = extractor.get_html_labels_weights(training_data)<EOL>test_html, test_labels, test_weights = extractor.get_html_labels_weights(test_data)<EOL>logging.info('<STR_LIT>')<EOL>try:<EOL><INDENT>extractor.fit(train_html, train_labels, weights=train_weights)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>extractor.fit(train_html, train_labels)<EOL><DEDENT>train_eval = evaluate_model_predictions(<EOL>np.concatenate(train_labels), extractor.predict(train_html),<EOL>np.concatenate(train_weights))<EOL>test_eval = evaluate_model_predictions(<EOL>np.concatenate(test_labels), extractor.predict(test_html),<EOL>np.concatenate(test_weights))<EOL>_report_model_performance(output_dir, fname_prefix, train_eval, test_eval)<EOL>_write_model_to_disk(output_dir, fname_prefix, extractor)<EOL>return extractor<EOL>
|
Train an extractor model, then write train/test block-level classification
performance as well as the model itself to disk in ``output_dir``.
Args:
extractor (:class:`Extractor`): Instance of the ``Extractor`` class to
be trained.
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html and gold standard blocks files
output_dir (str): Directory on disk to which the trained model files,
errors, etc. are to be written. If None, outputs are not saved.
Returns:
:class:`Extractor`: A trained extractor model.
|
f5164:m2
|
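Typical usage is a sketch along these lines; the Extractor constructor arguments and module paths below follow the conventions implied by this file but should be treated as assumptions:

```python
from sklearn.ensemble import ExtraTreesClassifier

from dragnet.extractor import Extractor          # assumed module path
from dragnet.model_training import train_model   # assumed module path

base_extractor = Extractor(
    to_extract="content",                         # assumed keyword arguments
    model=ExtraTreesClassifier(n_estimators=100),
)

# data_dir must contain the raw HTML and gold-standard blocks subdirectories;
# the performance report and pickled model are written under output_dir.
trained = train_model(base_extractor, data_dir="datafiles/", output_dir="models/")
```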
def train_many_models(extractor, param_grid, data_dir, output_dir=None,<EOL>**kwargs):
|
<EOL>output_dir, fname_prefix = _set_up_output_dir_and_fname_prefix(output_dir, extractor)<EOL>logging.info('<STR_LIT>')<EOL>data = prepare_all_data(data_dir)<EOL>training_data, test_data = train_test_split(<EOL>data, test_size=<NUM_LIT>, random_state=<NUM_LIT>)<EOL>train_html, train_labels, train_weights = extractor.get_html_labels_weights(training_data)<EOL>test_html, test_labels, test_weights = extractor.get_html_labels_weights(test_data)<EOL>train_blocks = np.array([extractor.blockifier.blockify(doc)<EOL>for doc in train_html])<EOL>train_mask = [extractor._has_enough_blocks(blocks) for blocks in train_blocks]<EOL>train_blocks = train_blocks[train_mask]<EOL>train_labels = np.concatenate(train_labels[train_mask])<EOL>train_weights = np.concatenate(train_weights[train_mask])<EOL>test_labels = np.concatenate(test_labels)<EOL>test_weights = np.concatenate(test_weights)<EOL>train_features = np.concatenate([extractor.features.fit_transform(blocks)<EOL>for blocks in train_blocks])<EOL>gscv = GridSearchCV(<EOL>extractor.model, param_grid, fit_params={'<STR_LIT>': train_weights},<EOL>scoring=kwargs.get('<STR_LIT>', '<STR_LIT>'), cv=kwargs.get('<STR_LIT>', <NUM_LIT:5>),<EOL>n_jobs=kwargs.get('<STR_LIT>', <NUM_LIT:1>), verbose=kwargs.get('<STR_LIT>', <NUM_LIT:1>))<EOL>gscv = gscv.fit(train_features, train_labels)<EOL>logging.info('<STR_LIT>', gscv.best_score_)<EOL>logging.info('<STR_LIT>', gscv.best_params_)<EOL>extractor.model = gscv.best_estimator_<EOL>train_eval = evaluate_model_predictions(<EOL>train_labels, extractor.predict(train_html[train_mask]), weights=train_weights)<EOL>test_eval = evaluate_model_predictions(<EOL>test_labels, extractor.predict(test_html), weights=test_weights)<EOL>_write_model_to_disk(output_dir, fname_prefix, extractor)<EOL>return extractor<EOL>
|
Train many extractor models, then for the best-scoring model, write
train/test block-level classification performance as well as the model itself
to disk in ``output_dir``.
Args:
extractor (:class:`Extractor`): Instance of the ``Extractor`` class to
be trained.
param_grid (dict or List[dict]): Dictionary with parameters names (str)
as keys and lists of parameter settings to try as values, or a list
of such dictionaries, in which case the grids spanned by each are
explored. See documentation for :class:`GridSearchCV` for details.
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html and gold standard blocks files
output_dir (str): Directory on disk to which the trained model files,
errors, etc. are to be written. If None, outputs are not saved.
**kwargs:
scoring (str or Callable): default 'f1'
cv (int): default 5
n_jobs (int): default 1
verbose (int): default 1
Returns:
:class:`Extractor`: The trained extractor model with the best-scoring
set of params.
See Also:
Documentation for grid search :class:`GridSearchCV` in ``scikit-learn``:
http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
|
f5164:m3
|
def extract_all_gold_standard_data(data_dir, nprocesses=<NUM_LIT:1>,<EOL>overwrite=False, **kwargs):
|
use_pool = nprocesses > <NUM_LIT:1><EOL>if use_pool:<EOL><INDENT>pool = multiprocessing.Pool(processes=nprocesses)<EOL><DEDENT>if overwrite is False:<EOL><INDENT>gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)<EOL>if not os.path.isdir(gs_blocks_dir):<EOL><INDENT>os.mkdir(gs_blocks_dir)<EOL><DEDENT>gs_blocks_filenames = get_filenames(<EOL>gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))<EOL>gs_blocks_fileroots = {<EOL>re.search(r'<STR_LIT>' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(<NUM_LIT:1>)<EOL>for gs_blocks_filename in gs_blocks_filenames}<EOL><DEDENT>else:<EOL><INDENT>gs_blocks_fileroots = set()<EOL><DEDENT>gs_dir = os.path.join(data_dir, GOLD_STANDARD_DIRNAME)<EOL>gs_filenames = get_filenames(<EOL>gs_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_EXT))<EOL>for i, gs_filename in enumerate(gs_filenames):<EOL><INDENT>gs_fileroot = re.search(r'<STR_LIT>' + re.escape(GOLD_STANDARD_EXT), gs_filename).group(<NUM_LIT:1>)<EOL>if gs_fileroot in gs_blocks_fileroots:<EOL><INDENT>continue<EOL><DEDENT>if i % <NUM_LIT:100> == <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>'.format(gs_filename))<EOL><DEDENT>if use_pool:<EOL><INDENT>pool.apply_async(extract_gold_standard_blocks, (data_dir, gs_fileroot), kwargs)<EOL><DEDENT>else:<EOL><INDENT>extract_gold_standard_blocks(data_dir, gs_fileroot, **kwargs)<EOL><DEDENT><DEDENT>if use_pool:<EOL><INDENT>pool.close()<EOL>pool.join()<EOL><DEDENT>
|
Extract the gold standard block-level content and comment percentages from a
directory of labeled data (only those for which the gold standard blocks are
not found), and save results to corresponding files in a block-level
gold standard directory under ``data_dir``.
Args:
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html files and gold standard content +
comments text files
nprocesses (int): If > 1, use a :class:`multiprocessing.Pool` to
parallelize the extractions
overwrite (bool): If True, overwrite existing gold-standard blocks files.
**kwargs: passed into :func:`extract_gold_standard_blocks`
See Also:
:func:`extract_gold_standard_blocks`
|
f5165:m0
|
def extract_gold_standard_blocks(data_dir, fileroot, encoding=None,<EOL>tokenizer=simple_tokenizer, cetr=False):
|
<EOL>raw_html = read_html_file(data_dir, fileroot, encoding=encoding) <EOL>from dragnet.blocks import BlockifyError<EOL>try:<EOL><INDENT>blocks = [b.text for b in Blockifier.blockify(raw_html)] <EOL><DEDENT>except BlockifyError as e:<EOL><INDENT>print('<STR_LIT>'.format(fileroot))<EOL>return<EOL><DEDENT>blocks_tokens = [tokenizer(block) for block in blocks]<EOL>num_blocks_tokens = [len(block_tokens) for block_tokens in blocks_tokens]<EOL>all_blocks_tokens = []<EOL>all_blocks_tokens_block_id = []<EOL>for i, block_tokens in enumerate(blocks_tokens):<EOL><INDENT>all_blocks_tokens.extend(block_tokens)<EOL>all_blocks_tokens_block_id.extend([i] * len(block_tokens))<EOL><DEDENT>def get_frac_and_str_tokens_in_gs(gs_txt):<EOL><INDENT>"""<STR_LIT>"""<EOL>gs_tokens = tokenizer(gs_txt)<EOL>tokens_in_gs = check_inclusion(all_blocks_tokens, gs_tokens)<EOL>num_blocks_tokens_in_gs = [<NUM_LIT:0> for _ in range(len(blocks))]<EOL>blocks_tokens_in_gs_tokens = [[] for _ in range(len(blocks))]<EOL>for token, token_in_gs, block_id in zip(all_blocks_tokens, tokens_in_gs, all_blocks_tokens_block_id):<EOL><INDENT>if token_in_gs is True:<EOL><INDENT>num_blocks_tokens_in_gs[block_id] += <NUM_LIT:1><EOL>blocks_tokens_in_gs_tokens[block_id].append(token)<EOL><DEDENT><DEDENT>blocks_tokens_strs_in_gs = [<EOL>'<STR_LIT:U+0020>'.join(block_tokens_in_gs_tokens)<EOL>for block_tokens_in_gs_tokens in blocks_tokens_in_gs_tokens]<EOL>frac_blocks_tokens_in_gs = [<EOL>num_block_tokens_in_gs / num_block_tokens<EOL>for num_block_tokens_in_gs, num_block_tokens<EOL>in zip(num_blocks_tokens_in_gs, num_blocks_tokens)]<EOL>return (frac_blocks_tokens_in_gs, blocks_tokens_strs_in_gs)<EOL><DEDENT>gs_content, gs_comments = read_gold_standard_file(data_dir, fileroot, cetr)<EOL>frac_blocks_tokens_in_gs_content, blocks_tokens_strs_in_gs_content =get_frac_and_str_tokens_in_gs(gs_content)<EOL>frac_blocks_tokens_in_gs_comments, blocks_tokens_strs_in_gs_comments =get_frac_and_str_tokens_in_gs(gs_comments)<EOL>output_fname = os.path.join(<EOL>data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)<EOL>line_fmt = u'<STR_LIT>'<EOL>with io.open(output_fname, mode='<STR_LIT:w>') as f:<EOL><INDENT>for block_id, block_tokens in enumerate(blocks_tokens):<EOL><INDENT>line = line_fmt.format(<EOL>frac_content=frac_blocks_tokens_in_gs_content[block_id],<EOL>frac_comments=frac_blocks_tokens_in_gs_comments[block_id],<EOL>block_tokens='<STR_LIT:U+0020>'.join(block_tokens),<EOL>content_tokens=blocks_tokens_strs_in_gs_content[block_id],<EOL>comment_tokens=blocks_tokens_strs_in_gs_comments[block_id])<EOL>f.write(line)<EOL><DEDENT><DEDENT>
|
Extract the gold standard block-level content and comments for a single
observation identified by ``fileroot``, and write the results to file.
Args:
data_dir (str): The root directory containing sub-directories for
raw HTML, gold standard extracted content, and gold standard blocks.
fileroot (str): Unique identifier for a single observation of training
data, corresponding to the start of its raw html and gold standard
filenames under ``data_dir``.
encoding (str)
tokenizer (Callable): Object that takes a string and returns the tokens
as a list of strings.
cetr (bool): If True, parse the gold standard in clean eval format.
Notes:
Results are written to a text file in the block-level gold standard dir
:obj:`GOLD_STANDARD_BLOCKS_DIRNAME` below ``data_dir``. Each line
corresponds to a single block in its order of appearance, and has the
following format::
content_frac comments_frac all_tokens content_tokens comments_tokens
where each item is separated by a tab. ``content_frac`` is equal to the
fraction of ``all_tokens`` found in the corresponding gold parse content
text; ``comments_frac`` is the same but for comments text.
|
f5165:m1
|
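Per the Notes above, each written line holds five tab-separated fields. A small sketch of reading one such line back (the sample line is illustrative; this mirrors what prepare_data further down does):

```python
line = "0.83\t0.0\talpha beta gamma\talpha gamma\t"  # illustrative line only

frac_content, frac_comments, all_toks, content_toks, comment_toks = line.rstrip("\n").split("\t")
print(float(frac_content))   # fraction of the block's tokens found in the gold content
print(all_toks.split())      # every token in the block
print(content_toks.split())  # the subset of tokens matched in the gold content
```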
def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
|
if not os.path.exists(dirname):<EOL><INDENT>raise OSError('<STR_LIT>'.format(dirname))<EOL><DEDENT>match_regex = re.compile(match_regex) if match_regex else None<EOL>for filename in sorted(os.listdir(dirname)):<EOL><INDENT>if extension and not os.path.splitext(filename)[-<NUM_LIT:1>] == extension:<EOL><INDENT>continue<EOL><DEDENT>if match_regex and not match_regex.search(filename):<EOL><INDENT>continue<EOL><DEDENT>if full_path is True:<EOL><INDENT>yield os.path.join(dirname, filename)<EOL><DEDENT>else:<EOL><INDENT>yield filename<EOL><DEDENT><DEDENT>
|
Get all filenames under ``dirname`` that match ``match_regex`` or have file
extension equal to ``extension``, optionally prepending the full path.
Args:
dirname (str): /path/to/dir on disk where files to read are saved
full_path (bool): if False, return filenames without path; if True,
return filenames with path, as ``os.path.join(dirname, fname)``
match_regex (str): include files whose names match this regex pattern
extension (str): if files only of a certain type are wanted,
specify the file extension (e.g. ".txt")
Yields:
str: next matching filename
|
f5165:m2
|
def read_html_file(data_dir, fileroot, encoding=None):
|
fname = os.path.join(<EOL>data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT)<EOL>encodings = (encoding,) if encoding else ('<STR_LIT:utf-8>', '<STR_LIT>') <EOL>for encoding in encodings:<EOL><INDENT>try:<EOL><INDENT>with io.open(fname, mode='<STR_LIT>', encoding=encoding) as f:<EOL><INDENT>raw_html = f.read()<EOL><DEDENT>break<EOL><DEDENT>except (UnicodeDecodeError, UnicodeError):<EOL><INDENT>raw_html = None<EOL><DEDENT><DEDENT>return ftfy.fix_encoding(raw_html).strip()<EOL>
|
Read the HTML file corresponding to identifier ``fileroot``
in the raw HTML directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
Returns:
str
|
f5165:m3
|
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False):
|
fname = os.path.join(<EOL>data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT)<EOL>encodings = (encoding,) if encoding else ('<STR_LIT:utf-8>', '<STR_LIT>', '<STR_LIT>')<EOL>for encoding in encodings:<EOL><INDENT>try:<EOL><INDENT>with io.open(fname, mode='<STR_LIT>', encoding=encoding) as f:<EOL><INDENT>gold_standard = f.read()<EOL><DEDENT>break<EOL><DEDENT>except (UnicodeDecodeError, UnicodeError):<EOL><INDENT>gold_standard = None<EOL><DEDENT><DEDENT>if not gold_standard:<EOL><INDENT>return [u'<STR_LIT>', u'<STR_LIT>']<EOL><DEDENT>if not cetr:<EOL><INDENT>content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=<NUM_LIT:1>)<EOL>if len(content_comments) == <NUM_LIT:1>:<EOL><INDENT>content_comments = [content_comments[<NUM_LIT:0>], u'<STR_LIT>']<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())<EOL>content_comments = [u'<STR_LIT:U+0020>'.join(text_from_subtree(tree)), u'<STR_LIT>']<EOL><DEDENT>content_comments = [ftfy.fix_encoding(content_comments[<NUM_LIT:0>]).strip(),<EOL>ftfy.fix_encoding(content_comments[<NUM_LIT:1>]).strip()]<EOL>return content_comments<EOL>
|
Read the gold standard content file corresponding to identifier ``fileroot``
in the gold standard directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
encoding (str)
cetr (bool): if True, assume no comments and parse the gold standard
to remove tags
Returns:
List[str, str]: contents string and comments string, respectively
|
f5165:m4
|
def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):
|
fname = os.path.join(<EOL>data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)<EOL>with io.open(fname, mode='<STR_LIT:r>') as f:<EOL><INDENT>data = f.read()<EOL><DEDENT>if split_blocks:<EOL><INDENT>return filter(None, data[:-<NUM_LIT:1>].split('<STR_LIT:\n>'))<EOL><DEDENT>return filter(None, data)<EOL>
|
Read the gold standard blocks file corresponding to identifier ``fileroot``
in the gold standard blocks directory below the root ``data_dir``.
Args:
data_dir (str)
fileroot (str)
split_blocks (bool): If True, split the file's content into blocks.
Returns:
str or List[str]
|
f5165:m5
|
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=<NUM_LIT:0.1>):
|
if not <NUM_LIT:0.0> <= block_pct_tokens_thresh <= <NUM_LIT:1.0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>html = read_html_file(data_dir, fileroot)<EOL>blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True)<EOL>content_blocks = []<EOL>comments_blocks = []<EOL>for block in blocks:<EOL><INDENT>block_split = block.split('<STR_LIT:\t>')<EOL>num_block_tokens = len(block_split[<NUM_LIT:2>].split())<EOL>content_blocks.append(<EOL>(float(block_split[<NUM_LIT:0>]), num_block_tokens, block_split[<NUM_LIT:3>].split()))<EOL>comments_blocks.append(<EOL>(float(block_split[<NUM_LIT:1>]), num_block_tokens, block_split[<NUM_LIT:4>].split()))<EOL><DEDENT>parsed_content_blocks = _parse_content_or_comments_blocks(<EOL>content_blocks, block_pct_tokens_thresh)<EOL>parsed_comments_blocks = _parse_content_or_comments_blocks(<EOL>comments_blocks, block_pct_tokens_thresh)<EOL>return (html, parsed_content_blocks, parsed_comments_blocks)<EOL>
|
Prepare data for a single HTML + gold standard blocks example, uniquely
identified by ``fileroot``.
Args:
data_dir (str)
fileroot (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:
The first element is simply the raw html as a string. The second and
third elements are 3-tuples for content and comments, respectively,
where the first element is a numpy array of 1s and 0s whose values
indicate whether or not a given block is considered content;
the second element is a numpy integer array whose values are
the total number of tokens in each block; and the third element is
a flat list of content or comment tokens as strings, concatenated
from all blocks.
See Also:
:func:`prepare_all_data`
|
f5165:m7
|
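The nested 3-tuple described above unpacks as follows (a usage sketch with placeholder paths; the import path is an assumption):

```python
from dragnet.data_processing import prepare_data  # assumed module path

html, content, comments = prepare_data("datafiles/", "some_page")  # placeholder args

content_labels, content_token_counts, content_tokens = content
# content_labels[i] == 1 when block i passes the block_pct_tokens_thresh cutoff
# content_token_counts[i] == total number of tokens in block i
# content_tokens == flat list of gold-standard content tokens across all blocks
```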
def prepare_all_data(data_dir, block_pct_tokens_thresh=<NUM_LIT:0.1>):
|
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)<EOL>gs_blocks_filenames = get_filenames(<EOL>gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))<EOL>gs_blocks_fileroots = (<EOL>re.search(r'<STR_LIT>' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(<NUM_LIT:1>)<EOL>for gs_blocks_filename in gs_blocks_filenames)<EOL>return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh)<EOL>for fileroot in gs_blocks_fileroots]<EOL>
|
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]]: one tuple per example, with the same structure as returned by :func:`prepare_data`
See Also:
:func:`prepare_data`
|
f5165:m8
|
def fit(self, documents, labels, weights=None):
|
block_groups = np.array([self.blockifier.blockify(doc) for doc in documents])<EOL>mask = [self._has_enough_blocks(blocks) for blocks in block_groups]<EOL>block_groups = block_groups[mask]<EOL>labels = np.concatenate(np.array(labels)[mask])<EOL>features_mat = np.concatenate([self.features.fit_transform(blocks)<EOL>for blocks in block_groups])<EOL>if weights is None:<EOL><INDENT>self.model.fit(features_mat, labels)<EOL><DEDENT>else:<EOL><INDENT>weights = np.concatenate(np.array(weights)[mask])<EOL>self.model.fit(features_mat, labels, sample_weight=weights)<EOL><DEDENT>return self<EOL>
|
Fit :class:`Extractor` features and model to a training dataset.
Args:
documents (List[str]): HTML documents
labels (``np.ndarray``)
weights (``np.ndarray``)
Returns:
:class:`Extractor`
|
f5166:c0:m3
|
def get_html_labels_weights(self, data):
|
all_html = []<EOL>all_labels = []<EOL>all_weights = []<EOL>for html, content, comments in data:<EOL><INDENT>all_html.append(html)<EOL>labels, weights = self._get_labels_and_weights(<EOL>content, comments)<EOL>all_labels.append(labels)<EOL>all_weights.append(weights)<EOL><DEDENT>return np.array(all_html), np.array(all_labels), np.array(all_weights)<EOL>
|
Gather the html, labels, and weights of many files' data.
Primarily useful for training/testing an :class:`Extractor`.
Args:
data: Output of :func:`dragnet.data_processing.prepare_all_data`.
Returns:
Tuple[np.array, np.array, np.array]: All HTML strings, all
labels, and all weights, respectively.
|
f5166:c0:m4
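Taken together with ``prepare_all_data``, this method gives a compact training loop. A rough sketch, assuming ``extractor`` is an already-constructed :class:`Extractor` (its constructor arguments are not shown in this record) and ``data_dir`` points at a prepared dataset:

.. code-block:: python

    from dragnet.data_processing import prepare_all_data

    data = prepare_all_data(data_dir)          # list of (html, content, comments) tuples
    html, labels, weights = extractor.get_html_labels_weights(data)

    # fit the blockifier/features/model pipeline on the labelled blocks
    extractor.fit(html, labels, weights=weights)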
|
def _get_labels_and_weights(self, content, comments):
|
<EOL>if '<STR_LIT:content>' in self.to_extract and '<STR_LIT>' in self.to_extract:<EOL><INDENT>labels = np.logical_or(content[<NUM_LIT:0>], comments[<NUM_LIT:0>]).astype(int)<EOL>weights = content[<NUM_LIT:1>]<EOL><DEDENT>elif '<STR_LIT:content>' in self.to_extract:<EOL><INDENT>labels = content[<NUM_LIT:0>]<EOL>weights = content[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>labels = comments[<NUM_LIT:0>]<EOL>weights = comments[<NUM_LIT:1>]<EOL><DEDENT>if self.max_block_weight is not None:<EOL><INDENT>weights = np.minimum(weights, self.max_block_weight)<EOL><DEDENT>return labels, weights<EOL>
|
Args:
content (Tuple[np.array[int], np.array[int], List[str]])
comments (Tuple[np.array[int], np.array[int], List[str]])
Returns:
Tuple[np.array[int], np.array[int]]: block labels and block weights
|
f5166:c0:m6
|
def extract(self, html, encoding=None, as_blocks=False):
|
preds, blocks = self.predict(html, encoding=encoding, return_blocks=True)<EOL>if as_blocks is False:<EOL><INDENT>return str_cast(b'<STR_LIT:\n>'.join(blocks[ind].text for ind in np.flatnonzero(preds)))<EOL><DEDENT>else:<EOL><INDENT>return [blocks[ind] for ind in np.flatnonzero(preds)]<EOL><DEDENT>
|
Extract the main content and/or comments from an HTML document and
return it as a string or as a sequence of block objects.
Args:
html (str): HTML document as a string.
encoding (str): Encoding of ``html``. If None (encoding unknown), the
original encoding will be guessed from the HTML itself.
as_blocks (bool): If False, return the main content as a combined
string; if True, return the content-holding blocks as a list of
block objects.
Returns:
str or List[Block]
|
f5166:c0:m7
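A brief usage sketch with an already-trained ``extractor`` (training omitted) and an HTML page held in ``html_string``:

.. code-block:: python

    # main content joined into a single newline-separated string
    content = extractor.extract(html_string)

    # or keep the predicted content blocks for further processing
    for block in extractor.extract(html_string, as_blocks=True):
        print(block.text)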
|
def predict(self, documents, **kwargs):
|
if isinstance(documents, (str, bytes, unicode_, np.unicode_)):<EOL><INDENT>return self._predict_one(documents, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return np.concatenate([self._predict_one(doc, **kwargs) for doc in documents])<EOL><DEDENT>
|
Predict class (content=1 or not-content=0) of the blocks in one or many
HTML document(s).
Args:
documents (str or List[str]): HTML document(s)
Returns:
``np.ndarray``: array of binary predictions for content (1) or
not-content (0); predictions for multiple documents are concatenated
into a single array.
|
f5166:c0:m8
|
def _predict_one(self, document, encoding=None, return_blocks=False):
|
<EOL>blocks = self.blockifier.blockify(document, encoding=encoding)<EOL>try:<EOL><INDENT>features = self.features.transform(blocks)<EOL><DEDENT>except ValueError: <EOL><INDENT>preds = np.zeros((len(blocks)))<EOL><DEDENT>else:<EOL><INDENT>if self.prob_threshold is None:<EOL><INDENT>preds = self.model.predict(features)<EOL><DEDENT>else:<EOL><INDENT>self._positive_idx = (<EOL>self._positive_idx or list(self.model.classes_).index(<NUM_LIT:1>))<EOL>preds = self.model.predict_proba(features) > self.prob_threshold<EOL>preds = preds[:, self._positive_idx].astype(int)<EOL><DEDENT><DEDENT>if return_blocks:<EOL><INDENT>return preds, blocks<EOL><DEDENT>else:<EOL><INDENT>return preds<EOL><DEDENT>
|
Predict class (content=1 or not-content=0) of each block in an HTML
document.
Args:
document (str): HTML document
Returns:
``np.ndarray``: array of binary predictions for content (1) or
not-content (0).
|
f5166:c0:m9
|
def fit(self, blocks, y=None):
|
feature_array = self.feature.fit_transform(blocks)<EOL>self.scaler = self.scaler.fit(feature_array)<EOL>return self<EOL>
|
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`StandardizedFeature`: an instance of this class with the
``self.scaler`` attribute fit to the ``blocks`` data
Note:
When fitting the :class:`StandardScaler` object, you'll probably
want to determine the mean and/or std of *multiple* HTML files'
blocks, rather than just a single observation. To do that, just
concatenate all of the blocks together in a single iterable.
In contrast, you'll typically apply :meth:`transform` to a *single*
HTML file's blocks at a time.
|
f5168:c0:m1
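A minimal sketch of the note above: fit the scaler on blocks pooled from several documents, then transform one document's blocks at a time. Here ``std_feature`` is assumed to be an already-constructed ``StandardizedFeature`` wrapping some base feature, and ``blockify`` stands in for whatever blockifier produces the block lists:

.. code-block:: python

    import itertools

    # one list of blocks per training document
    blocks_per_doc = [blockify(doc) for doc in training_docs]

    # pool all blocks so the scaler sees means/stds across many documents
    std_feature = std_feature.fit(list(itertools.chain.from_iterable(blocks_per_doc)))

    # ...but transform a single document's blocks at a time
    feature_mat = std_feature.transform(blocks_per_doc[0])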
|
def transform(self, blocks, y=None):
|
return self.scaler.transform(self.feature.transform(blocks))<EOL>
|
Transform an ordered sequence of blocks into a 2D features matrix with
shape (num blocks, num features) and standardized feature values.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
`np.ndarray`: 2D array of shape (num blocks, num sub-features),
where ``blocks`` data has been transformed by ``self.feature``
and optionally standardized by ``self.scaler``.
|
f5168:c0:m2
|
def fit(self, blocks, y=None):
|
return self<EOL>
|
This method returns the current instance unchanged, since no fitting is
required for this ``Feature``. It's here only for API consistency.
|
f5169:c0:m0
|
def transform(self, blocks, y=None):
|
feature_vecs = (<EOL>tuple(re.search(token, block.css[attrib]) is not None<EOL>for block in blocks)<EOL>for attrib, tokens in self.attribute_tokens<EOL>for token in tokens<EOL>)<EOL>return np.column_stack(tuple(feature_vecs)).astype(int)<EOL>
|
Transform an ordered sequence of blocks into a 2D features matrix with
shape (num blocks, num features).
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
`np.ndarray`: 2D array of shape (num blocks, num CSS attributes),
where values are either 0 or 1, indicating the absence or
presence of a given token in a CSS attribute on a given block.
|
f5169:c0:m1
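Because no fitting is needed, producing the binary matrix is a two-liner; ``blocks`` is assumed to come from a blockifier:

.. code-block:: python

    css = CSSFeatures()
    feature_mat = css.fit(blocks).transform(blocks)

    # feature_mat has shape (len(blocks), number of attribute/token pairs checked);
    # a cell is 1 if the token occurs in that CSS attribute of the block, else 0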
|
def fit(self, blocks, y=None):
|
return self<EOL>
|
This method returns the current instance unchanged, since no fitting is
required for this ``Feature``. It's here only for API consistency.
|
f5170:c0:m0
|
def transform(self, blocks, y=None):
|
return make_kohlschuetter_features(blocks)<EOL>
|
Transform an ordered sequence of blocks into a 2D features matrix with
shape (num blocks, num features).
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
`np.ndarray`: 2D array of shape (num blocks, 6), where values are
floats corresponding to the link and text densities of
a block and its immediate neighbors in the sequence.
|
f5170:c0:m1
|
def get_feature(name):
|
if name == '<STR_LIT>':<EOL><INDENT>return CSSFeatures()<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return KohlschuetterFeatures()<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return ReadabilityFeatures()<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return WeningerFeatures()<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return ClusteredWeningerFeatures()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(name))<EOL><DEDENT>
|
Get an instance of a ``Features`` class by ``name`` (str).
|
f5171:m0
|
def fit(self, blocks, y=None):
|
return self<EOL>
|
This method returns the current instance unchanged, since no fitting is
required for this ``Feature``. It's here only for API consistency.
|
f5172:c0:m0
|
def transform(self, blocks, y=None):
|
return make_readability_features(blocks)<EOL>
|
Transform an ordered sequence of blocks into a 2D features matrix with
shape (num blocks, num features).
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
`np.ndarray`: 2D array of shape (num blocks, 1)
|
f5172:c0:m1
|
def fit(self, blocks, y=None):
|
return self<EOL>
|
This method returns the current instance unchanged, since no fitting is
required for this ``Feature``. It's here only for API consistency.
|
f5173:c0:m1
|
def transform(self, blocks, y=None):
|
return make_weninger_features(blocks, sigma=self.sigma)<EOL>
|
Computes the content to tag ratio per block and returns the smoothed
values and the smoothed absolute differences for each block.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (num blocks, 2), where values are
floats corresponding to the smoothed and smoothed absolute
difference values for each block.
|
f5173:c0:m2
|
def fit(self, blocks, y=None):
|
self.kmeans.fit(make_weninger_features(blocks))<EOL>self.kmeans.cluster_centers_.sort(axis=<NUM_LIT:0>)<EOL>self.kmeans.cluster_centers_[<NUM_LIT:0>, :] = np.zeros(<NUM_LIT:2>)<EOL>return self<EOL>
|
Fit a k-means clustering model using an ordered sequence of blocks.
|
f5173:c1:m1
|
def transform(self, blocks, y=None):
|
preds = (self.kmeans.predict(make_weninger_features(blocks)) > <NUM_LIT:0>).astype(int)<EOL>return np.reshape(preds, (-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>
|
Computes the content to tag ratio per block, smooths the values, then
predicts content (1) or not-content (0) using a fit k-means cluster model.
Args:
blocks (List[Block]): as output by :class:`Blockifier.blockify`
y (None): This isn't used, it's only here for API consistency.
Returns:
:class:`np.ndarray`: 2D array of shape (num blocks, 1), where
values are either 0 or 1, corresponding to the kmeans prediction
of content (1) or not-content (0).
|
f5173:c1:m2
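A short fit-then-transform sketch for this clustered variant; the constructor defaults are assumed, and ``training_blocks`` / ``blocks`` come from a blockifier:

.. code-block:: python

    cwf = ClusteredWeningerFeatures()
    cwf.fit(training_blocks)             # fits the internal k-means on the Weninger features

    binary_col = cwf.transform(blocks)   # shape (len(blocks), 1), values 0 or 1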
|
def block_output_tokens(blocks, true_tokens):
|
assert len(blocks) == len(true_tokens)<EOL>for k in range_(len(blocks)):<EOL><INDENT>block_tokens = re.split(r"<STR_LIT>", blocks[k].text.strip())<EOL>assert block_tokens == true_tokens[k]<EOL><DEDENT>
|
blocks = the output from blockify
true_tokens = a list of true tokens
|
f5178:m1
|
@property<EOL><INDENT>def sources(<EOL>self):<DEDENT>
|
sourceResultsList = []<EOL>sourceResultsList[:] = [dict(l) for l in self.sourceResultsList]<EOL>return sourceResultsList<EOL>
|
*The results of the search returned as a python list of dictionaries*
**Usage:**
.. code-block:: python
sources = tns.sources
|
f5187:c0:m1
|
@property<EOL><INDENT>def spectra(<EOL>self):<DEDENT>
|
specResultsList = []<EOL>specResultsList[:] = [dict(l) for l in self.specResultsList]<EOL>return specResultsList<EOL>
|
*The associated source spectral data*
**Usage:**
.. code-block:: python
sourceSpectra = tns.spectra
|
f5187:c0:m2
|
@property<EOL><INDENT>def files(<EOL>self):<DEDENT>
|
relatedFilesResultsList = []<EOL>relatedFilesResultsList[:] = [dict(l)<EOL>for l in self.relatedFilesResultsList]<EOL>return relatedFilesResultsList<EOL>
|
*The associated source files*
**Usage:**
.. code-block:: python
sourceFiles = tns.files
|
f5187:c0:m3
|
@property<EOL><INDENT>def photometry(<EOL>self):<DEDENT>
|
photResultsList = []<EOL>photResultsList[:] = [dict(l) for l in self.photResultsList]<EOL>return photResultsList<EOL>
|
*The associated source photometry*
**Usage:**
.. code-block:: python
sourcePhotometry = tns.photometry
|
f5187:c0:m4
|
@property<EOL><INDENT>def url(<EOL>self):<DEDENT>
|
return self._searchURL<EOL>
|
*The generated URL used for searching of the TNS*
**Usage:**
.. code-block:: python
searchURL = tns.url
|
f5187:c0:m5
|
def csv(<EOL>self,<EOL>dirPath=None):
|
if dirPath:<EOL><INDENT>p = self._file_prefix()<EOL>csvSources = self.sourceResults.csv(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>csvPhot = self.photResults.csv(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>csvSpec = self.specResults.csv(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>csvFiles = self.relatedFilesResults.csv(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>csvSources = self.sourceResults.csv()<EOL>csvPhot = self.photResults.csv()<EOL>csvSpec = self.specResults.csv()<EOL>csvFiles = self.relatedFilesResults.csv()<EOL><DEDENT>return csvSources, csvPhot, csvSpec, csvFiles<EOL>
|
*Render the results in csv format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `csvSources` -- the top-level transient data
- `csvPhot` -- all photometry associated with the transients
- `csvSpec` -- all spectral data associated with the transients
- `csvFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in csv format:
.. code-block:: python
csvSources, csvPhot, csvSpec, csvFiles = tns.csv()
print csvSources
.. code-block:: text
TNSId,TNSName,discoveryName,discSurvey,raSex,decSex,raDeg,decDeg,transRedshift,specType,discMag,discMagFilter,discDate,objectUrl,hostName,hostRedshift,separationArcsec,separationNorthArcsec,separationEastArcsec
2016asf,SN2016asf,ASASSN-16cs,ASAS-SN,06:50:36.73,+31:06:45.36,102.6530,31.1126,0.021,SN Ia,17.1,V-Johnson,2016-03-06 08:09:36,http://wis-tns.weizmann.ac.il/object/2016asf,KUG 0647+311,,0.66,0.65,-0.13
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.csv("~/tns")
.. image:: https://i.imgur.com/BwwqMBg.png
:width: 800px
:alt: csv output
|
f5187:c0:m6
|
def json(<EOL>self,<EOL>dirPath=None):
|
if dirPath:<EOL><INDENT>p = self._file_prefix()<EOL>jsonSources = self.sourceResults.json(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>jsonPhot = self.photResults.json(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>jsonSpec = self.specResults.json(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>jsonFiles = self.relatedFilesResults.json(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>jsonSources = self.sourceResults.json()<EOL>jsonPhot = self.photResults.json()<EOL>jsonSpec = self.specResults.json()<EOL>jsonFiles = self.relatedFilesResults.json()<EOL><DEDENT>return jsonSources, jsonPhot, jsonSpec, jsonFiles<EOL>
|
*Render the results in json format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `jsonSources` -- the top-level transient data
- `jsonPhot` -- all photometry associated with the transients
- `jsonSpec` -- all spectral data associated with the transients
- `jsonFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in json format:
.. code-block:: python
jsonSources, jsonPhot, jsonSpec, jsonFiles = tns.json()
print jsonSources
.. code-block:: text
[
{
"TNSId": "2016asf",
"TNSName": "SN2016asf",
"decDeg": 31.1126,
"decSex": "+31:06:45.36",
"discDate": "2016-03-06 08:09:36",
"discMag": "17.1",
"discMagFilter": "V-Johnson",
"discSurvey": "ASAS-SN",
"discoveryName": "ASASSN-16cs",
"hostName": "KUG 0647+311",
"hostRedshift": null,
"objectUrl": "http://wis-tns.weizmann.ac.il/object/2016asf",
"raDeg": 102.65304166666667,
"raSex": "06:50:36.73",
"separationArcsec": "0.66",
"separationEastArcsec": "-0.13",
"separationNorthArcsec": "0.65",
"specType": "SN Ia",
"transRedshift": "0.021"
}
]
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.json("~/tns")
.. image:: https://i.imgur.com/wAHqARI.png
:width: 800px
:alt: json output
|
f5187:c0:m7
|
def yaml(<EOL>self,<EOL>dirPath=None):
|
if dirPath:<EOL><INDENT>p = self._file_prefix()<EOL>yamlSources = self.sourceResults.yaml(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>yamlPhot = self.photResults.yaml(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>yamlSpec = self.specResults.yaml(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>yamlFiles = self.relatedFilesResults.yaml(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>yamlSources = self.sourceResults.yaml()<EOL>yamlPhot = self.photResults.yaml()<EOL>yamlSpec = self.specResults.yaml()<EOL>yamlFiles = self.relatedFilesResults.yaml()<EOL><DEDENT>return yamlSources, yamlPhot, yamlSpec, yamlFiles<EOL>
|
*Render the results in yaml format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `yamlSources` -- the top-level transient data
- `yamlPhot` -- all photometry associated with the transients
- `yamlSpec` -- all spectral data associated with the transients
- `yamlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in yaml format:
.. code-block:: python
yamlSources, yamlPhot, yamlSpec, yamlFiles = tns.yaml()
print yamlSources
.. code-block:: text
- TNSId: 2016asf
TNSName: SN2016asf
decDeg: 31.1126
decSex: '+31:06:45.36'
discDate: '2016-03-06 08:09:36'
discMag: '17.1'
discMagFilter: V-Johnson
discSurvey: ASAS-SN
discoveryName: ASASSN-16cs
hostName: KUG 0647+311
hostRedshift: null
objectUrl: http://wis-tns.weizmann.ac.il/object/2016asf
raDeg: 102.65304166666667
raSex: '06:50:36.73'
separationArcsec: '0.66'
separationEastArcsec: '-0.13'
separationNorthArcsec: '0.65'
specType: SN Ia
transRedshift: '0.021'
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.yaml("~/tns")
.. image:: https://i.imgur.com/ZpJIC6p.png
:width: 800px
:alt: yaml output
|
f5187:c0:m8
|
def markdown(<EOL>self,<EOL>dirPath=None):
|
if dirPath:<EOL><INDENT>p = self._file_prefix()<EOL>markdownSources = self.sourceResults.markdown(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>markdownPhot = self.photResults.markdown(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>markdownSpec = self.specResults.markdown(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>markdownFiles = self.relatedFilesResults.markdown(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>markdownSources = self.sourceResults.markdown()<EOL>markdownPhot = self.photResults.markdown()<EOL>markdownSpec = self.specResults.markdown()<EOL>markdownFiles = self.relatedFilesResults.markdown()<EOL><DEDENT>return markdownSources, markdownPhot, markdownSpec, markdownFiles<EOL>
|
*Render the results in markdown format*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `markdownSources` -- the top-level transient data
- `markdownPhot` -- all photometry associated with the transients
- `markdownSpec` -- all spectral data associated with the transients
- `markdownFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in markdown table format:
.. code-block:: python
markdownSources, markdownPhot, markdownSpec, markdownFiles = tns.markdown()
print markdownSources
.. code-block:: text
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
|:---------|:-----------|:---------------|:------------|:-------------|:--------------|:----------|:---------|:---------------|:----------|:---------|:---------------|:---------------------|:----------------------------------------------|:--------------|:--------------|:------------------|:-----------------------|:----------------------|
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.markdown("~/tns")
.. image:: https://i.imgur.com/AYLBQoJ.png
:width: 800px
:alt: markdown output
|
f5187:c0:m9
|
def table(<EOL>self,<EOL>dirPath=None):
|
if dirPath:<EOL><INDENT>p = self._file_prefix()<EOL>tableSources = self.sourceResults.table(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>tablePhot = self.photResults.table(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>tableSpec = self.specResults.table(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL>tableFiles = self.relatedFilesResults.table(<EOL>filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>tableSources = self.sourceResults.table()<EOL>tablePhot = self.photResults.table()<EOL>tableSpec = self.specResults.table()<EOL>tableFiles = self.relatedFilesResults.table()<EOL><DEDENT>return tableSources, tablePhot, tableSpec, tableFiles<EOL>
|
*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
|
f5187:c0:m10
|
def mysql(<EOL>self,<EOL>tableNamePrefix="<STR_LIT>",<EOL>dirPath=None):
|
if dirPath:<EOL><INDENT>p = self._file_prefix()<EOL>createStatement = """<STR_LIT>""" % locals()<EOL>mysqlSources = self.sourceResults.mysql(<EOL>tableNamePrefix + "<STR_LIT>", filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>", createStatement=createStatement)<EOL>createStatement = """<STR_LIT>""" % locals()<EOL>mysqlPhot = self.photResults.mysql(<EOL>tableNamePrefix + "<STR_LIT>", filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>", createStatement=createStatement)<EOL>createStatement = """<STR_LIT>""" % locals()<EOL>mysqlSpec = self.specResults.mysql(<EOL>tableNamePrefix + "<STR_LIT>", filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>", createStatement=createStatement)<EOL>createStatement = """<STR_LIT>""" % locals()<EOL>mysqlFiles = self.relatedFilesResults.mysql(<EOL>tableNamePrefix + "<STR_LIT>", filepath=dirPath + "<STR_LIT:/>" + p + "<STR_LIT>", createStatement=createStatement)<EOL><DEDENT>else:<EOL><INDENT>mysqlSources = self.sourceResults.mysql(<EOL>tableNamePrefix + "<STR_LIT>")<EOL>mysqlPhot = self.photResults.mysql(tableNamePrefix + "<STR_LIT>")<EOL>mysqlSpec = self.specResults.mysql(tableNamePrefix + "<STR_LIT>")<EOL>mysqlFiles = self.relatedFilesResults.mysql(<EOL>tableNamePrefix + "<STR_LIT>")<EOL><DEDENT>return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles<EOL>
|
*Render the results as MySQL Insert statements*
**Key Arguments:**
- ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `mysqlSources` -- the top-level transient data
- `mysqlPhot` -- all photometry associated with the transients
- `mysqlSpec` -- all spectral data associated with the transients
- `mysqlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in mysql insert format:
.. code-block:: python
mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
print mysqlSources
.. code-block:: text
INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.mysql("TNS", "~/tns")
.. image:: https://i.imgur.com/CozySPW.png
:width: 800px
:alt: mysql output
|
f5187:c0:m11
|
def _query_tns(self):
|
self.log.info('<STR_LIT>')<EOL>sourceTable = []<EOL>photoTable = []<EOL>specTable = []<EOL>relatedFilesTable = []<EOL>stop = False<EOL>sourceCount = <NUM_LIT:0><EOL>while not stop:<EOL><INDENT>status_code, content, self._searchURL = self._get_tns_search_results()<EOL>if status_code != <NUM_LIT:200>:<EOL><INDENT>self.log.error(<EOL>'<STR_LIT>' % locals())<EOL>return None<EOL><DEDENT>if "<STR_LIT>" in content:<EOL><INDENT>print("<STR_LIT>")<EOL>return sourceTable, photoTable, specTable, relatedFilesTable<EOL><DEDENT>if self._parse_transient_rows(content, True) < self.batchSize:<EOL><INDENT>stop = True<EOL><DEDENT>else:<EOL><INDENT>self.page += <NUM_LIT:1><EOL>thisPage = self.page<EOL>print("<STR_LIT>" % locals())<EOL>sourceCount += self.batchSize<EOL>print("<STR_LIT:\t>" + self._searchURL)<EOL>timesleep.sleep(<NUM_LIT:1>)<EOL><DEDENT>for transientRow in self._parse_transient_rows(content):<EOL><INDENT>sourceContent = transientRow.group()<EOL>discInfo, TNSId = self._parse_discovery_information(<EOL>sourceContent)<EOL>sourceTable.append(discInfo)<EOL>phot, relatedFiles = self._parse_photometry_data(<EOL>sourceContent, TNSId)<EOL>photoTable += phot<EOL>relatedFilesTable += relatedFiles<EOL>spec, relatedFiles = self._parse_spectral_data(<EOL>sourceContent, TNSId)<EOL>specTable += spec<EOL>relatedFilesTable += relatedFiles<EOL><DEDENT><DEDENT>try:<EOL><INDENT>sourceTable = sorted(sourceTable, key=itemgetter(<EOL>'<STR_LIT>'), reverse=False)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>self.log.info('<STR_LIT>')<EOL>return sourceTable, photoTable, specTable, relatedFilesTable<EOL>
|
*determine how to query the TNS, send query and parse the results*
**Return:**
- ``results`` -- a list of dictionaries (one dictionary for each result set returned from the TNS)
|
f5187:c0:m12
|
def _get_tns_search_results(<EOL>self):
|
self.log.info('<STR_LIT>')<EOL>try:<EOL><INDENT>response = requests.get(<EOL>url="<STR_LIT>",<EOL>params={<EOL>"<STR_LIT>": self.page,<EOL>"<STR_LIT>": self.ra,<EOL>"<STR_LIT>": self.dec,<EOL>"<STR_LIT>": self.radiusArcsec,<EOL>"<STR_LIT:name>": self.name,<EOL>"<STR_LIT>": self.internal_name,<EOL>"<STR_LIT>": self.start,<EOL>"<STR_LIT>": self.end,<EOL>"<STR_LIT>": self.batchSize,<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>"<STR_LIT>": "<STR_LIT:1>",<EOL>},<EOL>)<EOL><DEDENT>except requests.exceptions.RequestException:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>self.log.info('<STR_LIT>')<EOL>return response.status_code, response.content, response.url<EOL>
|
*query the TNS and return the response*
|
f5187:c0:m13
|
def _file_prefix(<EOL>self):
|
self.log.info('<STR_LIT>')<EOL>if self.ra:<EOL><INDENT>now = datetime.now()<EOL>prefix = now.strftime("<STR_LIT>")<EOL><DEDENT>elif self.name:<EOL><INDENT>prefix = self.name + "<STR_LIT>"<EOL><DEDENT>elif self.internal_name:<EOL><INDENT>prefix = self.internal_name + "<STR_LIT>"<EOL><DEDENT>elif self.discInLastDays:<EOL><INDENT>discInLastDays = str(self.discInLastDays)<EOL>now = datetime.now()<EOL>prefix = now.strftime(<EOL>discInLastDays + "<STR_LIT>")<EOL><DEDENT>self.log.info('<STR_LIT>')<EOL>return prefix<EOL>
|
*Generate a file prefix based on the type of search for saving files to disk*
**Return:**
- ``prefix`` -- the file prefix
|
f5187:c0:m14
|
def _parse_transient_rows(<EOL>self,<EOL>content,<EOL>count=False):
|
self.log.info('<STR_LIT>')<EOL>regexForRow = r"""<STR_LIT>"""<EOL>if count:<EOL><INDENT>matchedSources = re.findall(<EOL>regexForRow,<EOL>content,<EOL>flags=re.S <EOL>)<EOL>return len(matchedSources)<EOL><DEDENT>matchedSources = re.finditer(<EOL>regexForRow,<EOL>content,<EOL>flags=re.S <EOL>)<EOL>self.log.info('<STR_LIT>')<EOL>return matchedSources<EOL>
|
*parse transient rows from the TNS result page content*
**Key Arguments:**
- ``content`` -- the content from the TNS results page.
- ``count`` -- return only the number of rows
**Return:**
- ``transientRows``
|
f5187:c0:m15
|
def _parse_discovery_information(<EOL>self,<EOL>content):
|
self.log.info('<STR_LIT>')<EOL>converter = unit_conversion(<EOL>log=self.log<EOL>)<EOL>matches = re.finditer(<EOL>r"""<STR_LIT>""",<EOL>content,<EOL>flags=<NUM_LIT:0> <EOL>)<EOL>discoveryData = []<EOL>for match in matches:<EOL><INDENT>row = match.groupdict()<EOL>for k, v in row.items():<EOL><INDENT>row[k] = v.strip()<EOL>if len(v) == <NUM_LIT:0>:<EOL><INDENT>row[k] = None<EOL><DEDENT><DEDENT>if row["<STR_LIT>"] == <NUM_LIT:0>:<EOL><INDENT>row["<STR_LIT>"] = None<EOL><DEDENT>if row["<STR_LIT>"][<NUM_LIT:0>] in ["<STR_LIT:1>", "<STR_LIT:2>"]:<EOL><INDENT>row["<STR_LIT>"] = "<STR_LIT>" + row["<STR_LIT>"]<EOL><DEDENT>row["<STR_LIT>"] = "<STR_LIT>" +row["<STR_LIT>"]<EOL>row["<STR_LIT>"] = converter.ra_sexegesimal_to_decimal(<EOL>ra=row["<STR_LIT>"]<EOL>)<EOL>row["<STR_LIT>"] = converter.dec_sexegesimal_to_decimal(<EOL>dec=row["<STR_LIT>"]<EOL>)<EOL>if self.ra:<EOL><INDENT>from astrocalc.coords import separations<EOL>calculator = separations(<EOL>log=self.log,<EOL>ra1=self.ra,<EOL>dec1=self.dec,<EOL>ra2=row["<STR_LIT>"],<EOL>dec2=row["<STR_LIT>"],<EOL>)<EOL>angularSeparation, north, east = calculator.get()<EOL>row["<STR_LIT>"] = angularSeparation<EOL>row["<STR_LIT>"] = north<EOL>row["<STR_LIT>"] = east<EOL><DEDENT>if not row["<STR_LIT>"]:<EOL><INDENT>row["<STR_LIT>"] = row["<STR_LIT>"]<EOL><DEDENT>del row["<STR_LIT>"]<EOL>del row["<STR_LIT>"]<EOL>row["<STR_LIT>"] = row["<STR_LIT>"].replace("<STR_LIT:U+0020>", "<STR_LIT>")<EOL>row["<STR_LIT>"] = row["<STR_LIT>"].replace(<EOL>"<STR_LIT>", "<STR_LIT>").replace("<STR_LIT>", "<STR_LIT>")<EOL>TNSId = row["<STR_LIT>"]<EOL>orow = collections.OrderedDict()<EOL>keyOrder = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]<EOL>for k, v in row.items():<EOL><INDENT>if k not in keyOrder:<EOL><INDENT>keyOrder.append(k)<EOL><DEDENT><DEDENT>for k in keyOrder:<EOL><INDENT>try:<EOL><INDENT>orow[k] = row[k]<EOL><DEDENT>except:<EOL><INDENT>self.log.info(<EOL>"<STR_LIT>" % locals())<EOL>pass<EOL><DEDENT><DEDENT>discoveryData.append(row)<EOL><DEDENT>self.log.info('<STR_LIT>')<EOL>return discoveryData[<NUM_LIT:0>], TNSId<EOL>
|
*parse discovery information from one row on the TNS results page*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page.
**Return:**
- ``discoveryData`` -- dictionary of results
- ``TNSId`` -- the unique TNS id for the transient
|
f5187:c0:m16
|
def _parse_photometry_data(<EOL>self,<EOL>content,<EOL>TNSId):
|
self.log.info('<STR_LIT>')<EOL>photData = []<EOL>relatedFilesTable = []<EOL>ATBlock = re.search(<EOL>r"""<STR_LIT>""",<EOL>content,<EOL>flags=re.S <EOL>)<EOL>if ATBlock:<EOL><INDENT>ATBlock = ATBlock.group()<EOL>reports = re.finditer(<EOL>r"""<STR_LIT>""",<EOL>ATBlock,<EOL>flags=re.S <EOL>)<EOL>relatedFiles = self._parse_related_files(ATBlock)<EOL>for r in reports:<EOL><INDENT>header = re.search(<EOL>r"""<STR_LIT>""",<EOL>r.group(),<EOL>flags=<NUM_LIT:0> <EOL>)<EOL>try:<EOL><INDENT>header = header.groupdict()<EOL><DEDENT>except:<EOL><INDENT>print(r.group())<EOL><DEDENT>header["<STR_LIT>"] = TNSId<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>if not self.comments:<EOL><INDENT>del header['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>theseComments = header[<EOL>"<STR_LIT>"].split("<STR_LIT:\n>")<EOL>header["<STR_LIT>"] = "<STR_LIT>"<EOL>for c in theseComments:<EOL><INDENT>header["<STR_LIT>"] += "<STR_LIT:U+0020>" + c.strip()<EOL><DEDENT>header["<STR_LIT>"] = header[<EOL>"<STR_LIT>"].strip().replace('<STR_LIT:">', "<STR_LIT:'>")[<NUM_LIT:0>:<NUM_LIT>]<EOL><DEDENT>phot = re.finditer(<EOL>r"""<STR_LIT>""",<EOL>r.group(),<EOL>flags=<NUM_LIT:0> <EOL>)<EOL>filesAppended = False<EOL>for p in phot:<EOL><INDENT>p = p.groupdict()<EOL>del p["<STR_LIT>"]<EOL>if p["<STR_LIT>"] and not p["<STR_LIT>"]:<EOL><INDENT>p["<STR_LIT>"] = p["<STR_LIT>"]<EOL>p["<STR_LIT>"] = <NUM_LIT:1><EOL>p["<STR_LIT>"] = p["<STR_LIT>"].replace(<EOL>"<STR_LIT>", "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>p["<STR_LIT>"] = <NUM_LIT:0><EOL><DEDENT>if not self.comments:<EOL><INDENT>del p["<STR_LIT>"]<EOL><DEDENT>p.update(header)<EOL>if p["<STR_LIT>"] and filesAppended == False:<EOL><INDENT>filesAppended = True<EOL>for f in relatedFiles:<EOL><INDENT>thisFile = collections.OrderedDict()<EOL>thisFile["<STR_LIT>"] = TNSId<EOL>thisFile["<STR_LIT:filename>"] = f[<EOL>"<STR_LIT>"].split("<STR_LIT:/>")[-<NUM_LIT:1>]<EOL>thisFile["<STR_LIT:url>"] = f["<STR_LIT>"]<EOL>if self.comments:<EOL><INDENT>thisFile["<STR_LIT>"] = f[<EOL>"<STR_LIT>"].replace("<STR_LIT:\n>", "<STR_LIT:U+0020>").strip().replace('<STR_LIT:">', "<STR_LIT:'>")[<NUM_LIT:0>:<NUM_LIT>]<EOL><DEDENT>thisFile["<STR_LIT>"] = p["<STR_LIT>"]<EOL>thisFile["<STR_LIT>"] = <NUM_LIT:2><EOL>relatedFilesTable.append(thisFile)<EOL><DEDENT><DEDENT>if not p["<STR_LIT>"] and not p["<STR_LIT>"]:<EOL><INDENT>p["<STR_LIT>"] = p["<STR_LIT>"]<EOL><DEDENT>del p["<STR_LIT>"]<EOL>del p["<STR_LIT>"]<EOL>orow = collections.OrderedDict()<EOL>keyOrder = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]<EOL>for k, v in p.items():<EOL><INDENT>if k not in keyOrder:<EOL><INDENT>keyOrder.append(k)<EOL><DEDENT><DEDENT>for k in keyOrder:<EOL><INDENT>try:<EOL><INDENT>orow[k] = p[k]<EOL><DEDENT>except:<EOL><INDENT>self.log.info(<EOL>"<STR_LIT>" % locals())<EOL>pass<EOL><DEDENT><DEDENT>photData.append(orow)<EOL><DEDENT><DEDENT><DEDENT>self.log.info('<STR_LIT>')<EOL>return photData, relatedFilesTable<EOL>
|
*parse photometry data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``photData`` -- a list of dictionaries of the photometry data
- ``relatedFilesTable`` -- a list of dictionaries of transient photometry related files
|
f5187:c0:m17
|
def _parse_related_files(<EOL>self,<EOL>content):
|
self.log.info('<STR_LIT>')<EOL>relatedFilesList = re.finditer(<EOL>r"""<STR_LIT>""",<EOL>content,<EOL>flags=<NUM_LIT:0> <EOL>)<EOL>relatedFiles = []<EOL>for f in relatedFilesList:<EOL><INDENT>f = f.groupdict()<EOL>relatedFiles.append(f)<EOL><DEDENT>self.log.info('<STR_LIT>')<EOL>return relatedFiles<EOL>
|
*parse the contents for related files URLs and comments*
**Key Arguments:**
- ``content`` -- the content to parse.
**Return:**
- ``relatedFiles`` -- a list of dictionaries of transient related files
|
f5187:c0:m18
|
def _parse_spectral_data(<EOL>self,<EOL>content,<EOL>TNSId):
|
self.log.info('<STR_LIT>')<EOL>specData = []<EOL>relatedFilesTable = []<EOL>classBlock = re.search(<EOL>r"""<STR_LIT>""",<EOL>content,<EOL>flags=re.S <EOL>)<EOL>if classBlock:<EOL><INDENT>classBlock = classBlock.group()<EOL>reports = re.finditer(<EOL>r"""<STR_LIT>""",<EOL>classBlock,<EOL>flags=re.S <EOL>)<EOL>relatedFiles = self._parse_related_files(classBlock)<EOL>for r in reports:<EOL><INDENT>header = re.search(<EOL>r"""<STR_LIT>""",<EOL>r.group(),<EOL>flags=re.S <EOL>)<EOL>if not header:<EOL><INDENT>continue<EOL><DEDENT>header = header.groupdict()<EOL>header["<STR_LIT>"] = TNSId<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>del header["<STR_LIT>"]<EOL>if not self.comments:<EOL><INDENT>del header['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>theseComments = header[<EOL>"<STR_LIT>"].split("<STR_LIT:\n>")<EOL>header["<STR_LIT>"] = "<STR_LIT>"<EOL>for c in theseComments:<EOL><INDENT>header["<STR_LIT>"] += "<STR_LIT:U+0020>" + c.strip()<EOL><DEDENT>header["<STR_LIT>"] = header[<EOL>"<STR_LIT>"].strip().replace('<STR_LIT:">', "<STR_LIT:'>")[<NUM_LIT:0>:<NUM_LIT>]<EOL><DEDENT>spec = re.finditer(<EOL>r"""<STR_LIT>""",<EOL>r.group(),<EOL>flags=<NUM_LIT:0> <EOL>)<EOL>filesAppended = False<EOL>for s in spec:<EOL><INDENT>s = s.groupdict()<EOL>del s["<STR_LIT>"]<EOL>del s["<STR_LIT>"]<EOL>del s["<STR_LIT>"]<EOL>if not self.comments:<EOL><INDENT>del s["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>s["<STR_LIT>"] = s["<STR_LIT>"].replace('<STR_LIT:">', "<STR_LIT:'>")[<NUM_LIT:0>:<NUM_LIT>]<EOL><DEDENT>s.update(header)<EOL>if s["<STR_LIT>"] and filesAppended == False:<EOL><INDENT>filesAppended = True<EOL>for f in relatedFiles:<EOL><INDENT>thisFile = collections.OrderedDict()<EOL>thisFile["<STR_LIT>"] = TNSId<EOL>thisFile["<STR_LIT:filename>"] = f[<EOL>"<STR_LIT>"].split("<STR_LIT:/>")[-<NUM_LIT:1>]<EOL>thisFile["<STR_LIT:url>"] = f["<STR_LIT>"]<EOL>if self.comments:<EOL><INDENT>thisFile["<STR_LIT>"] = f[<EOL>"<STR_LIT>"].replace("<STR_LIT:\n>", "<STR_LIT:U+0020>").strip()<EOL><DEDENT>thisFile["<STR_LIT>"] = s["<STR_LIT>"]<EOL>thisFile["<STR_LIT>"] = <NUM_LIT:1><EOL>relatedFilesTable.append(thisFile)<EOL><DEDENT><DEDENT>for ffile in [s["<STR_LIT>"], s["<STR_LIT>"]]:<EOL><INDENT>if ffile:<EOL><INDENT>thisFile = collections.OrderedDict()<EOL>thisFile["<STR_LIT>"] = TNSId<EOL>thisFile["<STR_LIT:filename>"] = ffile.split(<EOL>"<STR_LIT:/>")[-<NUM_LIT:1>]<EOL>thisFile["<STR_LIT:url>"] = ffile<EOL>if self.comments:<EOL><INDENT>thisFile["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>thisFile["<STR_LIT>"] = s["<STR_LIT>"]<EOL>thisFile["<STR_LIT>"] = <NUM_LIT:1><EOL>relatedFilesTable.append(thisFile)<EOL><DEDENT><DEDENT>del s["<STR_LIT>"]<EOL>del s["<STR_LIT>"]<EOL>del s["<STR_LIT>"]<EOL>orow = collections.OrderedDict()<EOL>keyOrder = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]<EOL>for k, v in s.items():<EOL><INDENT>if k not in keyOrder:<EOL><INDENT>keyOrder.append(k)<EOL><DEDENT><DEDENT>for k in keyOrder:<EOL><INDENT>try:<EOL><INDENT>orow[k] = s[k]<EOL><DEDENT>except:<EOL><INDENT>self.log.info(<EOL>"<STR_LIT>" % locals())<EOL>pass<EOL><DEDENT><DEDENT>specData.append(orow)<EOL><DEDENT><DEDENT><DEDENT>self.log.info('<STR_LIT>')<EOL>return specData, relatedFilesTable<EOL>
|
*parse spectral data from a row in the TNS results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``specData`` -- a list of dictionaries of the spectral data
- ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files
|
f5187:c0:m19
|
def getpackagepath():
|
moduleDirectory = os.path.dirname(__file__)<EOL>packagePath = os.path.dirname(__file__) + "<STR_LIT>"<EOL>return packagePath<EOL>
|
*getpackagepath*
|
f5188:m0
|
def main(arguments=None):
|
<EOL>su = tools(<EOL>arguments=arguments,<EOL>docString=__doc__,<EOL>logLevel="<STR_LIT>",<EOL>options_first=False,<EOL>projectName="<STR_LIT>"<EOL>)<EOL>arguments, settings, log, dbConn = su.setup()<EOL>readline.set_completer_delims('<STR_LIT>')<EOL>readline.parse_and_bind("<STR_LIT>")<EOL>readline.set_completer(tab_complete)<EOL>for arg, val in arguments.items():<EOL><INDENT>if arg[<NUM_LIT:0>] == "<STR_LIT:->":<EOL><INDENT>varname = arg.replace("<STR_LIT:->", "<STR_LIT>") + "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>varname = arg.replace("<STR_LIT:<>", "<STR_LIT>").replace("<STR_LIT:>>", "<STR_LIT>")<EOL><DEDENT>if isinstance(val, str) or isinstance(val, str):<EOL><INDENT>exec(varname + "<STR_LIT>" % (val,))<EOL><DEDENT>else:<EOL><INDENT>exec(varname + "<STR_LIT>" % (val,))<EOL><DEDENT>if arg == "<STR_LIT>":<EOL><INDENT>dbConn = val<EOL><DEDENT>log.debug('<STR_LIT>' % (varname, val,))<EOL><DEDENT>startTime = times.get_now_sql_datetime()<EOL>log.info(<EOL>'<STR_LIT>' %<EOL>(startTime,))<EOL>if search or new or cone:<EOL><INDENT>if ra:<EOL><INDENT>tns = transientNamer.search(<EOL>log=log,<EOL>ra=ra,<EOL>dec=dec,<EOL>radiusArcsec=arcsecRadius,<EOL>comments=withCommentsFlag<EOL>)<EOL><DEDENT>if name:<EOL><INDENT>tns = transientNamer.search(<EOL>log=log,<EOL>name=name,<EOL>comments=withCommentsFlag<EOL>)<EOL><DEDENT>if discInLastDays:<EOL><INDENT>tns = transientNamer.search(<EOL>log=log,<EOL>discInLastDays=discInLastDays,<EOL>comments=withCommentsFlag<EOL>)<EOL><DEDENT>if outputFlag and not os.path.exists(outputFlag):<EOL><INDENT>os.makedirs(outputFlag)<EOL><DEDENT>if tableNamePrefix:<EOL><INDENT>sources, phot, spec, files = tns.mysql(<EOL>tableNamePrefix=tableNamePrefix, dirPath=outputFlag)<EOL>numSources = len(sources.split("<STR_LIT:\n>")) - <NUM_LIT:1><EOL><DEDENT>elif not render or render == "<STR_LIT>":<EOL><INDENT>sources, phot, spec, files = tns.table(dirPath=outputFlag)<EOL>numSources = len(sources.split("<STR_LIT:\n>")) - <NUM_LIT:4><EOL><DEDENT>elif render == "<STR_LIT>":<EOL><INDENT>sources, phot, spec, files = tns.csv(dirPath=outputFlag)<EOL>numSources = len(sources.split("<STR_LIT:\n>")) - <NUM_LIT:1><EOL><DEDENT>elif render == "<STR_LIT>":<EOL><INDENT>sources, phot, spec, files = tns.json(dirPath=outputFlag)<EOL>numSources = len(sources.split("<STR_LIT:{>")) - <NUM_LIT:1><EOL><DEDENT>elif render == "<STR_LIT>":<EOL><INDENT>sources, phot, spec, files = tns.yaml(dirPath=outputFlag)<EOL>numSources = len(sources.split("<STR_LIT>"))<EOL><DEDENT>elif render == "<STR_LIT>":<EOL><INDENT>sources, phot, spec, files = tns.markdown(dirPath=outputFlag)<EOL>numSources = len(sources.split("<STR_LIT:\n>")) - <NUM_LIT:2><EOL><DEDENT>if numSources == <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>" % locals())<EOL><DEDENT>elif numSources > <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>" % locals())<EOL><DEDENT>if not outputFlag:<EOL><INDENT>print("<STR_LIT>")<EOL>print(sources)<EOL>print("<STR_LIT>")<EOL>print(phot)<EOL>print("<STR_LIT>")<EOL>print(spec)<EOL>print("<STR_LIT>")<EOL>print(files)<EOL>print("<STR_LIT>")<EOL>print(tns.url)<EOL><DEDENT><DEDENT>if "<STR_LIT>" in locals() and dbConn:<EOL><INDENT>dbConn.commit()<EOL>dbConn.close()<EOL><DEDENT>endTime = times.get_now_sql_datetime()<EOL>runningTime = times.calculate_time_difference(startTime, endTime)<EOL>log.info('<STR_LIT>' %<EOL>(endTime, runningTime, ))<EOL>return<EOL>
|
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
|
f5190:m1
|
def match(list_a, list_b, not_found='<STR_LIT>', enforce_sublist=False,<EOL>country_data=COUNTRY_DATA_FILE, additional_data=None):
|
if isinstance(list_a, str):<EOL><INDENT>list_a = [list_a]<EOL><DEDENT>if isinstance(list_b, str):<EOL><INDENT>list_b = [list_b]<EOL><DEDENT>if isinstance(list_a, tuple):<EOL><INDENT>list_a = list(list_a)<EOL><DEDENT>if isinstance(list_b, tuple):<EOL><INDENT>list_b = list(list_b)<EOL><DEDENT>coco = CountryConverter(country_data, additional_data)<EOL>name_dict_a = dict()<EOL>match_dict_a = dict()<EOL>for name_a in list_a:<EOL><INDENT>name_dict_a[name_a] = []<EOL>match_dict_a[name_a] = []<EOL>for regex in coco.regexes:<EOL><INDENT>if regex.search(name_a):<EOL><INDENT>match_dict_a[name_a].append(regex)<EOL><DEDENT><DEDENT>if len(match_dict_a[name_a]) == <NUM_LIT:0>:<EOL><INDENT>logging.warning('<STR_LIT>'.format(name_a))<EOL>_not_found_entry = name_a if not not_found else not_found<EOL>name_dict_a[name_a].append(_not_found_entry)<EOL>if not enforce_sublist:<EOL><INDENT>name_dict_a[name_a] = name_dict_a[name_a][<NUM_LIT:0>]<EOL><DEDENT>continue<EOL><DEDENT>if len(match_dict_a[name_a]) > <NUM_LIT:1>:<EOL><INDENT>logging.warning(<EOL>'<STR_LIT>'.format(name_a))<EOL><DEDENT>for match_case in match_dict_a[name_a]:<EOL><INDENT>b_matches = <NUM_LIT:0><EOL>for name_b in list_b:<EOL><INDENT>if match_case.search(name_b):<EOL><INDENT>b_matches += <NUM_LIT:1><EOL>name_dict_a[name_a].append(name_b)<EOL><DEDENT><DEDENT><DEDENT>if b_matches == <NUM_LIT:0>:<EOL><INDENT>logging.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(name_a))<EOL>_not_found_entry = name_a if not not_found else not_found<EOL>name_dict_a[name_a].append(_not_found_entry)<EOL><DEDENT>if b_matches > <NUM_LIT:1>:<EOL><INDENT>logging.warning('<STR_LIT>'<EOL>'<STR_LIT>'.format(name_a))<EOL><DEDENT>if not enforce_sublist and (len(name_dict_a[name_a]) == <NUM_LIT:1>):<EOL><INDENT>name_dict_a[name_a] = name_dict_a[name_a][<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return name_dict_a<EOL>
|
Matches the country names given in two lists into a dictionary.
This function matches names given in list_a to the ones provided in list_b
using regular expressions defined in country_data.
Parameters
----------
list_a : list
Names of countries to identify
list_b : list
Master list of names for countries
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
(default: 'not found')
enforce_sublist : boolean, optional
If True, all entries in both lists are lists.
If False (default), only multiple matches are lists, the rest are strings
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data file. (utf-8 encoded tab separated data, same
column headers in all files)
Returns
-------
dict:
A dictionary with a key for every entry in list_a. The value
corresponds to the matching entry in list_b if found. If there is
a 1:1 correspondence, the value is a str (if enforce_sublist is False),
otherwise multiple entries as list.
|
f5198:m1
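A hedged usage sketch, assuming the module is importable as ``country_converter`` (aliased ``coco``, as elsewhere in these docstrings):

.. code-block:: python

    import country_converter as coco

    name_map = coco.match(['US', 'UK', 'Neverland'],
                          ['United States', 'United Kingdom', 'Germany'])
    # names from the first list are mapped onto the second via coco's regexes;
    # anything without a match falls back to the not_found value ('not found' by default)

    # force every value to be a list, even for 1:1 matches
    name_map_lists = coco.match(['US'], ['United States'], enforce_sublist=True)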
|
def convert(*args, **kargs):
|
init = {'<STR_LIT>': COUNTRY_DATA_FILE,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': False}<EOL>init.update({kk: kargs.get(kk) for kk in init.keys() if kk in kargs})<EOL>coco = CountryConverter(**init)<EOL>kargs = {kk: ii for kk, ii in kargs.items() if kk not in init.keys()}<EOL>return coco.convert(*args, **kargs)<EOL>
|
Wrapper around CountryConverter.convert()
Uses the same parameters. This function has the same performance as
CountryConverter.convert for one call; for multiple calls it is better to
instantiate a common CountryConverter (this avoid loading the source data
file multiple times).
Note
----
A lot of the functionality can also be done directly in Pandas DataFrames.
For example:
cc = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
cc.data[cc.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert to 'to' classification
src : str, optional
Source classification
to : str, optional
Output classification (valid str for an index of the
country data file), default: name_short
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was passed)
or to be a list of lists (if multiple names were passed). If False
(default), the output will be a string (if only one name was passed) or
a list of str and/or lists (str if a one to one matching, list
otherwise).
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
(default: 'not found')
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data_file. (utf-8 encoded tab separated data, same
column headers as in the general country data file)
Returns
-------
list or str, depending on enforce_list
|
f5198:m2
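The wrapper mirrors ``CountryConverter.convert``, so for one-off calls it is the most convenient entry point. A sketch using the classification names that appear in the docstring above:

.. code-block:: python

    import country_converter as coco

    iso2 = coco.convert(names=['USA', 'SWZ', 'PRI'], to='ISO2')

    # a single name in gives a single string back (unless enforce_list=True)
    continent = coco.convert(names='USA', to='continent')

    # for many calls, build one converter up front to avoid re-reading the data file
    cc = coco.CountryConverter()
    short_names = cc.convert(names=['USA', 'SWZ', 'PRI'], to='name_short')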
|
def _parse_arg(valid_classifications):
|
parser = argparse.ArgumentParser(<EOL>description=('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(__version__)<EOL>), prog='<STR_LIT>', usage=('<STR_LIT>'))<EOL>parser.add_argument('<STR_LIT>',<EOL>help=('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(valid_classifications) +<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>), nargs='<STR_LIT:*>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>help=('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>help=('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>args = parser.parse_args()<EOL>args.src = args.src or None<EOL>args.to = args.to or '<STR_LIT>'<EOL>args.not_found = args.not_found if args.not_found != '<STR_LIT:None>' else None<EOL>args.output_sep = args.output_sep or '<STR_LIT:U+0020>'<EOL>return args<EOL>
|
Command line parser for coco
Parameters
----------
valid_classifications: list
Available classifications, used for checking input parameters.
Returns
-------
args : ArgumentParser namespace
|
f5198:m3
|
def main():
|
args = _parse_arg(CountryConverter().valid_class)<EOL>coco = CountryConverter(additional_data=args.additional_data)<EOL>converted_names = coco.convert(<EOL>names=args.names,<EOL>src=args.src,<EOL>to=args.to,<EOL>enforce_list=False,<EOL>not_found=args.not_found)<EOL>print(args.output_sep.join(<EOL>[str(etr) for etr in converted_names] if<EOL>isinstance(converted_names, list) else [str(converted_names)]))<EOL>
|
Main entry point - used for command line call
|
f5198:m4
|
@staticmethod<EOL><INDENT>def _separate_exclude_cases(name, exclude_prefix):<DEDENT>
|
excluder = re.compile('<STR_LIT:|>'.join(exclude_prefix))<EOL>split_entries = excluder.split(name)<EOL>return {'<STR_LIT>': split_entries[<NUM_LIT:0>],<EOL>'<STR_LIT>': split_entries[<NUM_LIT:1>:]}<EOL>
|
Splits ``name`` into the part to convert and the excluded remainder.
Parameters
----------
name : str
Name of the country/region to convert.
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
Returns
-------
dict with
'clean_name' : str
as name without anything following exclude_prefix
'excluded_countries' : list
list of excluded countries
|
f5198:c0:m0
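A tiny illustration of the split, using the 'excluding' prefix mentioned in the docstring (note that whitespace around the split point is not stripped):

.. code-block:: python

    parts = CountryConverter._separate_exclude_cases(
        'Asia excluding China', exclude_prefix=['excluding'])

    parts['clean_name']          # 'Asia ' -- the part that gets converted
    parts['excluded_countries']  # [' China'] -- dropped before conversion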
|
def __init__(self, country_data=COUNTRY_DATA_FILE,<EOL>additional_data=None, only_UNmember=False,<EOL>include_obsolete=False):
|
must_be_unique = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>must_be_string = must_be_unique + (['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'])<EOL>def test_for_unique_names(df, data_name='<STR_LIT>',<EOL>report_fun=logging.error):<EOL><INDENT>for name_entry in must_be_unique:<EOL><INDENT>if df[name_entry].duplicated().any():<EOL><INDENT>report_fun('<STR_LIT>'.format(<EOL>name_entry, data_name))<EOL><DEDENT><DEDENT><DEDENT>def data_loader(data):<EOL><INDENT>if isinstance(data, pd.DataFrame):<EOL><INDENT>ret = data<EOL>test_for_unique_names(data)<EOL><DEDENT>else:<EOL><INDENT>ret = pd.read_table(data, sep='<STR_LIT:\t>', encoding='<STR_LIT:utf-8>',<EOL>converters={str_col: str<EOL>for str_col in must_be_string})<EOL>test_for_unique_names(ret, data)<EOL><DEDENT>return ret<EOL><DEDENT>basic_df = data_loader(country_data)<EOL>if only_UNmember:<EOL><INDENT>basic_df.dropna(subset=['<STR_LIT>'], inplace=True)<EOL><DEDENT>if not include_obsolete:<EOL><INDENT>basic_df = basic_df[basic_df.obsolete.isnull()]<EOL><DEDENT>if additional_data is None:<EOL><INDENT>additional_data = []<EOL><DEDENT>if not isinstance(additional_data, list):<EOL><INDENT>additional_data = [additional_data]<EOL><DEDENT>add_data = [data_loader(df) for df in additional_data]<EOL>self.data = pd.concat([basic_df] + add_data, ignore_index=True,<EOL>axis=<NUM_LIT:0>)<EOL>test_for_unique_names(<EOL>self.data,<EOL>data_name='<STR_LIT>',<EOL>report_fun=logging.warning)<EOL>for name_entry in must_be_unique:<EOL><INDENT>self.data.drop_duplicates(subset=[name_entry],<EOL>keep='<STR_LIT>', inplace=True)<EOL><DEDENT>self.data.reset_index(drop=True, inplace=True)<EOL>self.regexes = [re.compile(entry, re.IGNORECASE)<EOL>for entry in self.data.regex]<EOL>
|
Parameters
----------
country_data : Pandas DataFrame or path to data file
This is by default set to COUNTRY_DATA_FILE - the standard
(tested) country list for coco.
additional_data : (list of) Pandas DataFrames or data files
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data file. (utf-8 encoded tab separated data, same
column headers in all files)
only_UNmember : boolean, optional
If True, only load countries that are currently UN members from
the standard data file. If False (default), load the full list
of countries; in that case, countries that no longer exist
(e.g. the USSR) or have overlapping territories are also included.
include_obsolete : boolean, optional
If True, includes countries that have become obsolete. If
False (default), only includes currently valid countries.
|
f5198:c0:m1
|
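A hedged sketch of how the constructor parameters documented above could be exercised; the import path is assumed and the TSV path in the commented call is a hypothetical placeholder (any additional data must repeat the column headers of the standard country data file).

# Sketch only: exercises the documented constructor flags.
from country_converter import CountryConverter  # assumed import path

cc_default = CountryConverter()                        # full country list
cc_un_only = CountryConverter(only_UNmember=True)      # current UN members only
cc_historic = CountryConverter(include_obsolete=True)  # keep obsolete entries
# cc_extended = CountryConverter(
#     additional_data='my_classification.tsv')  # hypothetical path; utf-8,
#                                               # tab separated, same headers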
def convert(self, names, src=None, to='<STR_LIT>', enforce_list=False,<EOL>not_found='<STR_LIT>',<EOL>exclude_prefix=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):
|
<EOL>names = list(names) if (<EOL>isinstance(names, tuple) or<EOL>isinstance(names, set)) else names<EOL>names = names if isinstance(names, list) else [names]<EOL>names = [str(n) for n in names]<EOL>outlist = names.copy()<EOL>to = [self._validate_input_para(to, self.data.columns)]<EOL>exclude_split = {name: self._separate_exclude_cases(name,<EOL>exclude_prefix)<EOL>for name in names}<EOL>for ind_names, current_name in enumerate(names):<EOL><INDENT>spec_name = exclude_split[current_name]['<STR_LIT>']<EOL>if src is None:<EOL><INDENT>src_format = self._get_input_format_from_name(spec_name)<EOL><DEDENT>else:<EOL><INDENT>src_format = self._validate_input_para(src, self.data.columns)<EOL><DEDENT>if src_format.lower() == '<STR_LIT>':<EOL><INDENT>result_list = []<EOL>for ind_regex, ccregex in enumerate(self.regexes):<EOL><INDENT>if ccregex.search(spec_name):<EOL><INDENT>result_list.append(<EOL>self.data.ix[ind_regex, to].values[<NUM_LIT:0>])<EOL><DEDENT>if len(result_list) > <NUM_LIT:1>:<EOL><INDENT>logging.warning('<STR_LIT>'<EOL>'<STR_LIT>'.format(spec_name))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>_match_col = self.data[src_format].astype(<EOL>str).str.replace('<STR_LIT>', '<STR_LIT>')<EOL>result_list = [etr[<NUM_LIT:0>] for etr in<EOL>self.data[_match_col.str.contains(<EOL>'<STR_LIT>' + spec_name + '<STR_LIT:$>', flags=re.IGNORECASE,<EOL>na=False)][to].values]<EOL><DEDENT>if len(result_list) == <NUM_LIT:0>:<EOL><INDENT>logging.warning(<EOL>'<STR_LIT>'.format(spec_name, src_format))<EOL>_fillin = not_found or spec_name<EOL>outlist[ind_names] = [_fillin] if enforce_list else _fillin<EOL><DEDENT>else:<EOL><INDENT>outlist[ind_names] = []<EOL>for etr in result_list:<EOL><INDENT>try:<EOL><INDENT>conv_etr = int(etr)<EOL><DEDENT>except ValueError:<EOL><INDENT>conv_etr = etr<EOL><DEDENT>outlist[ind_names].append(conv_etr)<EOL><DEDENT>if len(outlist[ind_names]) == <NUM_LIT:1> and enforce_list is False:<EOL><INDENT>outlist[ind_names] = outlist[ind_names][<NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT>if (len(outlist) == <NUM_LIT:1>) and not enforce_list:<EOL><INDENT>return outlist[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return outlist<EOL><DEDENT>
|
Convert names from the 'src' classification to the 'to' classification.
Note
----
A lot of the functionality can also be done directly in Pandas
DataFrames.
For example:
coco = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
coco.data[coco.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert
to 'to' classification
src : str, optional
Source classification. If None (default), each passed name is
checked to see whether it is a number (assumed UNnumeric) or 2
(ISO2) or 3 (ISO3) characters long; for longer names 'regex' is
assumed.
to : str, optional
Output classification (valid index of the country_data file),
default: ISO3
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was
passed) or to be a list of lists (if multiple names were passed).
If False (default), the output will be a string (if only one name
was passed) or a list of str and/or lists (str for a one-to-one
match, list otherwise).
not_found : str, optional
Fill-in value for entries that could not be matched. If None, the
input value is kept (default: 'not found').
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
Default: ['excl\\w.*', 'without', 'w/o']
Returns
-------
list or str, depending on enforce_list
|
f5198:c0:m2
|
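A hedged usage sketch for convert(), complementing the Note above; the import path is assumed, the classification labels follow the docstring ('ISO3' default, 'ISO2', 'continent'), and the results noted in comments are indicative rather than guaranteed.

# Sketch only: illustrative calls; expected results depend on the
# shipped country data.
from country_converter import CountryConverter  # assumed import path

cc = CountryConverter()

cc.convert('United States', to='ISO2')             # expected: 'US'
cc.convert(['USA', 'SWZ', 'PRI'], to='continent')  # one continent per name
cc.convert('China excluding Hong Kong')            # exclude_prefix strips the
                                                   # 'excluding ...' part first
cc.convert('Neverland', not_found=None)            # unmatched: input kept
cc.convert('DE', src='ISO2', to='ISO3',
           enforce_list=True)                      # always returns a list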
def EU28as(self, to='<STR_LIT>'):
|
if type(to) is str:<EOL><INDENT>to = [to]<EOL><DEDENT>return self.data[self.data.EU < <NUM_LIT>][to]<EOL>
|
Return EU28 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
|
f5198:c0:m3
|
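A short hedged sketch for EU28as(); the import path is assumed, the column labels come from the docstring above, and the result is a one-column Pandas DataFrame slice of the internal data.

# Sketch only: classification labels follow the docstring above.
from country_converter import CountryConverter  # assumed import path

cc = CountryConverter()
eu28_iso3 = cc.EU28as('ISO3')   # DataFrame with one 'ISO3' column
eu28_names = cc.EU28as()        # default classification: name_short
print(len(eu28_iso3))           # expected to be 28 rows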